| text (string, lengths 12 to 1.05M) | repo_name (string, lengths 5 to 86) | path (string, lengths 4 to 191) | language (string, 1 class) | license (string, 15 classes) | size (int32, 12 to 1.05M) | keyword (list, lengths 1 to 23) | text_hash (string, length 64) |
|---|---|---|---|---|---|---|---|
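The rows below follow this schema. As a quick orientation, here is a minimal sketch of how rows with these columns could be loaded and filtered, assuming a Hugging Face-style `datasets` dump; the dataset identifier `"<dataset-name>"` is a placeholder, not the real name.

```python
# Minimal sketch, assuming a Hugging Face-style dataset with the columns shown
# above (text, repo_name, path, language, license, size, keyword, text_hash).
# "<dataset-name>" is a placeholder, not the actual dataset identifier.
from datasets import load_dataset

ds = load_dataset("<dataset-name>", split="train")

# Keep only small Python files carrying a given keyword tag.
subset = ds.filter(
    lambda row: row["language"] == "Python"
    and row["size"] < 10_000
    and "VTK" in row["keyword"]
)

for row in subset.select(range(min(3, len(subset)))):
    print(row["repo_name"], row["path"], row["license"], row["size"])
```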
#!/usr/bin/env python
#
# Copyright (c), 2016-2017, Quantum Espresso Foundation and SISSA (Scuola
# Internazionale Superiore di Studi Avanzati). All rights reserved.
# This file is distributed under the terms of the LGPL-2.1 license. See the
# file 'LICENSE' in the root directory of the present distribution, or
# https://opensource.org/licenses/LGPL-2.1
#
"""
Execute postqe module as a script (see PEP-338).
"""
if not locals()['__package__']:
# When this module is loaded before the package then __package__ is None or ''
# and the relative imports are disabled. In this case import the package and
# set __package__.
#
# $ python postqe --> __package__ == ''
# $ python postqe/__main__.py --> __package__ is None
#
# Ref: https://www.python.org/dev/peps/pep-0366/ for details.
import os
import sys
pkg_search_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if sys.path[0] != pkg_search_path:
sys.path.insert(0, pkg_search_path)
import postqe
__package__ = postqe.__name__
from .cli import main
main()
| QEF/postqe | postqe/__main__.py | Python | lgpl-2.1 | 1,093 | ["Quantum ESPRESSO"] | 5a75617629f5b566c36bd23e9a9f45a5a7d3dc19bc10666f6046bf15a7dcf4b7 |
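The text_hash column is a 64-character hex digest, which is consistent with SHA-256 of the row's text; the exact hashing recipe is not documented here, so the helper below is an assumption rather than the dataset's stated method.

```python
# Assumption: text_hash is the SHA-256 hex digest of the row's raw text field,
# inferred only from its fixed 64-hex-character length.
import hashlib

def text_sha256(text: str) -> str:
    """Return the SHA-256 hex digest of a row's text field (UTF-8 encoded)."""
    return hashlib.sha256(text.encode("utf-8")).hexdigest()
```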
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Live value resolution.
Live values are extracted from the known execution context.
Requires activity and reaching definitions analyses.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import inspect_utils
from tensorflow.python.autograph.pyct import transformer
class LiveValueResolver(transformer.Base):
"""Annotates nodes with live values."""
def __init__(self, context, literals):
super(LiveValueResolver, self).__init__(context)
self.literals = literals
def visit_ClassDef(self, node):
self.generic_visit(node)
anno.setanno(
node, 'live_val', self.ctx.info.namespace[node.name])
return node
def visit_Name(self, node):
self.generic_visit(node)
if isinstance(node.ctx, gast.Load):
defs = anno.getanno(node, anno.Static.DEFINITIONS, ())
is_defined = bool(defs)
has_single_def = len(defs) == 1
if not is_defined:
if node.id in self.literals:
anno.setanno(node, 'live_val', self.literals[node.id])
elif node.id in self.ctx.info.namespace:
obj = self.ctx.info.namespace[node.id]
anno.setanno(node, 'live_val', obj)
if hasattr(obj, '__name__'):
anno.setanno(node, 'fqn', (obj.__name__,))
elif hasattr(obj, '__class__'):
obj_class = obj.__class__
anno.setanno(node, 'fqn',
(obj_class.__module__, obj_class.__name__))
else:
# If the symbol value is for example a primitive, then it will not
# have a name.
pass
elif node.id in inspect_utils.SPECIAL_BUILTINS:
# Note: if the user redefined any of these symbols, then they would
# be visible in the namespace and we would never reach this branch.
anno.setanno(
node, 'live_val', inspect_utils.SPECIAL_BUILTINS[node.id])
else:
pass
# TODO(mdan): Should we raise an error here?
# Can encounter this when:
# * a symbol truly lacks reference
# * a symbol is new, like the new name of a function we just renamed.
else:
pass
# TODO(mdan): Attempt to trace its value through the local chain.
# TODO(mdan): Use type annotations as fallback.
if has_single_def:
def_, = defs
# Note: param_of is a weakref.
if def_.param_of and def_.param_of() is self.enclosing_entities[0]:
if node.id in self.ctx.info.arg_values:
obj = self.ctx.info.arg_values[node.id]
anno.setanno(node, 'live_val', obj)
anno.setanno(node, 'fqn', (obj.__class__.__name__,))
return node
def visit_Attribute(self, node):
self.generic_visit(node)
if anno.hasanno(node.value, 'live_val'):
assert anno.hasanno(node.value, 'fqn')
parent_object = anno.getanno(node.value, 'live_val')
anno.setanno(node, 'parent_type', type(parent_object))
anno.setanno(node, 'fqn', anno.getanno(node.value, 'fqn') + (node.attr,))
if hasattr(parent_object, node.attr):
# This can happen when the attribute's creation and use depend on the
# same static condition, for example:
#
# if cond:
# foo.bar = baz
# if cond:
# x = foo.bar
#
anno.setanno(node, 'live_val', getattr(parent_object, node.attr))
# TODO(mdan): Investigate the role built-in annotations can play here.
elif anno.hasanno(node.value, 'type'):
parent_type = anno.getanno(node.value, 'type')
if hasattr(parent_type, node.attr):
# This should hold for static members like methods.
# This would not hold for dynamic members like function attributes.
# For the dynamic case, we simply leave the node without an annotation,
# and let downstream consumers figure out what to do.
anno.setanno(node, 'parent_type', parent_type)
anno.setanno(node, 'live_val', getattr(parent_type, node.attr))
anno.setanno(node, 'fqn',
anno.getanno(node.value, 'type_fqn') + (node.attr,))
elif isinstance(node.value, gast.Name):
# TODO(mdan): Figure out what to do when calling attribute on local object
# Maybe just leave as-is?
pass
return node
def resolve(node, context, literals):
return LiveValueResolver(context, literals).visit(node)
| jendap/tensorflow | tensorflow/python/autograph/pyct/static_analysis/live_values.py | Python | apache-2.0 | 5,241 | ["VisIt"] | 89d0a597b1d21310c613b8efd5127cea499b02254e66a375ecd194edcbe77a30 |
import glob, os, shutil
from astropy.io import fits
import astropy.io.fits as pyfits # for compatibility...
from multiprocessing import Pool
import argparse
import shutil
import numpy as np
import numpy.ma
import wfc3tools
from datetime import datetime
def parse_args():
"""Parse command line arguements.
Parameters:
Nothing
Returns:
arguments: argparse.Namespace object
An object containing all of the added arguments.
Outputs:
Nothing
"""
copy_help = 'Copy files back to original target directory? Default False'
parser = argparse.ArgumentParser()
parser.add_argument('-c', help=copy_help, action='store_true',
required=False)
arguments = parser.parse_args()
return arguments
def fix_ramp(raw):
if fits.getval(raw,'filter') != 'F110W' or os.path.exists(raw.replace('raw.fits','flb.fits')) or fits.getval(raw,'subarray') == 'T':
return
else:
print 'Flattening Ramp for: {}'.format(raw)
make_IMA_FLT(raw)
# shutil.move(raw.replace('raw.fits','ima.tra'),'ex/')
# shutil.move(raw.replace('raw.fits','.tra'),'ex/')
flt = raw.replace('raw.fits','flt.fits')
shutil.move(flt,flt.replace('flt.fits','flb.fits'))
print 'FLATTENING COMPLETE'
return
def update_flt(raw):
# Copy Pipeline flt to ir_aux_files directory
# Add Gabe's extra bad pixels to FLT DQ
# Flag persistence pixels (>0.6*ERR) in FLT DQ if persist.fits exists
# Replace FLT data with flattened ramp from make_IMA_FLT
# Add header keywords indicating when this script was run
# Write fixes into original FLT to preserve WCS transformations
flt = raw.replace('raw.fits', 'flt.fits')
if not os.path.exists(flt):
try:
orig_flt = glob.glob('../targets/*/{}'.format(flt))[0]
shutil.copy(orig_flt, '.')
except:
print raw, fits.getval(raw,'TARGNAME'), 'NOT FOUND, REMOVING'
os.remove(raw)
return
# print 'BEGINNING FLT UPDATE'
# print '________________________________________________________'
else:
print 'FLT {} already in directory, skipping'.format(flt)
return flt
hdu = fits.open(flt, mode='update')
orig_dq = hdu['DQ'].data
new_flags = fits.getdata('/astro/pabeta/wfc3/data/badpix_spars200_Nov9.fits')
if orig_dq.shape == (1014,1014):
new_dq = np.bitwise_or(orig_dq,new_flags)
else:
hdr1 = hdu[1].header
ltv1, ltv2 = abs(hdr1['LTV1']), abs(hdr1['LTV2'])
naxis1, naxis2 = hdr1['NAXIS1'], hdr1['NAXIS2']
flags_sub = new_flags[ltv2:ltv2+naxis2,ltv1:ltv1+naxis1]
new_dq = np.bitwise_or(orig_dq,flags_sub)
today = datetime.today()
date = '{}-{}-{}'.format(today.year,today.month,today.day)
hdu[0].header['BPIXFLAG'] = date
proposid = hdu[0].header['PROPOSID']
root = hdu[0].header['ROOTNAME']
vis = root[4:6]
pers_path_potential = '/grp/hst/wfc3a/GO_Links/{}/Visit{}/Persist/{}'.format(proposid,vis,root.lower()+'_persist.fits')
pers_path = glob.glob(pers_path_potential)
if len(pers_path)==1:
pers_path = pers_path[0]
err = hdu['ERR'].data
pers_flags = np.zeros(err.shape, dtype=np.uint16)
pers_data = fits.getdata(pers_path)
pers_flags[pers_data>0.6*err] = 1024
new_dq = np.bitwise_or(new_dq,pers_flags)
hdu[0].header['PERSFLAG'] = date
print 'FLAGGING PERSISTENCE'
else: hdu[0].header['PERSFLAG'] = 'NONE'
print 'ADDING BAD PIXELS TO DQ ARRAY FOR {}'.format(flt)
hdu['DQ'].data = new_dq
flb = flt.replace('flt.fits','flb.fits')
if os.path.exists(flb):
flb_data = fits.getdata(flb,1)
hdu['SCI'].data = flb_data
print 'REPLACING PIPELINE FLT DATA WITH FLATTENED RAMP FOR {}'.format(flt)
hdu[0].header['FLATRAMP'] = date
hdu.close()
return flt
def check_flt(flt_path):
flt = os.path.split(flt_path)[-1]
det = fits.getval(flt_path,'DETECTOR')
filt = fits.getval(flt_path,'FILTER')
if det == 'IR' and filt == 'F110W':
if not os.path.exists(flt.replace('flt.fits','raw.fits')):
print 'NO RAW FOR {}'.format(flt)
asn_id = fits.getval(flt_path, 'asn_id')
if asn_id == 'NONE':
asn_id = fits.getval(flt_path, 'rootname').upper()
return asn_id
# raise
return None
def get_targ_dir(flt):
try: orig_flt = glob.glob('../targets/*/{}'.format(flt))[0]
except: print 'FLT IS:', flt
targ_dir = os.path.split(orig_flt)[0]
return targ_dir
def backup_flts(targ_dir):
# Copies 'unfixed' flts into backup folder within target directory
print targ_dir
backup_dir = '{}/backup_flts'.format(targ_dir)
if not os.path.exists(backup_dir):
os.mkdir(backup_dir)
for flt_path in glob.glob('{}/i*flt.fits'.format(targ_dir)):
flt = os.path.split(flt_path)[-1]
if os.path.exists(flt):
if os.path.exists('{}/{}'.format(backup_dir,flt)):
print 'BACKUP EXISTS OR NON-ORIGINAL FOR {}'.format(flt_path)
continue
else:
shutil.move(flt_path,backup_dir)
shutil.copy(flt,targ_dir)
else: continue
return
def copy_fixed_flts(flts):
p = Pool(32)
print flts
targ_dirs = set(p.map(get_targ_dir,flts))
p.map(backup_flts, targ_dirs)
if __name__ == '__main__':
import sys
sys.path.insert(0, '/astro/pabeta/wfc3/')
from reprocess_wfc3 import make_IMA_FLT, fetch_calibs
raws = glob.glob('*raw.fits')
p = Pool(32)
missing_ids = p.map(check_flt, glob.glob('/astro/pabeta/targets/*/i*flt.fits'))
p.map(fix_ramp, raws)
fixed_flts = set(p.map(update_flt, raws))
if None in fixed_flts:
fixed_flts.remove(None)
copy_fixed_flts(fixed_flts)
if missing_ids.count(None) == len(missing_ids):
print 'ALL RAWS HERE'
else:
missing_ids = set(missing_ids)
missing_ids.remove(None)
print 'MISSING RAWS:'
for mid in missing_ids: print '{}, '.format(mid)
| gbrammer/pabeta | driz_tools/ir_data_fixes.py | Python | mit | 6,164 | ["VisIt"] | cb43851cff7f2c8d23ac3ddb9e9c49f24fac1da3ea4601b75e4da2f0c14a07d7 |
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
# Author: Rastko Sknepnek
#
# Division of Physics
# School of Engineering, Physics and Mathematics
# University of Dundee
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
# Integrator code for batch processing of full data runs (incorporating parts of earlier analysis scripts)
# Data interfacing
from read_data import *
from read_param import *
# Pre-existing analysis scripts
from polar_defects_analysis import *
#from glob import glob
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
basefolder = '/home/silke/Documents/CurrentProjects/Rastko/Runs/RunsMarchJ1/'
outfolder= '/home/silke/Documents/CurrentProjects/Rastko/analysis/'
#JList=['10', '1', '0.1', '0.01']
#'0.005','0.01','0.02','0.05','0.1','0.2','0.5','1'
vList=['0.005','0.01']
JList=['1']
nu_r='0.002'
phi='1'
r=28.2094791
sigma=1
nstep=10000000
nsave=10000
nsnap=int(nstep/nsave)
skip=0
startvtk=0
for J in JList:
for v0 in vList:
#param = Param(basefolder)
#/home/silke/Documents/CurrentProjects/Rastko/Runs/RunsMarchJ1/data_v0_0.005/data_j_1_sphere/sphere_v0_0.005_j_1_0010000000.dat
files = sorted(glob(basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/sphere_*.dat'))[skip:]
#files = sorted(glob(basefolder+'J_'+ J +'/sphere_*.dat'))[skip:]
defects=np.zeros((len(files),32))
ndefect=np.zeros((len(files),2))
u=0
for f in files:
print f
outname =basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/frame_data' + str(u-startvtk)+'.vtk'
if u<startvtk:
defects_n, defects_v,numdefect_n,numdefect_v=getDefects(f,float(r),sigma,outname,'polar',False,False)
else:
defects_n, defects_v,numdefect_n,numdefect_v=getDefects(f,float(r),sigma,outname,'polar',False,True)
outname = '.'.join((f).split('.')[:-1]) + '_defects.vtk'
outname =basefolder+'/data_v0_' + v0 + '/data_j_' + J +'_sphere/frame_defects' + str(u-startvtk)+'.vtk'
print outname
writeDefects(defects_n, defects_v,numdefect_n,numdefect_v,outname)
defects[u,0:4]=defects_n[0,:]
defects[u,4:8]=defects_n[1,:]
defects[u,8:12]=defects_n[2,:]
defects[u,12:16]=defects_n[3,:]
defects[u,16:20]=defects_v[0,:]
defects[u,20:24]=defects_v[1,:]
defects[u,24:28]=defects_v[2,:]
defects[u,28:32]=defects_v[3,:]
ndefect[u,0]=numdefect_n
ndefect[u,1]=numdefect_v
u+=1
outfile2=outfolder + 'defects_J_' + J + 'v0_'+ v0 +'_polar.dat'
np.savetxt(outfile2,np.concatenate((ndefect,defects),axis=1),fmt='%12.6g', header='ndefect (orientation, velocity) defects (orientation, velocity)')
| sknepneklab/SAMoS | analysis/batch_polar/batch_defects_J1_v0.01.py | Python | gpl-3.0 | 3,268 | ["VTK"] | f7c859ac79f4dd735013311544086c86ec5ecbae611e6768e93dc4d8afd69a95 |
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import tempfile
import time
import re
import sys
import os
from contextlib import contextmanager
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from splinter.driver import DriverAPI, ElementAPI
from splinter.element_list import ElementList
from splinter.utils import warn_deprecated
from splinter.request_handler.status_code import StatusCode
if sys.version_info[0] > 2:
_meth_func = '__func__'
_func_name = '__name__'
else:
_meth_func = 'im_func'
_func_name = 'func_name'
class switch_window:
def __init__(self, browser, window_handle):
self.browser = browser
self.window_handle = window_handle
def __enter__(self):
self.current_window_handle = self.browser.driver.current_window_handle
self.browser.driver.switch_to_window(self.window_handle)
def __exit__(self, type, value, traceback):
if self.current_window_handle in self.browser.driver.window_handles:
self.browser.driver.switch_to_window(self.current_window_handle)
class Window(object):
""" A class representing a browser window """
def __init__(self, browser, name):
self._browser = browser
self.name = name
@property
def title(self):
""" The title of this window """
with switch_window(self._browser, self.name):
return self._browser.title
@property
def url(self):
""" The url of this window """
with switch_window(self._browser, self.name):
return self._browser.url
@property
def index(self):
""" The index of this window in browser.windows """
return self._browser.driver.window_handles.index(self.name)
@property
def prev(self):
""" Return the previous window """
prev_index = self.index - 1
prev_handle = self._browser.driver.window_handles[prev_index]
return Window(self._browser, prev_handle)
@property
def next(self):
""" Return the next window """
next_index = (self.index + 1) % len(self._browser.driver.window_handles)
next_handle = self._browser.driver.window_handles[next_index]
return Window(self._browser, next_handle)
def is_current():
doc = "Whether this window is currently the browser's active window."
def fget(self):
return self._browser.driver.current_window_handle == self.name
def fset(self, value):
if value is True:
self._browser.driver.switch_to_window(self.name)
else:
raise TypeError("can only set to True")
return locals()
is_current = property(**is_current())
def close(self):
""" Close this window. If this window is active, switch to previous window """
target = self.prev if (self.is_current and self.prev != self) else None
with switch_window(self._browser, self.name):
self._browser.driver.close()
if target is not None:
target.is_current = True
def close_others(self):
self.is_current = True
for window in self._browser.windows:
if window != self:
window.close()
def __eq__(self, other):
return self._browser == other._browser and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Window %s: %s>" % (self.name, self.url)
class Windows(object):
""" A class representing all open browser windows """
def __init__(self, browser):
self._browser = browser
def __len__(self):
return len(self._browser.driver.window_handles)
def __getitem__(self, key):
window_handles = self._browser.driver.window_handles
try:
return Window(self._browser, window_handles[key])
except TypeError:
if key not in window_handles:
raise KeyError(key)
return Window(self._browser, key)
def current():
doc = "The currently active window"
def fget(self):
current_handle = self._browser.driver.current_window_handle
return Window(self._browser, current_handle) if current_handle else None
def fset(self, value):
self._browser.driver.switch_to_window(value.name)
return locals()
current = property(**current())
def __repr__(self):
return str([Window(self._browser, handle) for handle in self._browser.driver.window_handles])
class BaseWebDriver(DriverAPI):
def __init__(self, wait_time=2):
self.wait_time = wait_time
self.status_code = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.quit()
@property
def title(self):
return self.driver.title
@property
def html(self):
return self.driver.page_source
@property
def url(self):
return self.driver.current_url
def visit(self, url):
self.status_code = None
self.driver.get(url)
self.status_code = StatusCode(200, 'OK')
def back(self):
self.driver.back()
def forward(self):
self.driver.forward()
def reload(self):
self.driver.refresh()
def execute_script(self, script):
self.driver.execute_script(script)
def evaluate_script(self, script):
return self.driver.execute_script("return %s" % script)
def is_element_visible(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if finder(selector) and finder(selector).visible:
return True
return False
def is_element_not_visible(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
element = finder(selector)
if not element or (element and not element.visible):
return True
return False
def is_element_visible_by_css(self, css_selector, wait_time=None):
return self.is_element_visible(self.find_by_css, css_selector, wait_time)
def is_element_not_visible_by_css(self, css_selector, wait_time=None):
return self.is_element_not_visible(self.find_by_css, css_selector, wait_time)
def is_element_visible_by_xpath(self, xpath, wait_time=None):
return self.is_element_visible(self.find_by_xpath, xpath, wait_time)
def is_element_not_visible_by_xpath(self, xpath, wait_time=None):
return self.is_element_not_visible(self.find_by_xpath, xpath, wait_time)
def is_element_present(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if finder(selector):
return True
return False
def is_element_not_present(self, finder, selector, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not finder(selector):
return True
return False
def is_element_present_by_css(self, css_selector, wait_time=None):
return self.is_element_present(self.find_by_css, css_selector, wait_time)
def is_element_not_present_by_css(self, css_selector, wait_time=None):
return self.is_element_not_present(self.find_by_css, css_selector, wait_time)
def is_element_present_by_xpath(self, xpath, wait_time=None):
return self.is_element_present(self.find_by_xpath, xpath, wait_time)
def is_element_not_present_by_xpath(self, xpath, wait_time=None):
return self.is_element_not_present(self.find_by_xpath, xpath, wait_time)
def is_element_present_by_tag(self, tag, wait_time=None):
return self.is_element_present(self.find_by_tag, tag, wait_time)
def is_element_not_present_by_tag(self, tag, wait_time=None):
return self.is_element_not_present(self.find_by_tag, tag, wait_time)
def is_element_present_by_name(self, name, wait_time=None):
return self.is_element_present(self.find_by_name, name, wait_time)
def is_element_not_present_by_name(self, name, wait_time=None):
return self.is_element_not_present(self.find_by_name, name, wait_time)
def is_element_present_by_value(self, value, wait_time=None):
return self.is_element_present(self.find_by_value, value, wait_time)
def is_element_not_present_by_value(self, value, wait_time=None):
return self.is_element_not_present(self.find_by_value, value, wait_time)
def is_element_present_by_text(self, text, wait_time=None):
return self.is_element_present(self.find_by_text, text, wait_time)
def is_element_not_present_by_text(self, text, wait_time=None):
return self.is_element_not_present(self.find_by_text, text, wait_time)
def is_element_present_by_id(self, id, wait_time=None):
return self.is_element_present(self.find_by_id, id, wait_time)
def is_element_not_present_by_id(self, id, wait_time=None):
return self.is_element_not_present(self.find_by_id, id, wait_time)
def get_alert(self):
return AlertElement(self.driver.switch_to_alert())
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
try:
self.driver.find_element_by_tag_name('body').text.index(text)
return True
except ValueError:
pass
except NoSuchElementException:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
pass
return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
try:
self.driver.find_element_by_tag_name('body').text.index(text)
except ValueError:
return True
except NoSuchElementException:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
pass
return False
@contextmanager
def get_iframe(self, id):
self.driver.switch_to.frame(id)
try:
yield self
finally:
self.driver.switch_to.frame(None)
def find_option_by_value(self, value):
return self.find_by_xpath(
'//option[@value="%s"]' % value, original_find="option by value", original_query=value)
def find_option_by_text(self, text):
return self.find_by_xpath(
'//option[normalize-space(text())="%s"]' % text, original_find="option by text",
original_query=text)
def find_link_by_href(self, href):
return self.find_by_xpath('//a[@href="%s"]' % href, original_find="link by href", original_query=href)
def find_link_by_partial_href(self, partial_href):
return self.find_by_xpath(
'//a[contains(@href, "%s")]' % partial_href, original_find="link by partial href",
original_query=partial_href)
def find_link_by_partial_text(self, partial_text):
return self.find_by_xpath(
'//a[contains(normalize-space(.), "%s")]' % partial_text,
original_find="link by partial text", original_query=partial_text)
def find_link_by_text(self, text):
return self.find_by_xpath(
'//a[text()="%s"]' % text, original_find="link by text", original_query=text)
def find_by(self, finder, selector, original_find=None, original_query=None):
elements = None
end_time = time.time() + self.wait_time
func_name = getattr(getattr(finder, _meth_func), _func_name)
find_by = original_find or func_name[func_name.rfind('_by_') + 4:]
query = original_query or selector
while time.time() < end_time:
try:
elements = finder(selector)
if not isinstance(elements, list):
elements = [elements]
except NoSuchElementException:
pass
if elements:
return ElementList(
[self.element_class(element, self) for element in elements],
find_by=find_by, query=query)
return ElementList([], find_by=find_by, query=query)
def find_by_css(self, css_selector):
return self.find_by(
self.driver.find_elements_by_css_selector, css_selector, original_find='css',
original_query=css_selector)
def find_by_xpath(self, xpath, original_find=None, original_query=None):
original_find = original_find or "xpath"
original_query = original_query or xpath
return self.find_by(
self.driver.find_elements_by_xpath, xpath, original_find=original_find,
original_query=original_query)
def find_by_name(self, name):
return self.find_by(self.driver.find_elements_by_name, name)
def find_by_tag(self, tag):
return self.find_by(self.driver.find_elements_by_tag_name, tag)
def find_by_value(self, value):
return self.find_by_xpath('//*[@value="%s"]' % value, original_find='value', original_query=value)
def find_by_text(self, text):
return self.find_by_xpath('//*[text()="%s"]' % text,
original_find='text', original_query=text)
def find_by_id(self, id):
return self.find_by(self.driver.find_element_by_id, id)
def fill(self, name, value):
field = self.find_by_name(name).first
field.value = value
attach_file = fill
def fill_form(self, field_values):
for name, value in field_values.items():
elements = self.find_by_name(name)
element = elements.first
if element['type'] in ['text', 'password', 'tel'] or element.tag_name == 'textarea':
element.value = value
elif element['type'] == 'checkbox':
if value:
element.check()
else:
element.uncheck()
elif element['type'] == 'radio':
for field in elements:
if field.value == value:
field.click()
elif element._element.tag_name == 'select':
element.find_by_value(value).first._element.click()
else:
element.value = value
def type(self, name, value, slowly=False):
element = self.find_by_name(name).first._element
if slowly:
return TypeIterator(element, value)
element.send_keys(value)
return value
def choose(self, name, value):
fields = self.find_by_name(name)
for field in fields:
if field.value == value:
field.click()
def check(self, name):
self.find_by_name(name).first.check()
def uncheck(self, name):
self.find_by_name(name).first.uncheck()
def screenshot(self, name=None, suffix='.png'):
name = name or ''
(fd, filename) = tempfile.mkstemp(prefix=name, suffix=suffix)
# don't hold the file
os.close(fd)
self.driver.get_screenshot_as_file(filename)
return filename
def select(self, name, value):
self.find_by_xpath('//select[@name="%s"]//option[@value="%s"]' % (name, value)).first._element.click()
def select_by_text(self, name, text):
self.find_by_xpath('//select[@name="%s"]/option[text()="%s"]' % (name, text)).first._element.click()
def quit(self):
self.driver.quit()
@property
def cookies(self):
return self._cookie_manager
@property
def windows(self):
return Windows(self)
class TypeIterator(object):
def __init__(self, element, keys):
self._element = element
self._keys = keys
def __iter__(self):
for key in self._keys:
self._element.send_keys(key)
yield key
class WebDriverElement(ElementAPI):
def __init__(self, element, parent):
self._element = element
self.parent = parent
self.action_chains = ActionChains(parent.driver)
def _get_value(self):
return self['value'] or self._element.text
def _set_value(self, value):
if self._element.get_attribute('type') != 'file':
self._element.clear()
self._element.send_keys(value)
value = property(_get_value, _set_value)
@property
def text(self):
return self._element.text
@property
def tag_name(self):
return self._element.tag_name
def clear(self):
if self._element.get_attribute('type') in ['textarea', 'text', 'password', 'tel']:
self._element.clear()
def fill(self, value):
self.value = value
def select(self, value):
self.find_by_xpath(
'//select[@name="%s"]/option[@value="%s"]' % (self["name"], value))._element.click()
def select_by_text(self, text):
self.find_by_xpath('//select[@name="%s"]/option[text()="%s"]' % (self["name"], text))._element.click()
def type(self, value, slowly=False):
if slowly:
return TypeIterator(self._element, value)
self._element.send_keys(value)
return value
def click(self):
self._element.click()
def check(self):
if not self.checked:
self._element.click()
def uncheck(self):
if self.checked:
self._element.click()
@property
def checked(self):
return self._element.is_selected()
selected = checked
@property
def visible(self):
return self._element.is_displayed()
@property
def html(self):
return self['innerHTML']
@property
def outer_html(self):
return self['outerHTML']
def find_by_css(self, selector, original_find=None, original_query=None):
find_by = original_find or 'css'
query = original_query or selector
elements = self._element.find_elements_by_css_selector(selector)
return ElementList(
[self.__class__(element, self.parent) for element in elements], find_by=find_by, query=query)
def find_by_xpath(self, selector, original_find=None, original_query=None):
elements = ElementList(self._element.find_elements_by_xpath(selector))
return ElementList(
[self.__class__(element, self.parent) for element in elements], find_by='xpath', query=selector)
def find_by_name(self, name):
elements = ElementList(self._element.find_elements_by_name(name))
return ElementList(
[self.__class__(element, self.parent) for element in elements], find_by='name', query=name)
def find_by_tag(self, tag):
elements = ElementList(self._element.find_elements_by_tag_name(tag))
return ElementList(
[self.__class__(element, self.parent) for element in elements], find_by='tag', query=tag)
def find_by_value(self, value):
selector = '[value="%s"]' % value
return self.find_by_css(selector, original_find='value', original_query=value)
def find_by_text(self, text):
selector = '//*[text()="%s"]' % text
return self.find_by_xpath(selector, original_find='text', original_query=text)
def find_by_id(self, id):
elements = ElementList(self._element.find_elements_by_id(id))
return ElementList(
[self.__class__(element, self.parent) for element in elements], find_by='id', query=id)
def has_class(self, class_name):
return bool(re.search(r'(?:^|\s)' + re.escape(class_name) + r'(?:$|\s)', self['class']))
def mouse_over(self):
"""
Performs a mouse over the element.
Currently works only on Chrome driver.
"""
self.action_chains.move_to_element(self._element)
self.action_chains.perform()
def mouse_out(self):
"""
Performs a mouse out of the element.
Currently works only on Chrome driver.
"""
self.action_chains.move_by_offset(5000, 5000)
self.action_chains.perform()
mouseover = warn_deprecated(mouse_over, 'mouseover')
def double_click(self):
"""
Performs a double click in the element.
Currently works only on Chrome driver.
"""
self.action_chains.double_click(self._element)
self.action_chains.perform()
def right_click(self):
"""
Performs a right click in the element.
Currently works only on Chrome driver.
"""
self.action_chains.context_click(self._element)
self.action_chains.perform()
def drag_and_drop(self, droppable):
"""
Performs a drag of an element onto another element.
Currently works only on Chrome driver.
"""
self.action_chains.drag_and_drop(self._element, droppable._element)
self.action_chains.perform()
def __getitem__(self, attr):
return self._element.get_attribute(attr)
class AlertElement(object):
def __init__(self, alert):
self._alert = alert
self.text = alert.text
def accept(self):
self._alert.accept()
def dismiss(self):
self._alert.dismiss()
def fill_with(self, text):
self._alert.send_keys(text)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
| maximmaxim345/Sheep-it-blender-plugin | splinter/driver/webdriver/__init__.py | Python | gpl-3.0 | 22,326 | ["VisIt"] | 7c56743d01083256c1f5e8ef13a5c11ac823d0b087ba2c50128038b1162a2fa7 |
""" converted from Matlab code
source: http://www.robots.ox.ac.uk/~fwood/teaching/AIMS_CDT_ML_2015/homework/HW_2_em/
% This main file is used to execute the em algorithm for gaussian mixture
% modeling. The data is the Fisher iris data, where each row of data is
% four measurements taken from the petal of an iris flower. The value e
% is a small number used to assess convergence of the algorithm. When the
% likelihood of the data under the model ceases to increase by at least e,
% the algorithm is assumed to have converged. Important variables are
% listed below.
%
% data : data matrix n x d with rows as elements of data
% gamma : a n x k matrix of responsibilities. each row should sum to 1.
% pi : column vector of probabilities for each class
% param : mu : d x k matrix of class centers listed as columns
% sigma : k x 1 cell array of class covariance matrices (each are d x d)"""
import numpy as np
import scipy.stats
import pandas
from plot_data import plot_data
from m_step_gaussian_mixture import m_step_gaussian_mixture
from log_likelihood_gaussian_mixture import log_likelihood_gaussian_mixture
from e_step_gaussian_mixture import e_step_gaussian_mixture
# % k is the number of clusters to use, you should experiment with this
# % number and MAKE SURE YOUR CODE WORKS FOR ANY VALUE OF K >= 1
k = 20;
e = .01;
data = pandas.read_csv("data.csv", header=None).as_matrix()
# % this sets the initial values of the gamma matrix, the matrix of
# % responsibilities, randomly based on independent draws from a dirichlet
# % distribution.
gamma = scipy.stats.dirichlet.rvs(np.ones(len(data) * k))
gamma = np.reshape(gamma, (len(data), k))
rows_sum = np.sum(gamma, axis=1)
gamma /= rows_sum[:, None]
# % to facilitate visualization, we label each data point by the cluster
# % which takes most responsibility for it.
labels = np.argmax(gamma, 1)
m = gamma[labels]
# % this draws a plot of the initial labeling.
# plot_data(data, labels)
# % given the initial labeling we set mu, sigma, and pi based on the m step
# % and calculate the likelihood.
ll = -np.infty
[mu, sigma, pi] = m_step_gaussian_mixture(data, gamma)
nll = log_likelihood_gaussian_mixture(data, mu, sigma, pi)
print('log likelihood = %f' % (nll,))
# % the loop iterates until convergence as determined by e.
while ll + e < nll:
ll = nll
gamma = e_step_gaussian_mixture(data, pi, mu, sigma)
[mu, sigma, pi] = m_step_gaussian_mixture(data, gamma)
nll = log_likelihood_gaussian_mixture(data, mu, sigma, pi)
print('log likelihood = %f' % (nll,))
labels = np.argmax(gamma, 1)
m = gamma[labels]
plot_data(data, labels);
| leonardbj/AIMS | src/ML_Algorithms/ExpectationMaximization/main_gaussian_mixture.py | Python | mit | 2,633 | ["Gaussian"] | ebfa4f81b5c45ef3ff15df5f1db199013b4b00e05941fe8f19b8c817dea573a7 |
"""K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..metrics.pairwise import pairwise_distances_argmin_min
from ..utils.extmath import row_norms, squared_norm, stable_cumsum
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.six import string_types
from . import _k_means
from ._k_means_elkan import k_means_elkan
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
"""Init n_clusters seeds according to k-means++
Parameters
-----------
X : array or sparse matrix, shape (n_samples, n_features)
The data to pick seeds for. To avoid memory copy, the input data
should be double precision (dtype=np.float64).
n_clusters : integer
The number of seeds to choose
x_squared_norms : array, shape (n_samples,)
Squared Euclidean norm of each data point.
random_state : numpy.RandomState
The generator used to initialize the centers.
n_local_trials : integer, optional
The number of seeding trials for each center (except the first),
of which the one reducing inertia the most is greedily chosen.
Set to None to make the number of trials depend logarithmically
on the number of seeds (2+log(k)); this is the default.
Notes
-----
Selects initial cluster centers for k-mean clustering in a smart way
to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
"k-means++: the advantages of careful seeding". ACM-SIAM symposium
on Discrete algorithms. 2007
Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
which is the implementation used in the aforementioned paper.
"""
n_samples, n_features = X.shape
centers = np.empty((n_clusters, n_features), dtype=X.dtype)
assert x_squared_norms is not None, 'x_squared_norms None in _k_init'
# Set the number of local seeding trials if none is given
if n_local_trials is None:
# This is what Arthur/Vassilvitskii tried, but did not report
# specific results for other than mentioning in the conclusion
# that it helped.
n_local_trials = 2 + int(np.log(n_clusters))
# Pick first center randomly
center_id = random_state.randint(n_samples)
if sp.issparse(X):
centers[0] = X[center_id].toarray()
else:
centers[0] = X[center_id]
# Initialize list of closest distances and calculate current potential
closest_dist_sq = euclidean_distances(
centers[0, np.newaxis], X, Y_norm_squared=x_squared_norms,
squared=True)
current_pot = closest_dist_sq.sum()
# Pick the remaining n_clusters-1 points
for c in range(1, n_clusters):
# Choose center candidates by sampling with probability proportional
# to the squared distance to the closest existing center
rand_vals = random_state.random_sample(n_local_trials) * current_pot
candidate_ids = np.searchsorted(stable_cumsum(closest_dist_sq),
rand_vals)
# Compute distances to center candidates
distance_to_candidates = euclidean_distances(
X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)
# Decide which candidate is the best
best_candidate = None
best_pot = None
best_dist_sq = None
for trial in range(n_local_trials):
# Compute potential when including center candidate
new_dist_sq = np.minimum(closest_dist_sq,
distance_to_candidates[trial])
new_pot = new_dist_sq.sum()
# Store result if it is the best local trial so far
if (best_candidate is None) or (new_pot < best_pot):
best_candidate = candidate_ids[trial]
best_pot = new_pot
best_dist_sq = new_dist_sq
# Permanently add best center candidate found in local tries
if sp.issparse(X):
centers[c] = X[best_candidate].toarray()
else:
centers[c] = X[best_candidate]
current_pot = best_pot
closest_dist_sq = best_dist_sq
return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _validate_center_shape(X, n_centers, centers):
"""Check if centers is compatible with X and n_centers"""
if len(centers) != n_centers:
raise ValueError('The shape of the initial centers (%s) '
'does not match the number of clusters %i'
% (centers.shape, n_centers))
if centers.shape[1] != X.shape[1]:
raise ValueError(
"The number of features of the initial centers %s "
"does not match the number of features of the data %s."
% (centers.shape[1], X.shape[1]))
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
n_init=10, max_iter=300, verbose=False,
tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
algorithm="auto", return_n_iter=False):
"""K-means clustering algorithm.
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
copy_x : boolean, optional
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
return_n_iter : bool, optional
Whether or not to return the number of iterations.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
best_n_iter : int
Number of iterations corresponding to the best results.
Returned only if `return_n_iter` is set to True.
"""
if n_init <= 0:
raise ValueError("Invalid number of initializations."
" n_init=%d must be bigger than zero." % n_init)
random_state = check_random_state(random_state)
if max_iter <= 0:
raise ValueError('Number of iterations should be a positive number,'
' got %d instead' % max_iter)
X = as_float_array(X, copy=copy_x)
tol = _tolerance(X, tol)
# If the distances are precomputed every job will create a matrix of shape
# (n_clusters, n_samples). To stop KMeans from eating up memory we only
# activate this if the created matrix is guaranteed to be under 100MB. 12
# million entries consume a little under 100MB if they are of type double.
if precompute_distances == 'auto':
n_samples = X.shape[0]
precompute_distances = (n_clusters * n_samples) < 12e6
elif isinstance(precompute_distances, bool):
pass
else:
raise ValueError("precompute_distances should be 'auto' or True/False"
", but a value of %r was passed" %
precompute_distances)
# Validate init array
if hasattr(init, '__array__'):
init = check_array(init, dtype=X.dtype.type, copy=True)
_validate_center_shape(X, n_clusters, init)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in k-means instead of n_init=%d'
% n_init, RuntimeWarning, stacklevel=2)
n_init = 1
# subtract of mean of x for more accurate distance computations
if not sp.issparse(X):
X_mean = X.mean(axis=0)
# The copy was already done above
X -= X_mean
if hasattr(init, '__array__'):
init -= X_mean
# precompute squared norms of data points
x_squared_norms = row_norms(X, squared=True)
best_labels, best_inertia, best_centers = None, None, None
if n_clusters == 1:
# elkan doesn't make sense for a single cluster, full will produce
# the right result.
algorithm = "full"
if algorithm == "auto":
algorithm = "full" if sp.issparse(X) else 'elkan'
if algorithm == "full":
kmeans_single = _kmeans_single_lloyd
elif algorithm == "elkan":
kmeans_single = _kmeans_single_elkan
else:
raise ValueError("Algorithm must be 'auto', 'full' or 'elkan', got"
" %s" % str(algorithm))
if n_jobs == 1:
# For a single thread, less memory is needed if we just store one set
# of the best results (as opposed to one set per run per thread).
for it in range(n_init):
# run a k-means once
labels, inertia, centers, n_iter_ = kmeans_single(
X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
precompute_distances=precompute_distances, tol=tol,
x_squared_norms=x_squared_norms, random_state=random_state)
# determine if these results are the best so far
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
best_n_iter = n_iter_
else:
# parallelisation of k-means runs
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(kmeans_single)(X, n_clusters, max_iter=max_iter, init=init,
verbose=verbose, tol=tol,
precompute_distances=precompute_distances,
x_squared_norms=x_squared_norms,
# Change seed to ensure variety
random_state=seed)
for seed in seeds)
# Get results with the lowest inertia
labels, inertia, centers, n_iters = zip(*results)
best = np.argmin(inertia)
best_labels = labels[best]
best_inertia = inertia[best]
best_centers = centers[best]
best_n_iter = n_iters[best]
if not sp.issparse(X):
if not copy_x:
X += X_mean
best_centers += X_mean
if return_n_iter:
return best_centers, best_labels, best_inertia, best_n_iter
else:
return best_centers, best_labels, best_inertia
def _kmeans_single_elkan(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
if sp.issparse(X):
raise ValueError("algorithm='elkan' not supported for sparse input X")
X = check_array(X, order="C")
random_state = check_random_state(random_state)
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
centers = np.ascontiguousarray(centers)
if verbose:
print('Initialization complete')
centers, labels, n_iter = k_means_elkan(X, n_clusters, centers, tol=tol,
max_iter=max_iter, verbose=verbose)
inertia = np.sum((X - centers[labels]) ** 2, dtype=np.float64)
return labels, inertia, centers, n_iter
def _kmeans_single_lloyd(X, n_clusters, max_iter=300, init='k-means++',
verbose=False, x_squared_norms=None,
random_state=None, tol=1e-4,
precompute_distances=True):
"""A single run of k-means, assumes preparation completed prior.
Parameters
----------
X : array-like of floats, shape (n_samples, n_features)
The observations to cluster.
n_clusters : int
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional, default 300
Maximum number of iterations of the k-means algorithm to run.
init : {'k-means++', 'random', or ndarray, or a callable}, optional
Method for initialization, default to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': generate k centroids from a Gaussian with mean and
variance estimated from the data.
If an ndarray is passed, it should be of shape (k, p) and gives
the initial centers.
If a callable is passed, it should take arguments X, k and
a random state and return an initialization.
tol : float, optional
The relative increment in the results before declaring convergence.
verbose : boolean, optional
Verbosity mode
x_squared_norms : array
Precomputed x_squared_norms.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Returns
-------
centroid : float ndarray with shape (k, n_features)
Centroids found at the last iteration of k-means.
label : integer ndarray with shape (n_samples,)
label[i] is the code or index of the centroid the
i'th observation is closest to.
inertia : float
The final value of the inertia criterion (sum of squared distances to
the closest centroid for all observations in the training set).
n_iter : int
Number of iterations run.
"""
random_state = check_random_state(random_state)
best_labels, best_inertia, best_centers = None, None, None
# init
centers = _init_centroids(X, n_clusters, init, random_state=random_state,
x_squared_norms=x_squared_norms)
if verbose:
print("Initialization complete")
# Allocate memory to store the distances for each sample to its
# closer center for reallocation in case of ties
distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
# iterations
for i in range(max_iter):
centers_old = centers.copy()
# labels assignment is also called the E-step of EM
labels, inertia = \
_labels_inertia(X, x_squared_norms, centers,
precompute_distances=precompute_distances,
distances=distances)
# computation of the means is also called the M-step of EM
if sp.issparse(X):
centers = _k_means._centers_sparse(X, labels, n_clusters,
distances)
else:
centers = _k_means._centers_dense(X, labels, n_clusters, distances)
if verbose:
print("Iteration %2d, inertia %.3f" % (i, inertia))
if best_inertia is None or inertia < best_inertia:
best_labels = labels.copy()
best_centers = centers.copy()
best_inertia = inertia
center_shift_total = squared_norm(centers_old - centers)
if center_shift_total <= tol:
if verbose:
print("Converged at iteration %d: "
"center shift %e within tolerance %e"
% (i, center_shift_total, tol))
break
if center_shift_total > 0:
# rerun E-step in case of non-convergence so that predicted labels
# match cluster centers
best_labels, best_inertia = \
_labels_inertia(X, x_squared_norms, best_centers,
precompute_distances=precompute_distances,
distances=distances)
return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
"""Compute labels and inertia using a full distance matrix.
This will overwrite the 'distances' array in-place.
Parameters
----------
X : numpy array, shape (n_sample, n_features)
Input data.
x_squared_norms : numpy array, shape (n_samples,)
Precomputed squared norms of X.
centers : numpy array, shape (n_clusters, n_features)
Cluster centers which data is assigned to.
distances : numpy array, shape (n_samples,)
Pre-allocated array in which distances are stored.
Returns
-------
labels : numpy array, dtype=np.int, shape (n_samples,)
Indices of clusters that samples are assigned to.
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# Breakup nearest neighbor distance computation into batches to prevent
# memory blowup in the case of a large number of samples and clusters.
# TODO: Once PR #7383 is merged use check_inputs=False in metric_kwargs.
labels, mindist = pairwise_distances_argmin_min(
X=X, Y=centers, metric='euclidean', metric_kwargs={'squared': True})
# cython k-means code assumes int32 inputs
labels = labels.astype(np.int32)
if n_samples == distances.shape[0]:
# distances will be changed in-place
distances[:] = mindist
inertia = mindist.sum()
return labels, inertia
def _labels_inertia(X, x_squared_norms, centers,
precompute_distances=True, distances=None):
"""E step of the K-means EM algorithm.
Compute the labels and the inertia of the given samples and centers.
This will compute the distances in-place.
Parameters
----------
X : float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
The input samples to assign to the labels.
x_squared_norms : array, shape (n_samples,)
Precomputed squared euclidean norm of each data point, to speed up
computations.
centers : float array, shape (k, n_features)
The cluster centers.
precompute_distances : boolean, default: True
Precompute distances (faster but takes more memory).
distances : float array, shape (n_samples,)
Pre-allocated array to be filled in with each sample's distance
to the closest center.
Returns
-------
labels : int array of shape(n)
The resulting assignment
inertia : float
Sum of distances of samples to their closest cluster center.
"""
n_samples = X.shape[0]
# set the default value of centers to -1 to be able to detect any anomaly
# easily
labels = -np.ones(n_samples, np.int32)
if distances is None:
distances = np.zeros(shape=(0,), dtype=X.dtype)
# distances will be changed in-place
if sp.issparse(X):
inertia = _k_means._assign_labels_csr(
X, x_squared_norms, centers, labels, distances=distances)
else:
if precompute_distances:
return _labels_inertia_precompute_dense(X, x_squared_norms,
centers, distances)
inertia = _k_means._assign_labels_array(
X, x_squared_norms, centers, labels, distances=distances)
return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
init_size=None):
"""Compute the initial centroids
Parameters
----------
X : array, shape (n_samples, n_features)
k : int
number of centroids
init : {'k-means++', 'random' or ndarray or callable} optional
Method for initialization
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
x_squared_norms : array, shape (n_samples,), optional
Squared euclidean norm of each data point. Pass it if you have it at
hands already to avoid it being recomputed here. Default: None
init_size : int, optional
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
only algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than k.
Returns
-------
centers : array, shape(k, n_features)
"""
random_state = check_random_state(random_state)
n_samples = X.shape[0]
if x_squared_norms is None:
x_squared_norms = row_norms(X, squared=True)
if init_size is not None and init_size < n_samples:
if init_size < k:
warnings.warn(
"init_size=%d should be larger than k=%d. "
"Setting it to 3*k" % (init_size, k),
RuntimeWarning, stacklevel=2)
init_size = 3 * k
init_indices = random_state.randint(0, n_samples, init_size)
X = X[init_indices]
x_squared_norms = x_squared_norms[init_indices]
n_samples = X.shape[0]
elif n_samples < k:
raise ValueError(
"n_samples=%d should be larger than k=%d" % (n_samples, k))
if isinstance(init, string_types) and init == 'k-means++':
centers = _k_init(X, k, random_state=random_state,
x_squared_norms=x_squared_norms)
elif isinstance(init, string_types) and init == 'random':
seeds = random_state.permutation(n_samples)[:k]
centers = X[seeds]
elif hasattr(init, '__array__'):
# ensure that the centers have the same dtype as X
# this is a requirement of fused types of cython
centers = np.array(init, dtype=X.dtype)
elif callable(init):
centers = init(X, k, random_state=random_state)
centers = np.asarray(centers, dtype=X.dtype)
else:
raise ValueError("the init parameter for the k-means should "
"be 'k-means++' or 'random' or an ndarray, "
"'%s' (type '%s') was passed." % (init, type(init)))
if sp.issparse(centers):
centers = centers.toarray()
_validate_center_shape(X, k, centers)
return centers
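# Illustrative sketch (hand-added, not part of the original scikit-learn
# module): exercising the different ``init`` strategies accepted above.
# Only shapes are checked, since the returned centers depend on the RNG.
def _init_centroids_demo():
    import numpy as np
    rng = np.random.RandomState(42)
    X = rng.rand(50, 2)
    for init in ('k-means++', 'random'):
        centers = _init_centroids(X, 3, init, random_state=rng)
        assert centers.shape == (3, 2)
    # an explicit (k, n_features) array of starting centers is also accepted
    centers = _init_centroids(X, 3, X[:3].copy(), random_state=rng)
    assert centers.shape == (3, 2)
    return centers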
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
"""K-Means clustering
Read more in the :ref:`User Guide <k_means>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, default: 300
Maximum number of iterations of the k-means algorithm for a
single run.
n_init : int, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
init : {'k-means++', 'random' or an ndarray}
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
algorithm : "auto", "full" or "elkan", default="auto"
K-means algorithm to use. The classical EM-style algorithm is "full".
The "elkan" variation is more efficient by using the triangle
inequality, but currently doesn't support sparse data. "auto" chooses
"elkan" for dense data and "full" for sparse data.
precompute_distances : {'auto', True, False}
Precompute distances (faster but takes more memory).
'auto' : do not precompute distances if n_samples * n_clusters > 12
million. This corresponds to about 100MB overhead per job using
double precision.
True : always precompute distances
False : never precompute distances
tol : float, default: 1e-4
Relative tolerance with regards to inertia to declare convergence
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
verbose : int, default 0
Verbosity mode.
copy_x : boolean, default True
When pre-computing distances it is more numerically accurate to center
the data first. If copy_x is True, then the original data is not
modified. If False, the original data is modified, and put back before
the function returns, but small numerical differences may be introduced
by subtracting and then adding the data mean.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point
inertia_ : float
Sum of distances of samples to their closest cluster center.
Examples
--------
>>> from sklearn.cluster import KMeans
>>> import numpy as np
>>> X = np.array([[1, 2], [1, 4], [1, 0],
... [4, 2], [4, 4], [4, 0]])
>>> kmeans = KMeans(n_clusters=2, random_state=0).fit(X)
>>> kmeans.labels_
array([0, 0, 0, 1, 1, 1], dtype=int32)
>>> kmeans.predict([[0, 0], [4, 4]])
array([0, 1], dtype=int32)
>>> kmeans.cluster_centers_
array([[ 1., 2.],
[ 4., 2.]])
See also
--------
MiniBatchKMeans
Alternative online implementation that does incremental updates
of the centers positions using mini-batches.
For large scale learning (say n_samples > 10k) MiniBatchKMeans is
probably much faster than the default batch implementation.
Notes
-----
The k-means problem is solved using Lloyd's algorithm.
The average complexity is given by O(k n T), where n is the number of
samples and T is the number of iterations.
The worst case complexity is given by O(n^(k+2/p)) with
n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
'How slow is the k-means method?' SoCG2006)
In practice, the k-means algorithm is very fast (one of the fastest
clustering algorithms available), but it may fall into local minima. That is
why it can be useful to restart it several times.
"""
def __init__(self, n_clusters=8, init='k-means++', n_init=10,
max_iter=300, tol=1e-4, precompute_distances='auto',
verbose=0, random_state=None, copy_x=True,
n_jobs=1, algorithm='auto'):
self.n_clusters = n_clusters
self.init = init
self.max_iter = max_iter
self.tol = tol
self.precompute_distances = precompute_distances
self.n_init = n_init
self.verbose = verbose
self.random_state = random_state
self.copy_x = copy_x
self.n_jobs = n_jobs
self.algorithm = algorithm
def _check_fit_data(self, X):
"""Verify that the number of samples given is larger than k"""
X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32])
if X.shape[0] < self.n_clusters:
raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
X.shape[0], self.n_clusters))
return X
def _check_test_data(self, X):
X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
expected_n_features = self.cluster_centers_.shape[1]
if not n_features == expected_n_features:
raise ValueError("Incorrect number of features. "
"Got %d features, expected %d" % (
n_features, expected_n_features))
return X
def fit(self, X, y=None):
"""Compute k-means clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
k_means(
X, n_clusters=self.n_clusters, init=self.init,
n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
precompute_distances=self.precompute_distances,
tol=self.tol, random_state=random_state, copy_x=self.copy_x,
n_jobs=self.n_jobs, algorithm=self.algorithm,
return_n_iter=True)
return self
def fit_predict(self, X, y=None):
"""Compute cluster centers and predict cluster index for each sample.
Convenience method; equivalent to calling fit(X) followed by
predict(X).
"""
return self.fit(X).labels_
def fit_transform(self, X, y=None):
"""Compute clustering and transform X to cluster-distance space.
Equivalent to fit(X).transform(X), but more efficiently implemented.
"""
# Currently, this just skips a copy of the data if it is not in
# np.array or CSR format already.
# XXX This skips _check_test_data, which may change the dtype;
# we should refactor the input validation.
X = self._check_fit_data(X)
return self.fit(X)._transform(X)
def transform(self, X, y=None):
"""Transform X to a cluster-distance space.
In the new space, each dimension is the distance to the cluster
centers. Note that even if X is sparse, the array returned by
`transform` will typically be dense.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to transform.
Returns
-------
X_new : array, shape [n_samples, k]
X transformed in the new space.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._transform(X)
def _transform(self, X):
"""guts of transform method; no input validation"""
return euclidean_distances(X, self.cluster_centers_)
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
def score(self, X, y=None):
"""Opposite of the value of X on the K-means objective.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data.
Returns
-------
score : float
Opposite of the value of X on the K-means objective.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
x_squared_norms = row_norms(X, squared=True)
return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
old_center_buffer, compute_squared_diff,
distances, random_reassign=False,
random_state=None, reassignment_ratio=.01,
verbose=False):
"""Incremental update of the centers for the Minibatch K-Means algorithm.
Parameters
----------
X : array, shape (n_samples, n_features)
The original data array.
x_squared_norms : array, shape (n_samples,)
Squared euclidean norm of each data point.
centers : array, shape (k, n_features)
The cluster centers. This array is MODIFIED IN PLACE
counts : array, shape (k,)
The vector in which we keep track of the numbers of elements in a
cluster. This array is MODIFIED IN PLACE
distances : array, dtype float, shape (n_samples), optional
If not None, should be a pre-allocated array that will be used to store
the distances of each sample to its closest center.
May not be None when random_reassign is True.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
random_reassign : boolean, optional
If True, centers with very low counts are randomly reassigned
to observations.
reassignment_ratio : float, optional
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more likely to be reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : bool, optional, default False
Controls the verbosity.
compute_squared_diff : bool
If set to False, the squared diff computation is skipped.
old_center_buffer : int
Copy of old centers for monitoring convergence.
Returns
-------
inertia : float
Sum of distances of samples to their closest cluster center.
squared_diff : numpy array, shape (n_clusters,)
Squared distances between previous and updated cluster centers.
"""
# Perform label assignment to nearest centers
nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
distances=distances)
if random_reassign and reassignment_ratio > 0:
random_state = check_random_state(random_state)
# Reassign clusters that have very low counts
to_reassign = counts < reassignment_ratio * counts.max()
# pick at most .5 * batch_size samples as new centers
if to_reassign.sum() > .5 * X.shape[0]:
indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
to_reassign[indices_dont_reassign] = False
n_reassigns = to_reassign.sum()
if n_reassigns:
# Pick new clusters amongst observations with uniform probability
new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
random_state=random_state)
if verbose:
print("[MiniBatchKMeans] Reassigning %i cluster centers."
% n_reassigns)
if sp.issparse(X) and not sp.issparse(centers):
assign_rows_csr(X,
astype(new_centers, np.intp),
astype(np.where(to_reassign)[0], np.intp),
centers)
else:
centers[to_reassign] = X[new_centers]
# reset counts of reassigned centers, but don't reset them too small
# to avoid instant reassignment. This is a pretty dirty hack as it
# also modifies the learning rates.
counts[to_reassign] = np.min(counts[~to_reassign])
# implementation for the sparse CSR representation completely written in
# cython
if sp.issparse(X):
return inertia, _k_means._mini_batch_update_csr(
X, x_squared_norms, centers, counts, nearest_center,
old_center_buffer, compute_squared_diff)
# dense variant in mostly numpy (not as memory efficient though)
k = centers.shape[0]
squared_diff = 0.0
for center_idx in range(k):
# find points from minibatch that are assigned to this center
center_mask = nearest_center == center_idx
count = center_mask.sum()
if count > 0:
if compute_squared_diff:
old_center_buffer[:] = centers[center_idx]
# inplace remove previous count scaling
centers[center_idx] *= counts[center_idx]
# inplace sum with new points members of this cluster
centers[center_idx] += np.sum(X[center_mask], axis=0)
# update the count statistics for this center
counts[center_idx] += count
# inplace rescale to compute mean of all points (old and new)
# Note: numpy >= 1.10 does not support '/=' for the following
# expression for a mixture of int and float (see numpy issue #6464)
centers[center_idx] = centers[center_idx] / counts[center_idx]
# update the squared diff if necessary
if compute_squared_diff:
diff = centers[center_idx].ravel() - old_center_buffer.ravel()
squared_diff += np.dot(diff, diff)
return inertia, squared_diff
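# Illustrative sketch (hand-added, not part of the original scikit-learn
# module): a single dense mini-batch update on a tiny hand-made batch,
# showing which buffers the function expects; all values are arbitrary.
def _mini_batch_step_demo():
    import numpy as np
    X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    x_squared_norms = row_norms(X, squared=True)
    centers = np.array([[0., 2.], [9., 9.]])   # modified in place
    counts = np.zeros(2, dtype=np.int32)       # modified in place
    distances = np.zeros(X.shape[0], dtype=X.dtype)
    inertia, squared_diff = _mini_batch_step(
        X, x_squared_norms, centers, counts,
        old_center_buffer=np.zeros(2, dtype=X.dtype),
        compute_squared_diff=True, distances=distances)
    # after the call, counts == [2, 2] and each center is the mean of the
    # samples assigned to it in this batch
    return inertia, squared_diff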
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulate the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
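# Illustrative sketch (hand-added, not part of the original scikit-learn
# module): the exponentially weighted average recursion used above, applied
# to a plain Python list of per-batch values.
def _ewa_demo(values, batch_size, n_samples):
    alpha = float(batch_size) * 2.0 / (n_samples + 1)
    alpha = 1.0 if alpha > 1.0 else alpha
    ewa = None
    for value in values:
        ewa = value if ewa is None else ewa * (1 - alpha) + value * alpha
    return ewa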
class MiniBatchKMeans(KMeans):
"""Mini-Batch K-Means clustering
Read more in the :ref:`User Guide <mini_batch_kmeans>`.
Parameters
----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
max_iter : int, optional
Maximum number of iterations over the complete dataset before
stopping independently of any early stopping criterion heuristics.
max_no_improvement : int, default: 10
Control early stopping based on the consecutive number of mini
batches that do not yield an improvement on the smoothed inertia.
To disable convergence detection based on inertia, set
max_no_improvement to None.
tol : float, default: 0.0
Control early stopping based on the relative center changes as
measured by a smoothed, variance-normalized estimate of the mean
squared center position changes. This early stopping heuristic is
closer to the one used for the batch variant of the algorithm,
but it induces a slight computational and memory overhead over the
inertia heuristic.
To disable convergence detection based on normalized center
change, set tol to 0.0 (default).
batch_size : int, optional, default: 100
Size of the mini batches.
init_size : int, optional, default: 3 * batch_size
Number of samples to randomly sample for speeding up the
initialization (sometimes at the expense of accuracy): the
algorithm is initialized by running a batch KMeans on a
random subset of the data. This needs to be larger than n_clusters.
init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
Method for initialization, defaults to 'k-means++':
'k-means++' : selects initial cluster centers for k-mean
clustering in a smart way to speed up convergence. See section
Notes in k_init for more details.
'random': choose k observations (rows) at random from data for
the initial centroids.
If an ndarray is passed, it should be of shape (n_clusters, n_features)
and gives the initial centers.
n_init : int, default=3
Number of random initializations that are tried.
In contrast to KMeans, the algorithm is only run once, using the
best of the ``n_init`` initializations as measured by inertia.
compute_labels : boolean, default=True
Compute label assignment and inertia for the complete dataset
once the minibatch optimization has converged in fit.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
reassignment_ratio : float, default: 0.01
Control the fraction of the maximum number of counts for a
center to be reassigned. A higher value means that low count
centers are more easily reassigned, which means that the
model will take longer to converge, but should converge in a
better clustering.
verbose : boolean, optional
Verbosity mode.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers
labels_ :
Labels of each point (if compute_labels is set to True).
inertia_ : float
The value of the inertia criterion associated with the chosen
partition (if compute_labels is set to True). The inertia is
defined as the sum of squared distances of samples to their nearest
cluster center.
See also
--------
KMeans
The classic implementation of the clustering method based on the
Lloyd's algorithm. It consumes the whole set of input data at each
iteration.
Notes
-----
See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
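Examples
--------
A minimal usage sketch; the fitted center values depend on the random
mini-batch sampling, so only shapes are shown here.
>>> from sklearn.cluster import MiniBatchKMeans
>>> import numpy as np
>>> X = np.array([[1., 1.], [1., 2.], [1., 0.],
...               [4., 4.], [4., 5.], [4., 3.]])
>>> mbk = MiniBatchKMeans(n_clusters=2, batch_size=3,
...                       random_state=0).fit(X)
>>> mbk.cluster_centers_.shape
(2, 2)
>>> mbk.predict([[0., 0.], [4., 4.]]).shape
(2,)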
"""
def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
batch_size=100, verbose=0, compute_labels=True,
random_state=None, tol=0.0, max_no_improvement=10,
init_size=None, n_init=3, reassignment_ratio=0.01):
super(MiniBatchKMeans, self).__init__(
n_clusters=n_clusters, init=init, max_iter=max_iter,
verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
self.max_no_improvement = max_no_improvement
self.batch_size = batch_size
self.compute_labels = compute_labels
self.init_size = init_size
self.reassignment_ratio = reassignment_ratio
def fit(self, X, y=None):
"""Compute the centroids on X by chunking it into mini-batches.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Training instances to cluster.
"""
random_state = check_random_state(self.random_state)
X = check_array(X, accept_sparse="csr", order='C',
dtype=[np.float64, np.float32])
n_samples, n_features = X.shape
if n_samples < self.n_clusters:
raise ValueError("Number of samples smaller than number "
"of clusters.")
n_init = self.n_init
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_init != 1:
warnings.warn(
'Explicit initial center position passed: '
'performing only one init in MiniBatchKMeans instead of '
'n_init=%d'
% self.n_init, RuntimeWarning, stacklevel=2)
n_init = 1
x_squared_norms = row_norms(X, squared=True)
if self.tol > 0.0:
tol = _tolerance(X, self.tol)
# using tol-based early stopping needs the allocation of a
# dedicated buffer, which can be expensive for high-dim data:
# hence we allocate it outside of the main loop
old_center_buffer = np.zeros(n_features, dtype=X.dtype)
else:
tol = 0.0
# no need for the center buffer if tol-based early stopping is
# disabled
old_center_buffer = np.zeros(0, dtype=X.dtype)
distances = np.zeros(self.batch_size, dtype=X.dtype)
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
n_iter = int(self.max_iter * n_batches)
init_size = self.init_size
if init_size is None:
init_size = 3 * self.batch_size
if init_size > n_samples:
init_size = n_samples
self.init_size_ = init_size
validation_indices = random_state.randint(0, n_samples, init_size)
X_valid = X[validation_indices]
x_squared_norms_valid = x_squared_norms[validation_indices]
# perform several inits with random sub-sets
best_inertia = None
for init_idx in range(n_init):
if self.verbose:
print("Init %d/%d with method: %s"
% (init_idx + 1, n_init, self.init))
counts = np.zeros(self.n_clusters, dtype=np.int32)
# TODO: once the `k_means` function works with sparse input we
# should refactor the following init to use it instead.
# Initialize the centers using only a fraction of the data as we
# expect n_samples to be very large when using MiniBatchKMeans
cluster_centers = _init_centroids(
X, self.n_clusters, self.init,
random_state=random_state,
x_squared_norms=x_squared_norms,
init_size=init_size)
# Compute the label assignment on the init dataset
batch_inertia, centers_squared_diff = _mini_batch_step(
X_valid, x_squared_norms[validation_indices],
cluster_centers, counts, old_center_buffer, False,
distances=None, verbose=self.verbose)
# Keep only the best cluster centers across independent inits on
# the common validation set
_, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
cluster_centers)
if self.verbose:
print("Inertia for init %d/%d: %f"
% (init_idx + 1, n_init, inertia))
if best_inertia is None or inertia < best_inertia:
self.cluster_centers_ = cluster_centers
self.counts_ = counts
best_inertia = inertia
# Empty context to be used inplace by the convergence check routine
convergence_context = {}
# Perform the iterative optimization until the final convergence
# criterion
for iteration_idx in range(n_iter):
# Sample a minibatch from the full dataset
minibatch_indices = random_state.randint(
0, n_samples, self.batch_size)
# Perform the actual update step on the minibatch data
batch_inertia, centers_squared_diff = _mini_batch_step(
X[minibatch_indices], x_squared_norms[minibatch_indices],
self.cluster_centers_, self.counts_,
old_center_buffer, tol > 0.0, distances=distances,
# Here we randomly choose whether to perform
# random reassignment: the choice is done as a function
# of the iteration index, and the minimum number of
# counts, in order to force this reassignment to happen
# every once in a while
random_reassign=((iteration_idx + 1)
% (10 + self.counts_.min()) == 0),
random_state=random_state,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
# Monitor convergence and do early stopping if necessary
if _mini_batch_convergence(
self, iteration_idx, n_iter, tol, n_samples,
centers_squared_diff, batch_inertia, convergence_context,
verbose=self.verbose):
break
self.n_iter_ = iteration_idx + 1
if self.compute_labels:
self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
return self
def _labels_inertia_minibatch(self, X):
"""Compute labels and inertia using mini batches.
This is slightly slower than doing everything at once but prevents
memory errors / segfaults.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
labels : array, shape (n_samples,)
Cluster labels for each point.
inertia : float
Sum of squared distances of points to nearest cluster.
"""
if self.verbose:
print('Computing label assignment and total inertia')
x_squared_norms = row_norms(X, squared=True)
slices = gen_batches(X.shape[0], self.batch_size)
results = [_labels_inertia(X[s], x_squared_norms[s],
self.cluster_centers_) for s in slices]
labels, inertia = zip(*results)
return np.hstack(labels), np.sum(inertia)
def partial_fit(self, X, y=None):
"""Update k means estimate on a single mini-batch X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Coordinates of the data points to cluster.
"""
X = check_array(X, accept_sparse="csr")
n_samples, n_features = X.shape
if hasattr(self.init, '__array__'):
self.init = np.ascontiguousarray(self.init, dtype=X.dtype)
if n_samples == 0:
return self
x_squared_norms = row_norms(X, squared=True)
self.random_state_ = getattr(self, "random_state_",
check_random_state(self.random_state))
if (not hasattr(self, 'counts_')
or not hasattr(self, 'cluster_centers_')):
# this is the first call partial_fit on this object:
# initialize the cluster centers
self.cluster_centers_ = _init_centroids(
X, self.n_clusters, self.init,
random_state=self.random_state_,
x_squared_norms=x_squared_norms, init_size=self.init_size)
self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
random_reassign = False
distances = None
else:
# The lower the minimum count is, the more we do random
# reassignment, however, we don't want to do random
# reassignment too often, to allow for building up counts
random_reassign = self.random_state_.randint(
10 * (1 + self.counts_.min())) == 0
distances = np.zeros(X.shape[0], dtype=X.dtype)
_mini_batch_step(X, x_squared_norms, self.cluster_centers_,
self.counts_, np.zeros(0, dtype=X.dtype), 0,
random_reassign=random_reassign, distances=distances,
random_state=self.random_state_,
reassignment_ratio=self.reassignment_ratio,
verbose=self.verbose)
if self.compute_labels:
self.labels_, self.inertia_ = _labels_inertia(
X, x_squared_norms, self.cluster_centers_)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
In the vector quantization literature, `cluster_centers_` is called
the code book and each value returned by `predict` is the index of
the closest code in the code book.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, 'cluster_centers_')
X = self._check_test_data(X)
return self._labels_inertia_minibatch(X)[0]
|
jaidevd/scikit-learn
|
sklearn/cluster/k_means_.py
|
Python
|
bsd-3-clause
| 59,631
|
[
"Gaussian"
] |
3abcd15904180a0b8bfbacb67c57071a94600de2bd57a44404b9b1661959d46a
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyxpad_main.ui'
#
# Created: Fri Oct 7 15:25:29 2016
# by: pyside-uic 0.2.15 running on PySide 1.2.4
#
# WARNING! All changes made in this file will be lost!
# Hand-edited to make it work with Qt4 & Qt5
from Qt import QtCore, __qt_version__
from Qt.QtWidgets import *
if __qt_version__.split('.')[0] == '5':
UnicodeUTF8 = 0
else:
UnicodeUTF8 = QApplication.UnicodeUTF8
# end of hand-edits
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(803, 659)
self.centralwidget = QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout_2 = QGridLayout(self.centralwidget)
self.gridLayout_2.setObjectName("gridLayout_2")
self.tabWidget = QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.sourceTab = QWidget()
self.sourceTab.setObjectName("sourceTab")
self.gridLayout_3 = QGridLayout(self.sourceTab)
self.gridLayout_3.setObjectName("gridLayout_3")
self.gridLayout = QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.sourceDescription = QCheckBox(self.sourceTab)
self.sourceDescription.setObjectName("sourceDescription")
self.gridLayout.addWidget(self.sourceDescription, 0, 6, 1, 1)
self.shotLabel = QLabel(self.sourceTab)
self.shotLabel.setObjectName("shotLabel")
self.gridLayout.addWidget(self.shotLabel, 0, 0, 1, 1)
self.tracePattern = QLineEdit(self.sourceTab)
self.tracePattern.setObjectName("tracePattern")
self.gridLayout.addWidget(self.tracePattern, 0, 5, 1, 1)
self.shotInput = QLineEdit(self.sourceTab)
self.shotInput.setObjectName("shotInput")
self.gridLayout.addWidget(self.shotInput, 0, 1, 1, 1)
self.readDataButton = QPushButton(self.sourceTab)
self.readDataButton.setObjectName("readDataButton")
self.gridLayout.addWidget(self.readDataButton, 0, 2, 1, 1)
self.traceLabel = QLabel(self.sourceTab)
self.traceLabel.setObjectName("traceLabel")
self.gridLayout.addWidget(self.traceLabel, 0, 4, 1, 1)
self.lastShotButton = QPushButton(self.sourceTab)
self.lastShotButton.setObjectName("lastShotButton")
self.gridLayout.addWidget(self.lastShotButton, 0, 3, 1, 1)
self.gridLayout_3.addLayout(self.gridLayout, 0, 0, 1, 1)
self.splitter = QSplitter(self.sourceTab)
self.splitter.setOrientation(QtCore.Qt.Horizontal)
self.splitter.setObjectName("splitter")
self.treeView = QTreeWidget(self.splitter)
self.treeView.setColumnCount(1)
self.treeView.setObjectName("treeView")
self.treeView.headerItem().setText(0, "Source")
self.treeView.header().setVisible(False)
self.treeView.header().setDefaultSectionSize(200)
self.treeView.header().setStretchLastSection(True)
self.sourceTable = QTableWidget(self.splitter)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sourceTable.sizePolicy().hasHeightForWidth())
self.sourceTable.setSizePolicy(sizePolicy)
self.sourceTable.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.sourceTable.setAlternatingRowColors(True)
self.sourceTable.setSelectionMode(QAbstractItemView.MultiSelection)
self.sourceTable.setShowGrid(False)
self.sourceTable.setColumnCount(1)
self.sourceTable.setObjectName("sourceTable")
self.sourceTable.setColumnCount(1)
self.sourceTable.setRowCount(0)
self.sourceTable.horizontalHeader().setVisible(False)
self.sourceTable.horizontalHeader().setDefaultSectionSize(200)
self.sourceTable.horizontalHeader().setStretchLastSection(False)
self.sourceTable.verticalHeader().setVisible(False)
self.sourceTable.verticalHeader().setDefaultSectionSize(20)
self.gridLayout_3.addWidget(self.splitter, 1, 0, 1, 1)
self.tabWidget.addTab(self.sourceTab, "")
self.dataTab = QWidget()
self.dataTab.setObjectName("dataTab")
self.gridLayout_5 = QGridLayout(self.dataTab)
self.gridLayout_5.setObjectName("gridLayout_5")
self.splitter_2 = QSplitter(self.dataTab)
self.splitter_2.setOrientation(QtCore.Qt.Vertical)
self.splitter_2.setObjectName("splitter_2")
self.dataTable = QTableWidget(self.splitter_2)
self.dataTable.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.dataTable.setAlternatingRowColors(True)
self.dataTable.setSelectionMode(QAbstractItemView.MultiSelection)
self.dataTable.setSelectionBehavior(QAbstractItemView.SelectRows)
self.dataTable.setShowGrid(False)
self.dataTable.setGridStyle(QtCore.Qt.SolidLine)
self.dataTable.setObjectName("dataTable")
self.dataTable.setColumnCount(4)
self.dataTable.setRowCount(0)
item = QTableWidgetItem()
self.dataTable.setHorizontalHeaderItem(0, item)
item = QTableWidgetItem()
self.dataTable.setHorizontalHeaderItem(1, item)
item = QTableWidgetItem()
self.dataTable.setHorizontalHeaderItem(2, item)
item = QTableWidgetItem()
self.dataTable.setHorizontalHeaderItem(3, item)
self.dataTable.horizontalHeader().setStretchLastSection(True)
self.dataTable.verticalHeader().setDefaultSectionSize(20)
self.textOutput = QTextEdit(self.splitter_2)
self.textOutput.setReadOnly(True)
self.textOutput.setObjectName("textOutput")
self.layoutWidget = QWidget(self.splitter_2)
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout_4 = QGridLayout(self.layoutWidget)
self.gridLayout_4.setContentsMargins(0, 0, 0, 0)
self.gridLayout_4.setObjectName("gridLayout_4")
self.label_3 = QLabel(self.layoutWidget)
self.label_3.setObjectName("label_3")
self.gridLayout_4.addWidget(self.label_3, 0, 0, 1, 1)
self.commandInput = ConsoleWidget(self.layoutWidget)
self.commandInput.setObjectName("commandInput")
self.gridLayout_4.addWidget(self.commandInput, 0, 1, 1, 1)
self.commandButton = QPushButton(self.layoutWidget)
self.commandButton.setObjectName("commandButton")
self.gridLayout_4.addWidget(self.commandButton, 0, 2, 1, 1)
self.gridLayout_5.addWidget(self.splitter_2, 0, 0, 1, 1)
self.tabWidget.addTab(self.dataTab, "")
self.plotTab = QWidget()
self.plotTab.setObjectName("plotTab")
self.tabWidget.addTab(self.plotTab, "")
self.gridLayout_2.addWidget(self.tabWidget, 0, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 803, 19))
self.menubar.setObjectName("menubar")
self.menuFile = QMenu(self.menubar)
self.menuFile.setObjectName("menuFile")
self.menuAdd_source = QMenu(self.menuFile)
self.menuAdd_source.setObjectName("menuAdd_source")
self.menuPlot = QMenu(self.menubar)
self.menuPlot.setObjectName("menuPlot")
self.menuCommand = QMenu(self.menubar)
self.menuCommand.setObjectName("menuCommand")
self.menuHelp = QMenu(self.menubar)
self.menuHelp.setObjectName("menuHelp")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionLoadState = QAction(MainWindow)
self.actionLoadState.setObjectName("actionLoadState")
self.actionSaveState = QAction(MainWindow)
self.actionSaveState.setObjectName("actionSaveState")
self.actionExit = QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.actionXPAD_tree = QAction(MainWindow)
self.actionXPAD_tree.setObjectName("actionXPAD_tree")
self.actionNetCDF_file = QAction(MainWindow)
self.actionNetCDF_file.setObjectName("actionNetCDF_file")
self.actionPlot = QAction(MainWindow)
self.actionPlot.setObjectName("actionPlot")
self.actionOPlot = QAction(MainWindow)
self.actionOPlot.setObjectName("actionOPlot")
self.actionMPlot = QAction(MainWindow)
self.actionMPlot.setObjectName("actionMPlot")
self.actionXYPlot = QAction(MainWindow)
self.actionXYPlot.setObjectName("actionXYPlot")
self.actionZPlot = QAction(MainWindow)
self.actionZPlot.setObjectName("actionZPlot")
self.actionBOUT_data = QAction(MainWindow)
self.actionBOUT_data.setObjectName("actionBOUT_data")
self.actionContour = QAction(MainWindow)
self.actionContour.setObjectName("actionContour")
self.actionContour_filled = QAction(MainWindow)
self.actionContour_filled.setObjectName("actionContour_filled")
self.actionClearFig = QAction(MainWindow)
self.actionClearFig.setObjectName("actionClearFig")
self.actionWrite_ASCII = QAction(MainWindow)
self.actionWrite_ASCII.setObjectName("actionWrite_ASCII")
self.actionAdd = QAction(MainWindow)
self.actionAdd.setObjectName("actionAdd")
self.actionSubtract = QAction(MainWindow)
self.actionSubtract.setObjectName("actionSubtract")
self.actionMultiply = QAction(MainWindow)
self.actionMultiply.setObjectName("actionMultiply")
self.actionDivide = QAction(MainWindow)
self.actionDivide.setObjectName("actionDivide")
self.actionChop = QAction(MainWindow)
self.actionChop.setObjectName("actionChop")
self.actionIntegrate = QAction(MainWindow)
self.actionIntegrate.setObjectName("actionIntegrate")
self.actionDf_dt = QAction(MainWindow)
self.actionDf_dt.setObjectName("actionDf_dt")
self.actionSmooth = QAction(MainWindow)
self.actionSmooth.setObjectName("actionSmooth")
self.actionLow_pass_filter = QAction(MainWindow)
self.actionLow_pass_filter.setObjectName("actionLow_pass_filter")
self.actionHigh_pass_filter = QAction(MainWindow)
self.actionHigh_pass_filter.setObjectName("actionHigh_pass_filter")
self.actionBand_pass_filter = QAction(MainWindow)
self.actionBand_pass_filter.setObjectName("actionBand_pass_filter")
self.actionFFTP = QAction(MainWindow)
self.actionFFTP.setObjectName("actionFFTP")
self.actionRunFFT = QAction(MainWindow)
self.actionRunFFT.setObjectName("actionRunFFT")
self.actionReciprocal = QAction(MainWindow)
self.actionReciprocal.setObjectName("actionReciprocal")
self.actionExponential = QAction(MainWindow)
self.actionExponential.setObjectName("actionExponential")
self.actionAbsolute = QAction(MainWindow)
self.actionAbsolute.setObjectName("actionAbsolute")
self.actionArctan = QAction(MainWindow)
self.actionArctan.setObjectName("actionArctan")
self.actionNlog = QAction(MainWindow)
self.actionNlog.setObjectName("actionNlog")
self.actionNorm = QAction(MainWindow)
self.actionNorm.setObjectName("actionNorm")
self.actionInvert = QAction(MainWindow)
self.actionInvert.setObjectName("actionInvert")
self.actionAddCon = QAction(MainWindow)
self.actionAddCon.setObjectName("actionAddCon")
self.actionSubCon = QAction(MainWindow)
self.actionSubCon.setObjectName("actionSubCon")
self.actionMulCon = QAction(MainWindow)
self.actionMulCon.setObjectName("actionMulCon")
self.actionDivCon = QAction(MainWindow)
self.actionDivCon.setObjectName("actionDivCon")
self.actionPowCon = QAction(MainWindow)
self.actionPowCon.setObjectName("actionPowCon")
self.actionChangeName = QAction(MainWindow)
self.actionChangeName.setObjectName("actionChangeName")
self.actionChangeUnits = QAction(MainWindow)
self.actionChangeUnits.setObjectName("actionChangeUnits")
self.actionClip = QAction(MainWindow)
self.actionClip.setObjectName("actionClip")
self.actionStats = QAction(MainWindow)
self.actionStats.setObjectName("actionStats")
self.actionTimeOff = QAction(MainWindow)
self.actionTimeOff.setObjectName("actionTimeOff")
self.actionAbout = QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.actionDeleteTrace = QAction(MainWindow)
self.actionDeleteTrace.setObjectName("actionDeleteTrace")
self.menuAdd_source.addAction(self.actionNetCDF_file)
self.menuAdd_source.addAction(self.actionXPAD_tree)
self.menuAdd_source.addAction(self.actionBOUT_data)
self.menuFile.addAction(self.menuAdd_source.menuAction())
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionLoadState)
self.menuFile.addAction(self.actionSaveState)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionWrite_ASCII)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionExit)
self.menuPlot.addAction(self.actionPlot)
self.menuPlot.addAction(self.actionOPlot)
self.menuPlot.addAction(self.actionMPlot)
self.menuPlot.addAction(self.actionZPlot)
self.menuPlot.addAction(self.actionXYPlot)
self.menuPlot.addAction(self.actionContour)
self.menuPlot.addAction(self.actionContour_filled)
self.menuPlot.addAction(self.actionClearFig)
self.menuCommand.addAction(self.actionDeleteTrace)
self.menuCommand.addSeparator()
self.menuCommand.addAction(self.actionChop)
self.menuCommand.addSeparator()
self.menuCommand.addAction(self.actionIntegrate)
self.menuCommand.addAction(self.actionDf_dt)
self.menuCommand.addAction(self.actionSmooth)
self.menuCommand.addAction(self.actionBand_pass_filter)
self.menuCommand.addSeparator()
self.menuCommand.addAction(self.actionAdd)
self.menuCommand.addAction(self.actionSubtract)
self.menuCommand.addAction(self.actionMultiply)
self.menuCommand.addAction(self.actionDivide)
self.menuCommand.addSeparator()
self.menuCommand.addAction(self.actionFFTP)
self.menuCommand.addAction(self.actionRunFFT)
self.menuCommand.addSeparator()
self.menuCommand.addAction(self.actionReciprocal)
self.menuCommand.addAction(self.actionExponential)
self.menuCommand.addAction(self.actionAbsolute)
self.menuCommand.addAction(self.actionArctan)
self.menuCommand.addAction(self.actionNlog)
self.menuCommand.addAction(self.actionNorm)
self.menuCommand.addAction(self.actionInvert)
self.menuCommand.addAction(self.actionAddCon)
self.menuCommand.addAction(self.actionSubCon)
self.menuCommand.addAction(self.actionMulCon)
self.menuCommand.addAction(self.actionDivCon)
self.menuCommand.addAction(self.actionPowCon)
self.menuCommand.addAction(self.actionChangeName)
self.menuCommand.addAction(self.actionChangeUnits)
self.menuCommand.addAction(self.actionClip)
self.menuCommand.addAction(self.actionStats)
self.menuCommand.addAction(self.actionTimeOff)
self.menuHelp.addAction(self.actionAbout)
self.menubar.addAction(self.menuFile.menuAction())
self.menubar.addAction(self.menuPlot.menuAction())
self.menubar.addAction(self.menuCommand.menuAction())
self.menubar.addAction(self.menuHelp.menuAction())
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QApplication.translate("MainWindow", "PyXPad", None, UnicodeUTF8))
self.sourceDescription.setText(QApplication.translate("MainWindow", "Description", None, UnicodeUTF8))
self.shotLabel.setText(QApplication.translate("MainWindow", "Shot:", None, UnicodeUTF8))
self.readDataButton.setText(QApplication.translate("MainWindow", "&Read", None, UnicodeUTF8))
self.traceLabel.setText(QApplication.translate("MainWindow", "Trace:", None, UnicodeUTF8))
self.lastShotButton.setToolTip(QApplication.translate("MainWindow", "Get last shot number", None, UnicodeUTF8))
self.lastShotButton.setText(QApplication.translate("MainWindow", "&Last shot", None, UnicodeUTF8))
self.sourceTable.setToolTip(QApplication.translate("MainWindow", "List of available signals", None, UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.sourceTab), QApplication.translate("MainWindow", "&Sources", None, UnicodeUTF8))
self.dataTable.horizontalHeaderItem(0).setText(QApplication.translate("MainWindow", "Name", None, UnicodeUTF8))
self.dataTable.horizontalHeaderItem(1).setText(QApplication.translate("MainWindow", "Source", None, UnicodeUTF8))
self.dataTable.horizontalHeaderItem(2).setText(QApplication.translate("MainWindow", "Trace", None, UnicodeUTF8))
self.dataTable.horizontalHeaderItem(3).setText(QApplication.translate("MainWindow", "Comments", None, UnicodeUTF8))
self.label_3.setText(QApplication.translate("MainWindow", "Command:", None, UnicodeUTF8))
self.commandButton.setText(QApplication.translate("MainWindow", "Run", None, UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.dataTab), QApplication.translate("MainWindow", "&Data", None, UnicodeUTF8))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.plotTab), QApplication.translate("MainWindow", "&Plot", None, UnicodeUTF8))
self.menuFile.setTitle(QApplication.translate("MainWindow", "&File", None, UnicodeUTF8))
self.menuAdd_source.setTitle(QApplication.translate("MainWindow", "&Add source", None, UnicodeUTF8))
self.menuPlot.setTitle(QApplication.translate("MainWindow", "&Graphics", None, UnicodeUTF8))
self.menuCommand.setTitle(QApplication.translate("MainWindow", "&Command", None, UnicodeUTF8))
self.menuHelp.setTitle(QApplication.translate("MainWindow", "&Help", None, UnicodeUTF8))
self.actionLoadState.setText(QApplication.translate("MainWindow", "&Load state", None, UnicodeUTF8))
self.actionSaveState.setText(QApplication.translate("MainWindow", "&Save state", None, UnicodeUTF8))
self.actionExit.setText(QApplication.translate("MainWindow", "E&xit", None, UnicodeUTF8))
self.actionExit.setToolTip(QApplication.translate("MainWindow", "Exit pyXpad", None, UnicodeUTF8))
self.actionExit.setShortcut(QApplication.translate("MainWindow", "Ctrl+Q", None, UnicodeUTF8))
self.actionXPAD_tree.setText(QApplication.translate("MainWindow", "XPAD tree", None, UnicodeUTF8))
self.actionXPAD_tree.setToolTip(QApplication.translate("MainWindow", "Load a tree of XPAD items", None, UnicodeUTF8))
self.actionNetCDF_file.setText(QApplication.translate("MainWindow", "NetCDF file", None, UnicodeUTF8))
self.actionPlot.setText(QApplication.translate("MainWindow", "&Plot", None, UnicodeUTF8))
self.actionOPlot.setText(QApplication.translate("MainWindow", "&OPlot", None, UnicodeUTF8))
self.actionMPlot.setText(QApplication.translate("MainWindow", "&MPlot", None, UnicodeUTF8))
self.actionXYPlot.setText(QApplication.translate("MainWindow", "&XYPlot", None, UnicodeUTF8))
self.actionZPlot.setText(QApplication.translate("MainWindow", "&ZPlot", None, UnicodeUTF8))
self.actionBOUT_data.setText(QApplication.translate("MainWindow", "BOUT++ data", None, UnicodeUTF8))
self.actionBOUT_data.setToolTip(QApplication.translate("MainWindow", "Read BOUT++ data directory", None, UnicodeUTF8))
self.actionBOUT_data.setShortcut(QApplication.translate("MainWindow", "Ctrl+S", None, UnicodeUTF8))
self.actionContour.setText(QApplication.translate("MainWindow", "&Contour", None, UnicodeUTF8))
self.actionContour_filled.setText(QApplication.translate("MainWindow", "Contour &filled", None, UnicodeUTF8))
self.actionClearFig.setText(QApplication.translate("MainWindow", "C&lear Figure", None, UnicodeUTF8))
self.actionWrite_ASCII.setText(QApplication.translate("MainWindow", "&Write ASCII", None, UnicodeUTF8))
self.actionAdd.setText(QApplication.translate("MainWindow", "X+Y (Sum Channels)", None, UnicodeUTF8))
self.actionSubtract.setText(QApplication.translate("MainWindow", "X-Y", None, UnicodeUTF8))
self.actionMultiply.setText(QApplication.translate("MainWindow", "X*Y", None, UnicodeUTF8))
self.actionDivide.setText(QApplication.translate("MainWindow", "X/Y", None, UnicodeUTF8))
self.actionChop.setText(QApplication.translate("MainWindow", "Chop", None, UnicodeUTF8))
self.actionIntegrate.setText(QApplication.translate("MainWindow", "Integrate", None, UnicodeUTF8))
self.actionDf_dt.setText(QApplication.translate("MainWindow", "df/dt", None, UnicodeUTF8))
self.actionSmooth.setText(QApplication.translate("MainWindow", "Smooth", None, UnicodeUTF8))
self.actionLow_pass_filter.setText(QApplication.translate("MainWindow", "Low pass filter", None, UnicodeUTF8))
self.actionHigh_pass_filter.setText(QApplication.translate("MainWindow", "High pass filter", None, UnicodeUTF8))
self.actionBand_pass_filter.setText(QApplication.translate("MainWindow", "Band pass filter", None, UnicodeUTF8))
self.actionFFTP.setText(QApplication.translate("MainWindow", "FFTP", None, UnicodeUTF8))
self.actionRunFFT.setText(QApplication.translate("MainWindow", "Running FFT", None, UnicodeUTF8))
self.actionReciprocal.setText(QApplication.translate("MainWindow", "1/X", None, UnicodeUTF8))
self.actionExponential.setText(QApplication.translate("MainWindow", "exp", None, UnicodeUTF8))
self.actionAbsolute.setText(QApplication.translate("MainWindow", "abs", None, UnicodeUTF8))
self.actionArctan.setText(QApplication.translate("MainWindow", "arctan", None, UnicodeUTF8))
self.actionNlog.setText(QApplication.translate("MainWindow", "ln", None, UnicodeUTF8))
self.actionNorm.setText(QApplication.translate("MainWindow", "Normalise", None, UnicodeUTF8))
self.actionInvert.setText(QApplication.translate("MainWindow", "Invert", None, UnicodeUTF8))
self.actionAddCon.setText(QApplication.translate("MainWindow", "X+C", None, UnicodeUTF8))
self.actionSubCon.setText(QApplication.translate("MainWindow", "X-C", None, UnicodeUTF8))
self.actionMulCon.setText(QApplication.translate("MainWindow", "X*C", None, UnicodeUTF8))
self.actionDivCon.setText(QApplication.translate("MainWindow", "X/C", None, UnicodeUTF8))
self.actionPowCon.setText(QApplication.translate("MainWindow", "X^C", None, UnicodeUTF8))
self.actionChangeName.setText(QApplication.translate("MainWindow", "Change Name", None, UnicodeUTF8))
self.actionChangeUnits.setText(QApplication.translate("MainWindow", "Change Units", None, UnicodeUTF8))
self.actionClip.setText(QApplication.translate("MainWindow", "Clip", None, UnicodeUTF8))
self.actionStats.setText(QApplication.translate("MainWindow", "Statistics", None, UnicodeUTF8))
self.actionTimeOff.setText(QApplication.translate("MainWindow", "Time Offset", None, UnicodeUTF8))
self.actionAbout.setText(QApplication.translate("MainWindow", "&About", None, UnicodeUTF8))
self.actionDeleteTrace.setText(QApplication.translate("MainWindow", "&Delete Trace", None, UnicodeUTF8))
from .console_widget import ConsoleWidget
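# Illustrative only (a hand-added sketch, not produced by pyside-uic): the
# usual way a generated Ui_* class is wired to a window.  The real entry
# point lives elsewhere in pyxpad; the relative import above only resolves
# when this module is run as part of the package, e.g.
# ``python -m pyxpad.pyxpad_main``.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())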
|
bendudson/pyxpad
|
pyxpad/pyxpad_main.py
|
Python
|
gpl-3.0
| 24,180
|
[
"NetCDF"
] |
92aaa3bd6713b1d9b6784c5ca1221a171980e0c54b9dcb7ba6cf87d9253e216d
|
from netconfig.drivers import Prompt, Port, Ports, Config, Model, Firmware, Layer1, Vlan, SpanningTree, MacAddress, Layer2, Routes, Arps, Layer3, Transceiver, FRU, Password, Users, System, Device, PortChannels, Module, Stats, Rfc2863
from slac_utils.net import prefixlen_to_netmask, truncate_physical_port, netmask_to_prefixlen, to_ip
from re import compile, match, search, sub, DOTALL, finditer
from netconfig.drivers import DeviceException, IncompatibleDeviceException
from os import path
import logging
#######################################################################
# Cisco IOS Switch
#######################################################################
class PromptCiscoIos( Prompt ):
mode = {
'exec': "\>",
'enable': "\#",
'config': '\(config\)#',
'config-if': "\(config-(sub)?if\)#",
'config-vlan': '\((config-)?vlan\)#',
'config-line': '\(config-line\)\#',
'config-sync-pre': '\(config-sync\)#',
'config-sync': '\(config-sync-sp\)#',
'config-sync-if': "\(config-sync-sp-if\)#",
}
interact = {
'enable_password': 'Password: ',
'pager' : ' --More-- $', # pager (press space for more)
'question' : "\[.*\](\:|\?) $",
#'yes_no' : "(\? \[yes\/no\]\:|\(y/n\)\?\[\w\]) $",
'yes_no' : "(\? \[yes\/no\]\:|\(y/n\)(\?\[\w\])?) $",
'config_modified': 'System configuration has been modified\. Save\? \[yes\/no\]:',
'reload': 'Proceed with reload\? \[confirm\]',
'confirm': ' \[confirm?\]',
'default_answer': " considered\):",
}
error = {
'command': "Invalid command at",
'input': "Invalid input detected at",
'incomplete': "% Incomplete command",
'denied': '% Access denied',
'authorization': '(Error: AAA authorization failed|cmd not authorized: this incident has been reported|Command authorization failed)',
'rejected': 'Command rejected:.*$'
}
wizard_responses = {
r'Would you like to enter the initial configuration dialog\? \[yes/no\]:': 'no',
}
def terminal_buffer( self, size=0 ):
return self.ask('terminal length %s' % (size,))
def mode_exec( self ):
n = 3
while n > 0:
if self.current_cursor == self.cursor( 'mode', 'exec' ):
return True
elif self.current_cursor == self.cursor( 'mode', 'enable' ):
self.ask('disable')
elif self.current_cursor in ( self.cursor('mode', 'config'), self.cursor('mode','config-if'), self.cursor('mode','config-vlan') ):
# ios switches can stall the prompt sometimes after running 'end'
self.ask('end', timeout=self.prompt.timeouts['long'] )
else:
logging.debug(' trying to move to %s from current prompt %s' % ( self.cursor('mode','exec'), self.current_cursor) )
n = n - 1
logging.debug("could not change from cursor %s to mode exec" % (self.current_cursor) )
return False
def mode_enable( self ):
n = 4
logging.debug('attempting to get into enable mode')
while n > 0:
logging.debug(' current prompt ' + str(self.current_cursor))
if self.current_cursor == self.cursor( 'mode', 'exec' ):
self.new_line()
res = self.ask( 'enable', preempt=False )
elif self.current_cursor == self.cursor( 'interact', 'enable_password' ):
logging.debug(" sending enable password")
res = self.ask( self.connector.enable_password, preempt=False, suppress_command_output=True )
elif self.current_cursor in ( self.cursor('mode','config'), self.cursor('mode','config-if' ), self.cursor('mode','config-vlan') ):
res = self.ask( 'end' )
elif self.current_cursor == self.cursor( 'mode', 'enable' ):
return True
elif self.current_cursor == self.cursor( 'error', 'denied' ):
logging.error("access denied")
return False
else:
logging.debug('trying to move to ' + self.cursor('mode','enable') + ' from current prompt ' + self.current_cursor)
self.ask('exit')
n = n - 1
logging.warn("could not change from cursor " + str(self.current_cursor) + ' to mode enable')
return False
def mode_config( self ):
# logging.warn("entering mode config")
res = self.request( 'configure terminal', cursor=self.cursor('mode','enable') )
if self.current_cursor == self.cursor('mode','config'):
return True
return False
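# Illustrative sketch (hand-added, not part of the original driver): the mode
# patterns above are plain regular expressions, so they can be sanity-checked
# against sample prompt strings without a live device.
def _prompt_cisco_ios_demo():
    sample_prompts = {
        'exec': 'myswitch>',
        'enable': 'myswitch#',
        'config': 'myswitch(config)#',
        'config-if': 'myswitch(config-if)#',
    }
    for mode_name, prompt_text in sample_prompts.items():
        assert search(PromptCiscoIos.mode[mode_name], prompt_text), mode_name
    return True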
class PortsCiscoIos( Ports ):
ports = {}
int_status_fields = ( 'port', 'alias', 'status', 'vlan', 'duplex', 'speed', None )
int_status_port_types_ignore = ( 'SFP', 'Transceiver', 'Present', 'Connector', 'X2' )
int_status_exclude = [ 'Port Name Status', '------- ------------------ ------------ -------- ------ ------- ----' ]
# globals
lldp = None
cdp = None
show_run_threshold = 8
show_run_regexes = [
r'^\s*interface (?P<port>.*)\s*$',
r'^\s+description (?P<alias>.*)\s*$',
r'^\s+switchport access vlan (?P<access_vlan>\d+)\s*$',
r'^\s+switchport trunk allowed vlan add (?P<trunked_vlans_additional>[\d\,\-]+)\s*$', # TODO: add across multi
r'^\s+switchport trunk allowed vlan (?P<trunked_vlans>[\d\,\-]+)\s*$', #
r'^\s+switchport trunk native vlan (?P<native_vlan>\d+)$',
# TODO: deal with notag on native vlan?
# r'^\s+no switchport trunk native vlan (?P<native_vlan>tag)$',
r'^\s+switchport (?P<type>private-vlan host)\-association\s+(?P<private_vlan_one>\d+)\s(?P<private_vlan_two>\d+)\s*$',
r'^\s+switchport mode (?P<type>.*)\s*$',
r'^\s+switchport (?P<portnoneg>nonegotiate)',
r'^\s+switchport voice vlan (?P<voice_vlan>\d+)$',
r'^\s+duplex (?P<duplex>.*)$',
r'^\s+speed (?P<speed>.*)$',
r'^\s+(?P<shutdown>shutdown)',
r'^\s+(?P<port_security>switchport port-security)\s*$',
r'^\s+switchport port-security maximum (?P<port_security_max>\d+)',
r'^\s+switchport port-security aging time (?P<port_security_aging_time>\d+)',
r'^\s+switchport port-security violation (?P<port_security_violation>\w+)',
r'^\s+switchport port-security aging type (?P<port_security_aging_type>\w+)',
r'^\s+logging event (?P<logging_event_link_status>link-status)',
r'^\s+logging event (?P<logging_event_trunk_status>trunk-status)',
r'^\s+logging event (?P<logging_event_bundle_status>bundle-status)',
# TODO: check against global?
r'^\s+(?P<cdp>no cdp enable)',
# TODO: lldp? check against global
r'^\s+(?P<no_lldp_transmit>no lldp transmit)',
r'^\s+(?P<no_lldp_receive>no lldp receive)',
r'^\s+spanning-tree (?P<noportfast>portfast) disable',
r'^\s+spanning-tree (?P<portfast>portfast)$',
r'^\s+spanning-tree (?P<nobpduguard>bpduguard) disable',
# TODO: 'spanning-tree portfast' on port
r'^\s+storm-control broadcast level (?P<storm_control_broadcast>\S+)$',
r'^\s+storm-control multicast level (?P<storm_control_multicast>\S+)$',
r'^\s+ip dhcp snooping (?P<dhcp_snooping>\S+)\s*$',
]
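    def _show_run_regexes_demo(self):
        # Illustrative sketch (hand-added, not part of the original driver):
        # exercising the parsing regexes above against a sample line of
        # 'show running-config' output.
        line = ' switchport access vlan 42'
        for regex in self.show_run_regexes:
            g = search(regex, line)
            if g:
                return g.groupdict()   # e.g. {'access_vlan': '42'}
        return None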
def initiate(self):
# set global port values like cdp and lldp
self.lldp = True if self.prompt.ask('show lldp', fail_okay=True ) else False
self.cdp = None
try:
self.cdp = True if self.prompt.ask('show cdp', fail_okay=False ) else False
except:
# stupid nexus's
try:
self.cdp = True if self.prompt.ask('show cdp global') else False
except:
pass
self.prompt.ask('')
# logging.debug("LLDP: %s" % self.lldp)
def _int_run( self, port=None ):
# filter if necessary
search_cmd = 'show running-config'
if not port == None:
if isinstance( port, dict ):
logging.warn("DONT RUN WITH THIS AS ARG: %s" % (port,))
search_cmd = '%s interface %s' % ( search_cmd, port['port'] )
else:
search_cmd = '%s interface %s' % ( search_cmd, port )
try:
for d in self.prompt.tell_and_match_block( search_cmd, self.show_run_regexes, timeout=self.prompt.timeouts['long'] ):
d['portnoneg'] = True if 'portnoneg' in d else False
if 'type' in d:
d['type'] = d['type'].lower()
# vlans
if 'type' in d:
if d['type'] == 'access' \
and 'access_vlan' in d:
d['vlan'] = [ d['access_vlan'] ]
del d['access_vlan']
elif d['type'] == 'trunk':
if 'trunked_vlans' in d:
d['vlan'] = []
for i in d['trunked_vlans'].split(','):
g = match( r'^(?P<start>\d+)\-(?P<end>\d+)$', i )
if g:
s,e = g.group('start','end')
for n in xrange( int(s), int(e)+1 ):
# vlans['trunk'][str(n)] = True
d['vlan'].append( str(n) )
else:
# vlans['trunk'][i] = True
d['vlan'].append( str(i) )
del d['trunked_vlans']
if 'trunked_vlans_additional' in d:
for i in d['trunked_vlans_additional'].split(','):
g = match( r'^(?P<start>\d+)\-(?P<end>\d+)$', i )
if g:
s,e = g.group('start','end')
for n in xrange( int(s), int(e)+1 ):
# vlans['trunk'][str(n)] = True
d['vlan'].append( str(n) )
else:
# vlans['trunk'][i] = True
d['vlan'].append( str(i) )
del d['trunked_vlans_additional']
# if no native-vlan, assume 1
if not 'native_vlan' in d:
d['native_vlan'] = 1
elif d['type'] == 'private-vlan host' \
and 'private_vlan_one' in d and 'private_vlan_two' in d:
# logging.debug(" private vlan! %s %s " % (d['private_vlan_one'], d['private_vlan_two']))
# d['type'] = 'private-vlan host'
d['vlan'] = [ d['private_vlan_one'], d['private_vlan_two'] ]
del d['private_vlan_one']
del d['private_vlan_two']
if d['type'] in ( 'fex-fabric', 'routed' ) and 'trunked_vlans' in d:
# clear trunked_vlans
del d['trunked_vlans']
if 'shutdown' in d:
d['state'] = False
del d['shutdown']
if 'speed' in d:
# if not d['speed'] == 'nonegotiate':
if d['speed'].startswith('auto'):
s = d['speed'].replace( 'auto ', '')
d['speed'] = s.replace(' ', ',')
if 'autoneg' in d:
if d['autoneg'] == None:
del d['autoneg']
else:
d['autoneg'] = True
# TODO: globals?
d['cdp'] = False if 'cdp' in d else self.cdp
# lldp
d['lldp'] = {}
for i in ( 'no_lldp_transmit', 'no_lldp_receive' ):
a,b,c = i.split('_')
d[b][c] = False if i in d else self.lldp
if i in d:
del d[i]
# spanningtree stuff: TODO: globals
if 'portfast' in d:
d['portfast'] = True
else:
d['portfast'] = False if 'noportfast' in d else True
if 'noportfast' in d:
del d['noportfast']
# bpduguard
d['bpduguard'] = False if 'nobpduguard' in d else True
if 'nobpduguard' in d:
del d['nobpduguard']
# voice stuff
d['voice_vlan'] = int(d['voice_vlan']) if 'voice_vlan' in d else False
# alias
if not 'alias' in d:
d['alias'] = ''
d['logging'] = {}
for i in ( 'logging_event_bundle_status', 'logging_event_trunk_status', 'logging_event_link_status' ):
j = i.replace( 'logging_event_', '' ).replace('_','-')
if i in d:
d['logging'][j] = True
del d[i]
else:
d['logging'][j] = False
# 3550's don't report link-status... what to do?
d['security'] = {}
for i in ( 'port_security_max', 'port_security_aging_time', 'port_security_violation', 'port_security_aging_type' ):
if i in d:
j = i.replace('port_security_','').replace('_','-')
d['security'][j] = d[i]
del d[i]
if not 'port_security' in d:
d['security'] = False
if d['security']:
# set default values
if not 'max' in d['security'] and not 'port_security_max' in d:
d['security']['max'] = 1
d['storm-control'] = {}
for i in ( 'storm_control_multicast', 'storm_control_broadcast', 'storm_control_unicast' ):
j = i.replace('storm_control_','')
if i in d:
d['storm-control'][j] = d[i]
del d[i]
else:
d['storm-control'][j] = False
# yield!
if 'port' in d:
d['port'] = truncate_physical_port( d['port'] )
logging.debug(" >: show run output %s" % (d,))
yield d
except Exception,e:
logging.error("ERR: (%s) %s" % (type(e),e))
return
def _int_status_validate( self, info, k, v ):
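""" map a single 'show int status' field (k,v) onto the port info dict; the vlan column also determines the port type (access/trunk/routed/unassigned) """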
if k == 'vlan':
logging.debug(" validating %s as %s: %s" % (k,v,info) )
if v == 'trunk':
info['vlan'] = []
info['type'] = 'trunk'
elif v == 'routed':
info['vlan'] = []
info['type'] = 'routed'
elif v == 'unassigned':
info['vlan'] = []
info['type'] = 'unassigned'
else:
a = v.split(',')
# logging.debug(' vlans: %s' % (a,))
info['vlan'] = [ int(v) for v in a ]
info['type'] = 'access'
else:
info[k] = v
# logging.debug(" set to " + str(info))
return info
def _int_status( self, filter=None, ignore_case=True ):
""" parse ports using show int status """
this_f = [ f for f in self.int_status_fields ]
n = 0
# format the query line for the ports
search_cmd = 'show int status '
if not filter == None:
if ignore_case:
# do some regexp to avoid case sensitivity
regexp = ''
for s in filter:
bit = s
if s.isalpha():
bit = '[' + s.lower() + s.upper() + ']'
regexp = regexp + bit
filter = regexp
search_cmd = search_cmd + ' | inc ' + str(filter)
# run and parse
for l in self.prompt.tell( search_cmd, cursor=[ self.prompt.cursor('mode', 'enable'), self.prompt.cursor('mode','exec')], timeout=self.prompt.timeouts['medium'] ):
# logging.debug(" =: '%s'" % self.int_status_exclude )
# logging.debug(" >: %s" % (l) )
ignore = False
if not self.int_status_exclude == None:
excludes = self.int_status_exclude
if not isinstance( self.int_status_exclude, list ):
excludes = [ self.int_status_exclude ]
for e in excludes:
if match( sub(r'\s+','',e), sub(r'\s+','',l) ):
ignore = True
if ignore:
logging.debug(" skipping... %s" % (l,))
continue
# we assume that 'alias' may have spaces, so splitting just by spaces will not work
# so we scan through the arrays, and if we see an alias, we then work backwards
info = Port()
d = compile("\s{1,}").split(l.strip())
# logging.debug(" d: " + str(len(d)))
if len(d) < 2 or match(r'^\s+$',l):
continue
# logging.debug(" items " + str(len(d)) + ": " + str(d))
# copy fields as we need to use these as index
f = [ i for i in this_f ]
# go through datalist and compare to field list
for n in xrange(0,len(this_f)):
i = this_f[n]
# logging.debug(" n: " + str(n) + ", i: " + str(i) )
# alias field may have spaces, so we always consider that one last
if i == 'alias':
# logging.debug(" found alias")
break
a = f.pop(0)
b = d.pop(0)
if not a == None and not a == 'None':
# logging.debug(" parsed " + str(a) + ' as ' + str(b))
# info[a] = b
info = self._int_status_validate( info, a, b )
# reverse now
# if we have a port that does not have a 'type', then ignore from list
if b.startswith( 'Po' ) and not l.endswith('--'): # '--' = nexus
logging.debug(" found port without a type")
d.append('')
for n in xrange(len(f), 0, -1):
i = this_f[n]
# logging.debug(" n: %s\ti: %s" %(n,i) )
if i == 'alias':
break
a = f.pop(-1)
b = d.pop(-1)
# deal with stupid no worded port type
if i == None and ( b in ( '10G', 'auto', '100', '10' ) or b.startswith( 'a-' ) ):
d.append( b )
# logging.debug(" a: " + str(a) + ", b: " + str(b) )
# deal with stupid two worded port type
if b in self.int_status_port_types_ignore:
b = d.pop(-1)
if not a == None and not a == 'None':
# logging.debug(" parsing %s as %s" %(a,b))
# info[a] = b
info = self._int_status_validate( info, a, b )
logging.debug(" >: " + str(info))
# add placeholders for other variables
if not 'autoneg' in info:
info['autoneg'] = True
if not 'speed' in self.int_status_fields:
info['speed'] = 'auto'
if not 'duplex' in self.int_status_fields:
info['duplex'] = 'auto'
if not 'type' in info:
info['type'] = None
if not 'vlan' in info:
info['vlan'] = 1
info['native_vlan'] = 1
# add alias
for i in f:
# logging.debug(" cleaning remaining items: " + str(i) + " (f="+str(len(f))+",d="+str(len(d))+") from " + str(d))
if len(d) == 0: info[i] = None
elif len(f) == 1 and i == 'alias': info['alias'] = ' '.join(d)
else:
logging.debug(" i=%s, d=%s" % (i,d))
info[i] = d.pop(-1)
# logging.debug(" cleaned")
logging.debug(" found port (int status): %s" %(info,))
yield info
return
def _normalise(self, info):
"""
iterates through the keys in dict info, and cleans up the info about the interface
valid keys for info dict are:
speed duplex autoneg state vlan alias
"""
# logging.debug('normalising ' + str(info))
for k,v in info.iteritems():
if not v == None and type(v) == str:
info[k] = info[k].strip()
if 'speed' in info and 'duplex' in info:
# logging.error("SPEED/DUPLEX %s %s" % (info['speed'],info['duplex']))
# determine autoneg of port
if match( '(^a-|^A-)', info['speed'] ) \
or match( '(^a-|^A-)', info['duplex'] ):
if not 'autoneg' in info:
info['autoneg'] = True
for i in [ 'speed', 'duplex' ]:
info[i] = info[i].replace( 'a-', '' )
# logging.debug( ' found true value of ' + str(i) + ' as ' + info[i] )
else:
if not 'autoneg' in info:
info['autoneg'] = False
elif not 'autoneg' in info:
info['autoneg'] = False
# deal with show int desc where we have a protocol up/down
if 'protocol' in info and 'state' in info:
# logging.error("PROTOCOL: " + str(info['protocol']))
if info['state'] == 'admin down':
info['state'] = False
else:
info['state'] = True
if info['protocol'] == 'up': info['protocol'] = True
elif info['protocol'] == 'down': info['protocol'] = False
else: info['protocol'] = None
# logging.debug(" found true value of state as " + str(info['state']))
# state
if 'state' in info:
if info['state'] == 'up':
info['state'] = True
elif info['state'] == 'down':
info['state'] = False
else:
info['state'] = None
if 'status' in info:
if info['status'] in ( 'connected', 'connect', 'up' ):
info['state'] = True
info['protocol'] = True
elif info['status'] in ( 'notconnect', 'notconnec' ):
info['state'] = True
info['protocol'] = False
elif info['status'] in ( 'disabled', 'disable', 'down' ):
info['state'] = False
info['protocol'] = False
elif info['status'] == 'err-disabled':
info['state'] = 'err-disabled'
info['protocol'] = False
elif info['status'] in ( 'sfpAbsent', 'xcvrInval', 'sfpInvali', 'adminCfgC', 'xcvrAbsen' ):
info['state'] = False
info['protocol'] = False
elif info['status'] in ( 'monitoring', ):
info['state'] = 'monitoring'
info['protocol'] = True
elif info['status'] in ( 'noOperMem', 'suspndByV', 'suspnd', 'channelDo' ):
info['state'] = 'suspended'
info['protocol'] = False
elif info['status'] in ( 'faulty', ):
info['state'] = 'faulty'
info['protocol'] = False
# BUG?
# elif info['status'] == 'routed':
# info['type'] = 'routed'
# info['protocol'] = False
else:
raise DeviceException, "do not know how to map port state '%s'" % (info['status'])
# check vlan
# TODO: check for numbers?
if 'vlan' in info and info['vlan'] == 'routed':
info['type'] = info['vlan']
info['vlan'] = None
# alias
if not 'alias' in info:
info['alias'] = None
elif info['alias'] == '':
info['alias'] = None
return info
def _match_vlan_name( self, number, vlans ):
number = int(number)
if number in vlans:
return vlans[number]['name']
else:
logging.debug("could not determine vlan name for vlan %s from %s " % (number,vlans,))
return None
def _merge_port_info( self, i, j, vlans ):
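""" merge the 'show int status' and 'show run' dicts for a port, normalise the result and resolve vlan numbers to names """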
logging.debug(" merging: %s, %s" % (i,j) )
k = dict( i.items() + j.items() )
p = Port( self._normalise( k ) )
# update vlan info
if 'vlan' in p and not p['vlan'] == None:
v = []
# default is for vlan 1
for k in p['vlan']:
name = self._match_vlan_name( k, vlans )
if name:
v.append( name )
else:
v.append( str('unknown') )
p['vlan_name'] = v
else:
p['vlan_name'] = None
# native vlan name
if 'native_vlan' in p and not p['native_vlan'] == None:
p['native_vlan_name'] = self._match_vlan_name( p['native_vlan'], vlans )
# update voice vlan name
if 'voice_vlan' in p and p['voice_vlan']:
p['voice_vlan_name'] = self._match_vlan_name( p['voice_vlan'], vlans )
logging.debug(" merged: " + str(p))
return p
def _get( self, port=None, **kwargs ):
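"""
yield (port name, merged port info) by combining 'show int status' with per-interface
'show run' output; a single full 'show run' is used instead when more than
show_run_threshold ports match
"""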
ports = []
# use int status to filter list
if port == '/':
port = None
for i in self._int_status( filter=port ):
ports.append( i )
# if there is already a match, then use that
if len(ports) > 0 and not port == None:
logging.debug("narrowing down exact matching ports")
found = []
for p in ports:
if p['port'].lower() == port.lower():
found.append( p )
if len(found) == 1:
ports = found
# vlans
system_vlans = dict(self.parent.layer2.vlan)
# get extra detail from show run
run_ports = {}
run_with = []
# do show run on all if too many ports
# logging.error("PORTS LEN: %s (%s)" % (ports,len(ports)))
if len(ports) > self.show_run_threshold:
run_with = [ None ]
else:
run_with = [ p['port'] for p in ports ]
for r in run_with:
# logging.debug("R: (%s) %s" % (type(r),r,))
for j in self._int_run( port=r ):
# logging.error("JPORT: %s, %s" % (j['port'], j))
run_ports[j['port']] = j
# yield
for p in ports:
k = p['port']
j = {}
if k in run_ports:
j = run_ports[k]
x = self._merge_port_info( p, j, system_vlans )
yield k, x
return
def get( self, port=None ):
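""" return a single Port for the named port, raising if nothing matches or the match is ambiguous """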
if self.lldp == None or self.cdp == None:
self.initiate()
logging.debug("getting port " + str(port))
found = []
for p,i in self._get( port=port ):
found.append( i )
logging.debug(" found ports ("+str(len(found))+"): " + str(found))
if len( found ) == 0:
raise Exception, 'no ports found '+ str(port)
elif len( found ) == 1:
return found[0]
else:
# find exact match
for i in found:
if i['port'].lower() == port.lower():
logging.debug(" found single match %s" % (i,))
return i
raise Exception, 'not unique ' + str(port) + ", count: " + str(len(found))
def filter(self, string=None, **kwargs):
if self.lldp == None or self.cdp == None:
self.initiate()
for k,p in self._get( port=string ):
f = self._filter( p, **kwargs )
if f:
yield f
return
def enter_port_mode( self, port ):
# logging.warn('entering port mode')
logging.debug("current %s" % (self.prompt.current_cursor,))
if self.prompt.ask( 'interface ' + str(port), cursor=[ self.prompt.cursor('mode','config'), self.prompt.cursor('mode','config-if') ], output_wait=1.0 ):
logging.debug(" %s == %s" % (self.prompt.current_cursor, self.prompt.cursor('mode', 'config-if')))
if self.prompt.current_cursor == self.prompt.cursor('mode', 'config-if'):
return True
logging.debug('nope')
raise Exception, 'could not enter port config mode'
def stanza( self, port, **kwargs ):
if not 'port' in port:
raise SyntaxError, 'need port object'
if self.prompt.ask( 'interface %s' %(port['port']), cursor=( self.prompt.cursor('mode','config'), self.prompt.cursor('mode','config-if') ) ):
if self.prompt.current_cursor == self.prompt.cursor('mode', 'config-if'):
self._enter_prompt = self.prompt.current_cursor
return self.prompt
raise Exception, 'could not enter port config mode'
def set_alias( self, port, value, other, enter_port_mode=True ):
logging.debug("set alias %s to %s" % (port,value) )
cmd = None
if value == '' or value == None or value == True or value == False:
cmd = ' no description'
else:
cmd = ' description ' + str(value)
if not enter_port_mode or self.enter_port_mode( port ):
return self.prompt.ask( cmd )
return False
def set_state( self, port, value, other, enter_port_mode=True ):
logging.debug("set state %s to %s" % (port,value) )
cmd = None
if value == True:
cmd = ' no shut'
elif value == False:
cmd = ' shut'
else:
raise DeviceException, 'unknown port state %s' % (value)
if not enter_port_mode or self.enter_port_mode( port ):
return self.prompt.ask( cmd )
return False
def set_autoneg( self, port, value, other, enter_port_mode=True ):
if value in ( True, 'auto' ):
if not enter_port_mode or self.enter_port_mode( port ):
return self.prompt.ask( ' no duplex' ) and self.prompt.ask( ' no speed' )
# should probably try to determine that a speed is defined or duplex
else:
if 'speed' in other:
self.set_speed( port, other['speed'], other, enter_port_mode=False )
else:
# assume current speed is what is required (only for active ports)
self.set_speed( port, '100', other, enter_port_mode=False )
if 'duplex' in other:
self.set_duplex( port, other['duplex'], other, enter_port_mode=False )
else:
self.set_duplex( port, 'full', other, enter_port_mode=False )
return True
def set_speed( self, port, value, other, enter_port_mode=True ):
cmd = None
if not value == None:
value = value.replace( ',', ' ' )
autoneg = False
if 'autoneg' in other:
autoneg = other['autoneg']
# logging.debug('SPEED: %s, autoneg: %s->%s (other %s)' %(value,'',autoneg,repr(other)))
if autoneg == True and value == None:
cmd = ' no speed'
elif autoneg == True and value:
cmd = ' speed auto ' + value
elif autoneg == False and value:
cmd = ' speed ' + str(value)
if not enter_port_mode or self.enter_port_mode( port ):
return self.prompt.ask( cmd )
return False
def set_duplex( self, port, value, other, enter_port_mode=True ):
cmd = ' duplex ' + str(value)
if value == 'auto' or value == None:
cmd = ' no duplex'
if not enter_port_mode or self.enter_port_mode( port ):
return self.prompt.ask( cmd )
return False
def set_type_clear( self, port, type, other, enter_port_mode=True ):
""" clear the switchport types """
logging.debug("clearing port type %s" % (type,))
if not type == 'private-vlan host':
self.prompt.ask( 'no switchport private-vlan host-association', fail_okay=True )
if not type == 'trunk':
self.prompt.ask("no switchport trunk native vlan") \
and self.prompt.ask("no switchport trunk allowed vlan") \
and self.prompt.ask('no switchport trunk native vlan tag', fail_okay=True )
if not type == 'access':
self.prompt.ask("no switch access vlan")
return True
def set_type_clear_post( self, port, type, other, enter_port_mode=True ):
""" clear the switchport types after initial commands """
logging.debug("clearing post port type %s" % (type,))
if not type == 'trunk':
self.prompt.ask("no switchport mode trunk", fail_okay=True)
self.prompt.ask("no switchport trunk encapsulation", fail_okay=True)
return True
def set_type_access( self, port, value, other, enter_port_mode=True ):
""" configure port for access on vlan value """
logging.debug("set type access: %s" %(value))
if len( value ) == 1:
# older ios devices dont like the switchport command
self.prompt.ask( ' switchport', fail_okay=True ) \
and self.prompt.ask( ' switch mode access' )
vlan = value[0]
logging.debug(" access vlan %s (%s)" % (vlan, value))
configured_vlans = dict(self.parent.layer2.vlan)
if vlan in configured_vlans:
return self.prompt.ask( ' switchport access vlan %s' %(vlan,) )
else:
raise DeviceException, 'vlan %s is not available on this device' % (vlan,)
raise DeviceException, "input format '%s' incorrect" % (value,)
def set_native_vlan( self, port, value, other, enter_port_mode=True ):
""" configure port for native vlan """
logging.debug("set native vlan: %s %s" %(type(value),value))
if isinstance( value, int ):
return self.prompt.ask( 'switchport trunk native vlan %s' % (value,) )
elif isinstance( value, str ):
vlan = None
for i,v in dict(self.parent.layer2.vlan).iteritems():
if 'name' in v and search( value, v['name'] ):
vlan = i
if vlan:
return self.prompt.ask( ' switchport trunk native vlan %s ' %(vlan,) )
else:
raise DeviceException, 'vlan %s is not available on this device' % (vlan,)
raise DeviceException, "input format '%s' incorrect" % (value,)
def set_type_trunk( self, port, value, other, enter_port_mode=True ):
""" set the list of vlans trunked on this port """
logging.debug("set type trunk: %s" %(value))
if len( value ):
configured_vlans = dict(self.parent.layer2.vlan)
cmd = ' switchport trunk allowed vlan %s' % ( ','.join([ str(i) for i in value ]) )
#logging.error(" TRUNK: %s" % (cmd,))
return True
# for v in value:
# if not v in configured_vlans:
# raise DeviceException, 'vlan %s is not available' % (v,)
# if self.enter_port_mode( port ):
# cmd = ' switchport trunk allowed vlan %s' % ( ','.join([ str(i) for i in value ]) )
# return self.prompt.ask( cmd )
# return False
raise DeviceException, 'cannot remove all vlans from trunk'
def set_type_privatevlan_host( self, port, value, other, enter_port_mode=True ):
""" set to private vlan """
if type(value) == unicode:
v = []
for i in value.split(','):
v.append( int(i) )
value = v
logging.debug('set private vlan: ' + str(value) + " ("+str(type(value))+")")
if len(value) == 2:
configured_vlans = dict(self.parent.layer2.vlan)
if value[0] in configured_vlans and value[1] in configured_vlans:
if self.enter_port_mode( port ):
return self.prompt.ask( ' no switchport noneg' ) \
and self.prompt.ask( ' no switchport access vlan' ) \
and self.prompt.ask( ' no switchport mode access' ) \
and self.prompt.ask( ' switchport private-vlan host-association ' + str(value[0]) + " " + str(value[1]) ) \
and self.prompt.ask( ' switchport mode private-vlan host' ) \
and self.prompt.ask( ' no cdp enable' )
return False
raise DeviceException, 'private vlans ' + str(value) + " does not exist"
raise DeviceException, 'vlan format incorrect ('+str(value)+')'
def set_voice_vlan( self, port, value, other, enter_port_mode=True ):
""" set the voice vlan """
logging.debug('set voice vlan: ' + str(value))
if value == False:
return self.prompt.ask( 'no switchport voice vlan' )
elif value == True:
# find vlan - assume ends in VOICE
for i,v in dict(self.parent.layer2.vlan).iteritems():
if 'name' in v and search( r'-VOICE$', v['name'] ):
value = i
return self.prompt.ask( 'switchport voice vlan %s' % (value,))
def set_portnoneg( self, port, value, other, enter_port_mode=True ):
""" set the port type negotiation """
logging.debug('set port neg ')
if value == True:
self.prompt.ask( 'switchport nonegotiate')
elif value == False:
self.prompt.ask( 'no switchport nonegotiate')
else:
pass
return True
def set_logging( self, port, dict, other, enter_port_mode=True ):
""" set the port logging """
logging.debug('set port logging %s' % (dict,))
for k,v in dict.iteritems():
if v:
self.prompt.ask( 'logging event %s' % (k,))
elif v == False:
self.prompt.ask( 'no logging event %s' % (k,))
elif v == None:
pass
return True
def set_security( self, port, value, other, enter_port_mode=True ):
""" set the port security """
logging.debug('set port security %s' % (value,))
if isinstance(value,bool) and value == False:
if self.prompt.ask( 'no switch port-security'):
# clean up
for i in ( 'aging', 'mac-address', 'maximum', 'violation' ):
self.prompt.ask( 'no switchport port-security %s' % i )
else:
return False
elif isinstance(value,dict):
if other['type'] in ( 'access', ):
self.prompt.ask( 'switch mode %s' % (other['type'],) )
else:
raise Exception, 'unsupported type for port security enforcement'
if not self.prompt.ask('switch port-security'):
raise Exception, "could not enforce port security"
for k,v in value.iteritems():
key = k.replace('-',' ')
if v:
self.prompt.ask( 'switchport port-security %s %s' % (key,v))
elif v == False:
self.prompt.ask( 'no switchport port-security %s' % (key,))
elif v == None:
pass
else:
raise Exception, 'unknown security state %s' % (value,)
return True
def set_storm_control( self, port, dict, other, enter_port_mode=True ):
""" set the storm control """
logging.debug('set storm control %s' % (dict,))
for k,v in dict.iteritems():
if v:
self.prompt.ask( 'storm-control %s level %s' % (k,v))
elif v == False:
self.prompt.ask( 'no storm-control %s' % (k,))
elif v == None:
pass
return True
def set_cdp( self, port, value, other, enter_port_mode=True ):
""" set cdp status """
if value == True:
self.prompt.ask( 'cdp enable' )
elif value == False:
self.prompt.ask( 'no cdp enable')
else:
pass
return True
def _set_lldp( self, key, value ):
if value == True:
return self.prompt.ask( 'lldp %s' % (key,) )
elif value == False:
return self.prompt.ask( 'no lldp %s' % (key,))
return
def set_lldp( self, port, value, other, enter_port_mode=True ):
""" set lldp transmit """
keys = ( 'transmit', 'receive' )
if isinstance( value, dict ):
for k in keys:
if k in value:
self._set_lldp( k, value[k] )
elif isinstance( value, bool ):
for k in keys:
self._set_lldp( k, value )
return True
def set_bpduguard( self, port, value, other, enter_port_mode=True ):
""" set_bpduguard """
if value == True:
# logging.error('no spanning-tree bpduguard disable' )
return self.prompt.ask( 'no spanning-tree bpduguard disable' )
elif value == False:
# logging.error('spanning-tree bpduguard disable' )
return self.prompt.ask( 'spanning-tree bpduguard disable')
return True
def set_portfast( self, port, value, other, enter_port_mode=True ):
""" set_portfast """
if value == True:
# logging.error('no spanning-tree portfast disable' )
return self.prompt.ask( 'no spanning-tree portfast disable' )
elif value == False:
# logging.error('spanning-tree portfast disable' )
return self.prompt.ask( 'spanning-tree portfast disable')
return True
def set_dhcp_snooping( self, port, value, other, enter_port_mode=True ):
""" set_dhcp_snooping """
if value in ( 'trust', 'limit' ):
logging.error('setting dhcp snooping to %s' % (value,) )
return self.prompt.ask( 'ip dhcp snooping %s' % (value,) )
elif value in ( None, False ):
logging.error('removing dhcp snooping settings' )
return self.prompt.ask( 'no ip dhcp snooping trust') and self.prompt.ask( 'no ip dhcp snooping limit')
return False
class PortChannelsCiscoIos( PortChannels ):
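""" port channel groups and member states parsed from 'show etherchannel summary' """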
show_cmd = 'show etherchannel summary'
regexes = [
r'^\s*(?P<group>\d+)\s+(?P<port_channel>\S+)\((?P<port_channel_state>\S+)\)\s+(?P<protocol>\S+)\s+(?P<members>.*)$',
r'^\s+(?P<members>\D\S+)$'
]
members_regex = compile( r'\s*(?P<port>\S+)\((?P<state>\S+)\)\s*' )
state_map = {
'D': 'down',
'U': 'up',
'P': 'up',
'I': 'stand-alone',
'H': 'hot-standby',
's': 'suspended',
'r': 'removed',
'S': 'layer2',
'R': 'layer3',
'M': 'min-links not met',
}
def _get( self, *args, **kwargs ):
for b in self.prompt.tell_and_get_block( self.show_cmd ):
this = {}
po = None
members = []
for l in b:
# logging.debug(" L: %s" % (l))
for r in self.regexes:
m = match( r, l )
if m:
# logging.debug( " matched (po %s): %s" % (po,r,))
d = m.groupdict()
if 'port_channel' in d:
po = d['port_channel']
if po:
for k,v in d.iteritems():
if k == 'members':
members.append( v )
# logging.debug(" appended: %s (%s)" % (v, members))
else:
this[k] = v
this['members'] = []
# parse members
for l in members:
# logging.debug(" members: %s" % (l,))
for m in self.members_regex.finditer( l ):
d = m.groupdict()
if not d['state'] in self.state_map:
raise DeviceException, 'port channel state %s not defined' % (d['state'])
d['state'] = self.state_map[ d['state'] ]
this['members'].append( d )
if 'port_channel' in this:
# logging.debug(" >= %s (%s)" % (this,this['port_channel_state'][0]))
# parse po state
this['layer'] = self.state_map[ this['port_channel_state'][0] ]
this['state'] = self.state_map[ this['port_channel_state'][1] ]
del this['port_channel_state']
yield po, this
class ConfigCiscoIos( Config ):
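""" running-config retrieval and commit (wr mem, falling back to copy running-config startup-config) """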
header_skip_lines = 3
def get_running_config( self ):
# commands = [ 'show running-config all', 'show running-config' ]
commands = [ 'show running-config', ]
for cmd in commands:
c = [ i.rstrip() for i in self.prompt.tell( cmd, cursor=self.prompt.cursor( 'mode', 'enable' ), timeout=self.prompt.timeouts['long'] ) ]
if self.prompt.current_cursor == self.prompt.cursor( 'error', 'input' ):
continue
if len(c) > self.header_skip_lines + 1:
for x in xrange( 0, self.header_skip_lines ):
c.pop(0)
c.insert(0, '')
# print "%s"%c
return c
return None
def commit(self):
# try wr mem first (copy run start doesn't update the headers on the show run)
okay = self.prompt.ask( 'wr mem',
fail_okay=True,
cursor=self.prompt.cursor('mode','enable'),
timeout=self.prompt.timeouts['long']
)
if not okay:
okay = self.prompt.request( 'copy running-config startup-config',
cursor=self.prompt.cursor('mode','enable'),
timeout=self.prompt.timeouts['long'],
interact={
'question': "" # take default
}
)
return okay
class ModelCiscoIos( Model ):
"""
Model information for a generic Cisco IOS Switch
"""
def get( self, cached=False ):
return [ m['model'] for m in self.parent.get(cached=cached) ]
class FirmwareCiscoIos( Firmware ):
"""
Firmware components for a generic Cisco IOS switch
"""
def __is_3850(self):
is_3850 = False
for m in self.parent.model.get(cached=True):
if '3850' in m:
is_3850 = True
# 3850's
return is_3850
def transfer_firmware_image( self, *paths, **kwargs ):
# overwrite=True, image_only=True, dry_run=False, *paths ):
"""
copy the list of files over to the switch
"""
err = []
okay = []
is_3850 = self.__is_3850()
if not is_3850:
# create command line
cmd = 'archive download-sw '
if not 'overwrite' in kwargs:
kwargs['overwrite'] = True
if 'overwrite' in kwargs and kwargs['overwrite']:
cmd = cmd + ' /overwrite '
if 'image_only' in kwargs and kwargs['image_only']:
cmd = cmd + ' /imageonly '
cmd = cmd + ' '.join( paths )
if 'dry_run' in kwargs and kwargs['dry_run']:
logging.info('%s' % (cmd,))
return False
t = len(paths)*self.prompt.timeouts['very_long']
# logging.warn("CMD: %s" % cmd )
for l in self.prompt.tell( cmd, cursor=self.prompt.cursor('mode','enable'), timeout=t ):
logging.debug(" downloading...")
if match( '(\%)?(?i)Error\:? (?P<message>.*$)', l ):
err.append( l )
elif match( 'New software image installed in ', l ):
okay.append( l )
elif match( 'examining image...', l ):
logging.debug(' downloaded')
elif match( 'Deleting ', l ):
logging.debug(' clearing')
elif match( 'Installing ', l ):
logging.debug(' installing')
elif is_3850:
# interact={ 'Destination filename': "\n", 'Do you want to over write? [confirm]': 'y' }
# logging.error("INSTALL paths=%s, kwargs=%s" % (paths, kwargs))
filepath = paths[-1]
cmd = "copy %s flash:" % (filepath,)
# logging.error("CMD: %s" % (cmd, ))
logging.info(" downloading firmware...")
for l in self.prompt.tell( cmd, cursor=self.prompt.cursor('mode','enable'), timeout=self.prompt.timeouts['very_long'] ):
logging.debug("> %s" % (l,) )
if match( 'Accessing ', l ):
logging.debug("accessing...")
elif match( '\[OK', l ):
logging.debug('done')
# install
file = path.basename( filepath )
# cat3k_caa-universalk9.SPA.03.03.05.SE.150-1.EZ5.bin
cmd = 'software install file flash:%s on-reboot verbose' % (file,)
# logging.error("CMD: %s" % (cmd,) )
logging.info("installing firmware...")
for l in self.prompt.tell( cmd, cursor=self.prompt.cursor('mode','enable'), timeout=self.prompt.timeouts['very_long'] ):
logging.debug("> %s" % (l,) )
if search( 'SUCCESS: ', l ):
logging.debug('success')
elif search( 'Error', l ):
err.append( l )
if len(err):
raise DeviceException, ' '.join( err )
return len( okay ) > 0
def check_boot_variable( self, set_to=None ):
# show boot
boot_list = []
for l in self.prompt.tell( 'show boot', cursor=self.prompt.cursor('mode','enable') ):
m = search( r'^BOOT path-list\s*\:\s+(?P<path>.*)$', l )
if m:
boot_list.append(m.group('path'))
# TODO: compare bootlist variables against what is requested
logging.debug("BOOTLIST: " + str(boot_list))
return boot_list
def firmware_image_to_version( self, image_path ):
""" works out what the IOS version number is from the filename """
m = []
version = None
if m.append( search( r'(\d{3})\-(\d{2})\.(\w+)\.(tar|bin)$', image_path ) ) or m[-1]:
# major minor
this = m[-1].group(1)
version = this[0] + this[1] + '.' + this[2]
# bracket
this = m[-1].group(2)
version = version + '(' + this + ')'
# final
version = version + m[-1].group(3)
# 3850's
elif m.append( search( r'(\d{2}\.\d{2}\.\d{2}\.SE)\.(tar|bin)$', image_path ) ) or m[-1]:
version = m[-1].group(1)
version = version.replace( '.SE', 'SE' )
return version
def firmware_filename( self, image, version ):
if self.__is_3850():
# logging.error("IMAGE %s %s" % (image,version))
# cat3k_caa-universalk9.SPA.03.03.05.SE.150-1.EZ5.bin
# cat3k_caa-universalk9 03.03.05SE
v = version.replace('SE','.SE')
return "%s.SPA.%s.150-1.EZ5.bin" % (image,v)
else:
# maps the image and version strings to a filename
image = image.replace( '-m', '-' )
if not image[-1] == '-':
image = image + '-'
version = version.replace('.','').replace('(','-').replace(')','.')
return "%star.%s.tar" % (image, version)
def image( self ):
return [ m['sw_image'] for m in self.parent.get() ]
def version(self):
return [ m['sw_version'] for m in self.parent.get() ]
# def upgrade(self):
class Layer1CiscoIos( Layer1 ):
""" returns cdp and lldp information from the switch """
cdp_matches = {
'neighbour': r'^Device ID:\s*(?P<peer_device>\S+)',
'ip_address': r'\s+IP(v4)? (a|A)ddress: (?P<peer_address>\S+)',
'platform': r'^Platform: (?P<peer_platform>.*)\s*,\s+Capabilities: (?P<peer_capabilities>.*)',
'interfaces': r'^Interface: (?P<physical_port>\S+),\s+Port ID \(outgoing port\): (?P<peer_physical_port>\S+)',
'holdtime': r'^Holdtime\s*: (?P<holdtime>\d+) sec',
'new': r'^---------',
}
lldp_matches = {
'peer_ip_address': r'Chassis id: (?P<peer_ip_address>\S+)',
'peer_mac_address': r'Port id: (?P<peer_mac_address>\S+)',
'capabilities': r'Enabled Capabilities: (?P<peer_capabilities>\S+)',
'platform': r'\s+Model: (?P<peer_platform>.*)\s*',
'vendor': r'\s+Manufacturer: (?P<peer_vendor>.*)\s*',
'new': r'^----------'
}
def parse_item(self,d):
# logging.warn('parsing %s' % (d,))
if 'peer_device' in d:
d['peer_device'] = d['peer_device'].lower()
if '(' in d['peer_device']:
bits = d['peer_device'].replace(')','').split('(')
d['peer_device'] = bits.pop(0)
try:
d['peer_serial'] = bits.pop(0)
except:
pass
if 'peer_address' in d and to_ip( d['peer_address'] ):
d['peer_ip_address'] = d['peer_address']
if 'peer_capabilities' in d:
for c in d['peer_capabilities'].split():
if c == 'Trans-Bridge':
d['capability_bridge'] = True
for c in d['peer_capabilities'].split(','):
if c == 'B':
d['capability_bridge'] = True
elif c == 'T':
d['capability_telephone'] = True
for p in ( 'peer_physical_port', 'physical_port' ):
if p in d:
d[p] = truncate_physical_port( d[p] )
for i in ( 'peer_capabilities', 'peer_address' ):
if i in d:
del d[i]
return d
def add_item( self, items, d ):
if 'physical_port' in d:
d['physical_port'] = truncate_physical_port( d['physical_port'] )
if not d['physical_port'] in items:
items[d['physical_port']] = {}
items[ d['physical_port'] ].update( self.parse_item( d ) )
def _get( self, *args, **kwargs ):
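""" yield (local physical port, neighbour info) from 'show cdp nei detail' and, where supported, 'show lldp nei' / 'show lldp nei detail' """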
items = {}
this = {}
for l in self.prompt.tell( 'show cdp nei detail', cursor=[self.prompt.cursor('mode','enable'), self.prompt.cursor('mode','exec')] ):
# logging.warn("> %s" % l,)
for n in self.cdp_matches:
m = match( self.cdp_matches[n], l )
if m:
# new item, clear
if n == 'new':
if len(this.keys()):
self.add_item( items, this )
this = {}
else:
d = m.groupdict( m )
this.update( d )
# logging.warn(" - %s: %s" % (n,this))
# don't forget last item
self.add_item( items, this )
# do lldp
try:
this = {}
# stupid cmd output doesn't support showing the local interface with detail,
# so we have to run two commands
lldp_lookup = {}
for l in self.prompt.tell( 'show lldp nei', cursor=[self.prompt.cursor('mode','enable'), self.prompt.cursor('mode','exec')] ):
m = match( r'(?P<peer_ip_address>\S+) \s+ (?P<physical_port>\S+) \s+ (?P<holdtime>\d+) \s+ (?P<peer_capability>\S+) \s+ (?P<peer_mac_address>\S+)', l )
if m:
d = m.groupdict()
lldp_lookup[ d['peer_mac_address'] ] = d
# logging.error("LLDP LOOKUP %s" % (lldp_lookup,))
for l in self.prompt.tell( 'show lldp nei detail', cursor=[self.prompt.cursor('mode','enable'), self.prompt.cursor('mode','exec')] ):
for n in self.lldp_matches:
m = match( self.lldp_matches[n], l )
if m:
if n == 'new':
# lookup the physical port for this
if len(this.keys()):
if this['peer_mac_address'] in lldp_lookup:
this['physical_port'] = lldp_lookup[ this['peer_mac_address' ] ]['physical_port']
# check others?
self.add_item( items, this )
else:
d = m.groupdict(m)
this.update(d)
# don't forget last item
self.add_item( items, this )
except:
logging.debug("no lldp support")
pass
# return all
for k,v in items.iteritems():
yield k,v
return
def on_peer_device(self):
return self._on('peer_device')
def on_physical_port(self):
return self._on('physical_port')
def on_peer_physical_port(self):
return self._on('peer_physical_port')
def on_peer_ip_address(self):
return self._on('peer_ip_address')
def on_peer_mac_address(self):
return self._on('peer_mac_address')
def on_capability_bridge(self):
return self._on('capability_bridge')
def on_capability_telephone(self):
return self._on('capability_telephone')
class VlanCiscoIos( Vlan ):
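""" vlan table from 'show vlan brief', with add/remove support (including the old 'vlan database' mode on XL switches) """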
def _get( self, *args, **kwargs ):
tries = 0
max_tries = 4
n = 0
# for i in self.prompt.tell( 'show vlan brief'):
# logging.error('%s' % (i,))
while n == 0 and tries < max_tries:
tries = tries + 1
for d in self.prompt.tell_and_match(
'show vlan brief',
r'^(?P<number>\d+)\s+(?P<name>\S+)\s+(?P<status>[/a-z]+)\s*' ):
n = n + 1
d['number'] = int(d['number'])
yield d['number'], d
return
def add( self, number, name ):
ok = False
if self.prompt.ask( 'vlan %s' % (number,), cursor=self.prompt.cursor('mode','config') ):
ok = self.prompt.ask( ' name %s' % (name,), cursor=self.prompt.cursor('mode','config-vlan'))
else:
# old xl's, need vlan database
if self.prompt.ask( 'vlan database', cursor=self.prompt.cursor('mode','enable') ):
ok = self.prompt.ask(' vlan %s name %s' % (number, name), cursor=self.prompt.cursor('mode','config-vlan'))
self.prompt.ask("exit")
if ok:
# force refresh on next poll
self._cache = None
return ok
def remove( self, number ):
if not int(number) in dict(self):
raise DeviceException, 'vlan %s does not exist' % (number)
self._cache = None
return self.prompt.ask( 'no vlan %s' % (number,), cursor=self.prompt.cursor('mode','config') )
class SpanningTreeCiscoIos( SpanningTree ):
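""" per-vlan spanning-tree information from 'show spanning-tree'; get() parses 'show spanning-tree interface <port> detail' """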
matches = [
r'^ Port \d+ \((?P<port>(\w|\/)+)\) of VLAN(?P<vlan>\d+) is (?P<mode>.*)$',
# { 'regex': r'^ Designated root has priority (?P<root_priority>\d+), address (?P<root_address>.*)$', 'fields': ['root_priority', 'root_address'] },
r'^ The port is in the (?P<portfast>portfast) mode',
r'^ BPDU: sent \d+, received \d+$',
]
other_matches = {
'vlan': r'^VLAN0{0,}(?P<vlan>\d+)',
'protocol': r'^ Spanning tree enabled protocol (?P<protocol>\S+)\s*',
'priority': r'\s+(?P<group>\w+)\s+ID\s+Priority\s+(?P<priority>\d+)\s*',
'address': r'\s+Address\s+(?P<address>\w+\.\w+\.\w+)\s*',
'cost': r'\s+Cost\s+(?P<cost>\d+)\s*',
'is_root': r'\s+This bridge is the root',
'port': r'\s+Port\s+(?P<port_number>\d+)\s+\((?P<physical_port>.*)\)\s*',
'int': r'(?P<physical_port>\S+)\s+(?P<role>\w+) (?P<status>\w+) (?P<cost>\d+)\s+(?P<port_priority>\d+)\.(?P<port_number>\d+)\s+(?P<port_type>.*)\s*',
}
def _get(self, *args, **kwargs ):
item = {}
got_vlan = False
got_interface = False # mark end of all interfaces, during find should be None
for l in self.prompt.tell('show spanning-tree', cursor=self.prompt.cursor('mode','enable') ):
logging.debug(">: " + str(l))
for t,r in self.other_matches.iteritems():
# logging.debug(" t: %s" % t)
m = search( r, l )
if m:
if t == 'vlan':
if 'vlan' in item:
if 'group' in item:
del item['group']
# logging.debug("YIELD1: %s"%(item,) )
yield item['vlan'], item
logging.debug('clearing')
item = {}
got_vlan = True
elif t == 'is_root':
logging.debug(" setting as root bridge")
item['root_cost'] = 0
item['root_port'] = 0
# pre find the group
d = m.groupdict()
if 'group' in d:
item['group'] = d['group']
# add ports to an array
if t == 'int':
got_interface = None
if not 'ports' in item:
item['ports'] = []
item['ports'].append( d )
logging.debug(' >= %s' % (d,))
else:
for k,v in d.iteritems():
g = str(k)
if 'group' in item:
g = item['group'].lower() + '_' + str(k)
item[g] = v
logging.debug(' >= %s\t: %s' % (g,v))
# mark end
if got_interface == None and l == '':
got_interface = True
if got_vlan and got_interface:
got_vlan = False
got_interface = False
if 'group' in item:
del item['group']
# logging.debug("YIELD2: %s"%(item,) )
yield item['vlan'], item
return
def on_root(self):
return self._on('root_address')
def on_cost(self):
return self._on('root_cost')
def on_vlan(self):
return self._on('vlan')
def on_priority(self):
return self._on('root_priority')
def on_port(self):
return self._on('root_port')
def on_port_number(self):
return self._on('root_port_number')
def on_root_physical_port(self):
return self._on('root_physical_port')
def on_bridge_priority(self):
return self._on('bridge_priority')
def get(self, port=None ):
this = {}
for o in self.prompt.tell( 'show spanning-tree interface ' + str(port) + ' detail', cursor=self.prompt.cursor('mode','enable') ):
for m in self.matches:
# logging.debug(" looking for: " + str(d['fields']))
ok = match( m, o )
if ok:
g = ok.groupdict()
# create new set if necessary
if len(g) == 0:
logging.debug("new spanning tree found " + str(this))
# commit last set
if len(this.keys()) > 0:
if not 'portfast' in this:
this['portfast'] = False
else:
this['portfast'] = True
logging.debug('THIS: ' + str(this))
k = this['port'].lower() + '-' + this['vlan'].lower()
yield this
this = {}
# grab info
for f in g:
# if found new set
this[f] = g[f]
logging.debug(" >: found " + str(f) + " = " + str(this[f]))
return
class MacAddressCiscoIos( MacAddress ):
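""" mac address table entries; picks the appropriate show command for old sups and XLs and matches IOS, Nexus and IOS-XL output formats """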
regexes = [
r'\*?\s*(?P<vlan>\d+)\s+(?P<mac_address>\w{4}\.\w{4}\.\w{4})\s+(?P<type>\w+( pv)?)\s*(?P<learned>\w+)?\s+(?P<age>(\d+|-))?\s*(?P<physical_port>(\w+|\/)+)$', # 'ios'
r'^(?P<source>.*)\s+(?P<vlan>\d+)\s+(?P<mac_address>\w{4}\.\w{4}\.\w{4})\s+(?P<type>\w+)\s+(?P<age>(\d|\-)+)\s+.*\s+.*\s+(?P<physical_port>.*)\s*$', # 'nexus'
r'^(?P<mac_address>\w{4}\.\w{4}\.\w{4})\s+(?P<type>\w+)\s+(?P<vlan>\d+)\s+(?P<physical_port>(\w+|\/)+)$', # 'ios-xl'
]
def _get( self, *args, **kwargs ):
self._cache = {}
cmd = 'show mac address-table '
for o in self.prompt.tell( 'show version | inc Internetwork|-XL'):
# old sup's
if o == 'Cisco Internetwork Operating System Software':
cmd = 'show mac-address-table'
# old xl's
elif '-XL' in o:
cmd = 'show mac'
if 'port' in kwargs:
cmd = cmd + ' | inc ' + str(kwargs['port'])
for d in self.prompt.tell_and_match( cmd, self.regexes, timeout=self.prompt.timeouts['long'] ):
for i in ( 'mac_address', 'type' ):
d[i] = d[i].lower()
d['vlan'] = int( d['vlan'] )
if 'learned' in d and d['learned'] == 'Yes':
d['status'] = 'learned'
if not 'status' in d or d['status'] == None:
d['status'] = 'learned'
# logging.warn("HERE: %s" % (d,))
yield d['mac_address'], d
return
class Layer2CiscoIos( Layer2 ):
vlan = VlanCiscoIos
spanningtree = SpanningTreeCiscoIos
mac_address = MacAddressCiscoIos
class RoutesCisco( Routes ):
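""" routing table entries from 'show ip route', run per vrf when any are configured; output indentation is used to tie next hops to their prefixes """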
matches = {
'o' : r'^O (?P<ospf_type>\w+)?\s+ (?P<prefix>\d+\.\d+\.\d+\.\d+)/(?P<prefix_len>\d+)',
'o2': r'^O(\s+|\*)(?P<ospf_type>\w+)?\s+(?P<prefix>\d+\.\d+\.\d+\.\d+)(/(?P<prefix_len>\d+))?( \[\d+/\d+\] via (?P<next_hop>\d+\.\d+\.\d+\.\d+), )?',
'p': r'^\s+\[\d+/\d+\] via (?P<next_hop>\d+\.\d+\.\d+\.\d+), ',
'h' : r'^\s+(?P<prefix>\d+\.\d+\.\d+\.\d+)(/(?P<prefix_len>\d+))',
'c' : r'^(C|L)\s* (?P<prefix>\d+\.\d+\.\d+\.\d+)(/(?P<prefix_len>\d+))? is directly connected, (?P<interface>.*)$',
's' : r'^S\s+(?P<prefix>\d+\.\d+\.\d+\.\d+)(/(?P<prefix_len>\d+) .*via (?P<next_hop>\d+\.\d+\.\d+\.\d+))',
}
command = 'show ip route'
def vrfs( self ):
vrfs = {}
try:
for o in self.prompt.tell( 'show vrf brief | inc ip', cursor=self.prompt.cursor('mode','enable') ):
# logging.info("O %s" % (o,) )
m = match( r'^\s+(?P<name>\w+)\s+(?P<rd>(\d|\:)+)\s+', o )
if m:
d = m.groupdict()
# logging.info(" found %s" % (d,))
vrfs[d['name']] = d['rd']
except:
pass
return vrfs
def _get( self, *args, **kwargs ):
this = {}
indent = 0
last_indent = None
# determine vrfs
vrfs = self.vrfs()
if len(vrfs.keys()):
for vrf in vrfs:
for k, v in self.do( self.command + ' vrf ' + vrf ):
# add vrf info
v['vrf'] = vrf
v['vrf_rd'] = vrfs[vrf]
yield k, v
else:
for k,v in self.do( self.command ):
yield k,v
return
def do( self, command ):
this = {}
indent = 0
last_indent = None
for o in self.prompt.tell( command, cursor=self.prompt.cursor('mode','enable'), timeout=self.prompt.timeouts['long'] ):
# use the fact that the indentation levels represent how the lines relate
logging.debug("")
logging.debug('%s'%(o,))
matched = []
t = {}
for k,r in self.matches.iteritems():
m = match( r, o )
if m:
matched.append( k )
for x,y in m.groupdict().iteritems():
# remove any Nones
if y:
t[x] = y
if len(matched) > 0:
# only the prefix defines how indented it is, get the number of chars from the start
if 'prefix' in t:
indent = o.index(t['prefix'])
# logging.debug("indent: %s" % (indent,))
# else:
# logging.debug("no index - inherit")
logging.debug("%s%s> %s" % (indent, matched,t,))
# assume all not local for now
t['local'] = False
# if indented less, then wipe out
if last_indent == None or 'h' in matched: # initial or header
this = t
elif indent > last_indent or not 'prefix' in t:
this = dict( this.items() + t.items() )
elif indent == last_indent and 'c' in matched:
# directly connected needs to inherit ('c')
this = dict( this.items() + t.items() )
this['local'] = True
else:
this = t
# don't return the subnet headers
if not 'h' in matched and not 'o' in matched:
# logging.debug(">>> %s" % (this))
if 'netmask' in this and not 'prefix_len' in this:
this['prefix_len'] = str(netmask_to_prefixlen( this['netmask'] ))
if 'prefix_len' in this and ( not 'netmask' in this or this['netmask'] == 'None'):
this['netmask'] = str( prefixlen_to_netmask( this['prefix_len'] ) )
if not 'next_hop' in this:
this['next_hop'] = '0.0.0.0'
n = this['prefix']
yield n, this
last_indent = indent
return
class ArpsCisco( Arps ):
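""" arp and ipv6 neighbour entries from 'show ip arp' and 'show ipv6 neighbors' """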
matches = {
'show ip arp': r'^(\w+\s+)?(?P<ip_address>\d+\.\d+\.\d+\.\d+)\s+(?P<age>(\d|\:|\-)+)\s+(?P<mac_address>\w+\.\w+\.\w+) .* (?P<interface>(\w|\-|\+|\/|\.)+)\s*$',
'show ipv6 neighbors': r'^(?P<ip_address>(\w|\:)+)\s+(?P<age>\d+)\s+(?P<mac_address>\w+\.\w+\.\w+)\s+(?P<state>\w+)\s+(?P<interface>\w+)',
}
def _get( self, *args, **kwargs ):
for cmd in self.matches.keys():
for d in self.prompt.tell_and_match_block( cmd, [self.matches[cmd]], timeout=self.prompt.timeouts['medium'], all_matches=True, error_okay=True ):
logging.debug(" d: %s" % d)
if 'mac_address' in d and 'ip_address' in d:
d['mac_address'] = d['mac_address'].lower()
d['ip_address'] = d['ip_address'].lower()
yield d['mac_address'] + '-' + d['ip_address'], d
return
class Layer3CiscoIos( Layer3 ):
routes = RoutesCisco
arps = ArpsCisco
class TransceiverCiscoIos( Transceiver ):
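""" transceiver temperature and tx/rx power from 'show int transceiver', with the media type filled in from 'show int status' """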
def _get( self, *args, **kwargs ):
t = {}
for d in self.prompt.tell_and_match(
'show int transceiver',
r'^(?P<port>(\w|\/)+) \s+ (?P<temp>\d+\.\d+) .* (?P<tx>(-)?\d+\.\d+) \s+ (?P<rx>(-)?\d+\.\d+)\s*$',
timeout=self.prompt.timeouts['long'] ):
# yield d['port'], d
t[d['port']] = d
# get the type for each
for i in self.prompt.tell( 'show int status' ):
a = i.split()
for n,v in enumerate(a):
# for the 'base' for ethernet type transceivers
if search(r'ase',v):
if a[0] in t:
logging.debug("found on %s a %s"%(a[0],a[n:]))
t[a[0]]['type'] = ' '.join(a[n:])
for k,v in t.iteritems():
yield k,v
return
class ModuleCiscoIos( Module ):
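""" module inventory from 'show module'; sub-modules are yielded with slot keys of the form <slot>.<n> """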
def _get( self, *args, **kwargs ):
for d in self.prompt.tell_and_match_by_index(
'show module',
'slot',
[ '^\s+(?P<slot>\d+)\s+(?P<ports>\d+)\s+(?P<description>.*)\s+(?P<model>WS\S+)\s+(?P<serial>\S+)\s*$', '^\s+(?P<slot>\d+)\s+(?P<mac_ranges>\S+ to \S+)\s+(?P<hardware_version>\S+)\s+(?P<firmware_version>\S+)\s+(?P<software_version>\S+)\s+(?P<status>\S+)\s*$', '^\s+(?P<slot>\d+)\s+(?P<diag_status>\S+)\s*$' ],
timeout=self.prompt.timeouts['medium'] ):
yield d['slot'], d
# need a per-slot index for the sub modules
sub_index = {}
for d in self.prompt.tell_and_match(
'show module',
r'^\s+(?P<slot>\d+)\s+(?P<description>.*)\s+(?P<model>(W|V)S\-\S+)\s+(?P<serial>\S+)\s+(?P<hardware_version>\S+)\s+(?P<status>\S+)\s*$'
):
s = d['slot']
# logging.error("SLOT: %s" % s)
if not s in sub_index:
sub_index[s] = 1
else:
sub_index[s] = sub_index[s] + 1
d['slot'] = str(d['slot']) + '.' + str(sub_index[s])
yield d['slot'], d
return
class FRUCiscoIos( FRU ):
transceiver = TransceiverCiscoIos
module = ModuleCiscoIos
class PasswordCiscoIos( Password ):
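""" console/vty line passwords, local users, enable secret and snmp community management """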
def _login( self, interface, password ):
if self.prompt.ask(str(interface), cursor=self.prompt.cursor('mode','enable')):
if self.prompt.current_cursor == self.prompt.cursor('mode','config-line'):
return self.prompt.ask( ' password ' + str(password) )
def console_login( self, password, clear_existing=True ):
self._login( 'con 0', password )
def vty_login( self, password, clear_existing=True ):
self._login( 'vty 0 4', password )
self._login( 'vty 5 15', password )
def login( self, user, password, level=15, clear_existing=True ):
self.prompt.ask('username ' + str(user) + " privilege " + str(level) + ' secret ' + str(password), cursor=self.prompt.cursor('mode','enable') )
def enable( self, password, level=15, clear_existing=True ):
return self.prompt.ask( 'enable secret ' + str(password), cursor=self.prompt.cursor('mode','config') )
def get_snmp_community( self, type=None ):
cmd = 'show run '
if not type == None:
cmd = cmd + '| inc RO '
cmd = cmd + '| inc snmp-server community'
for l in self.prompt.tell( cmd, cursor=self.prompt.cursor('mode','enable') ):
yield l
return
def snmp( self, community, type='RO', access_list=None, clear_existing=True ):
if clear_existing:
cur = [ s for s in self.get_snmp( type=type )]
for c in cur:
self.prompt.ask( 'no ' + str(c) )
cmd = 'snmp-server community ' + str(community) + ' ' + str(type)
if not access_list == None:
cmd = cmd + ' ' + str(access_list)
return self.prompt.ask( cmd, cursor=self.prompt.cursor('mode','enable') )
def snmp_ro(self, community, access_list=20, clear_existing=True):
return self.snmp( community, type='RO' )
def snmp_rw(self, community, access_list=21, clear_existing=True):
return self.snmp( community, type='RW' )
class UsersCiscoIos( Users ):
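""" active login sessions parsed from 'show users' """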
matches = [
r'^\s+\d (?P<line>\w+\s\d) \s+ (?P<user>(\w|\.|\-|\@)+) \s+ \w+ \s+ \d+',
r'^\s+\d (?P<line>\w+\s\d) \s+ \w+ \s+ \d+',
]
def get( self ):
return self._yield_matches( 'show users | exc \*', cursor=self.prompt.cursor('mode','enable'), regex_matches=self.matches )
class SystemCiscoIos( System ):
config = ConfigCiscoIos
model = ModelCiscoIos
firmware = FirmwareCiscoIos
fru = FRUCiscoIos
password = PasswordCiscoIos
users = UsersCiscoIos
__cache = None
def get(self, cached=False ):
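""" parse 'show version' into a list of member dicts (number, model, sw_image, sw_version), one per stack member """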
if cached and self.__cache:
return self.__cache
c = []
members = []
m = []
item = { 'number': 0 }
for l in self.prompt.tell( 'show version', cursor=self.prompt.cursor( 'mode', 'enable' ), output_wait=0.03, timeout=self.prompt.timeouts['medium'] ):
# logging.debug(" > " + str(l))
if search( r'^\*?\s+\d+\s+\d+', l ):
stuff = l.split()
logging.debug(" match search %s: %s" % (l,stuff))
if len(stuff) > 4:
# remove state for 3850's
if stuff[-1] == 'INSTALL':
_ = stuff.pop()
item = {
'sw_image': stuff.pop(),
'sw_version': stuff.pop(),
'model': stuff.pop(),
None: stuff.pop(),
'number': int(stuff.pop()) - 1,
}
del item[None]
members.append( item )
elif m.append( search( r'cisco ((\w|\-)+) ', l ) ) or m[-1]:
# logging.debug(" match model number")
item['model'] = m[-1].group(1)
elif m.append( search( r'^Cisco ((\w|\-)+) \((?:PowerPC|MIPS)\) processor', l ) ) or m[-1]:
item['model'] = m[-1].group(1)
elif m.append( search( r'^Cisco IOS Software, .* Software \((.*)\), Version (.*),', l ) ) or m[-1]:
# logging.debug(" match versions")
item['sw_image'] = m[-1].group(1)
item['sw_version'] = m[-1].group(2)
# logging.debug('model info: ' + str(members))
if len(members) == 0:
members.append( item )
output = sorted( members, key=lambda k: k['number'] )
if cached:
self.__cache = output
return output
def reload( self, at=None, force=False, commit=False ):
""" reload a device """
return self.prompt.ask('reload', interact={
'reload': '\n' if force else 'n',
'config_modified': 'y' if commit else 'n',
})
class Rfc2863CiscoIos( Rfc2863 ):
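""" per-interface counters (rfc2863 style) parsed from 'show int' """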
port_regexes = [
r'^(?P<port>\S+) is (?P<admin_status>\S+), line protocol is (?P<oper_status>\S+)',
r'^\s+Description: (?P<alias>.*)\s*$',
r'^\s+MTU (?P<mtu>\d+) bytes, BW (?P<speed>\d+) ',
r'^\s+(?P<duplex>\S+)-duplex, ',
r'^\s+Input queue: (?P<in_queue_size>\d+)/(?P<in_queue_max>\d+)/(?P<in_queue_drops>\d+)/(?P<in_queue_flushes>\d+) \(size/max/drops/flushes\); Total output drops: (?P<total_output_drops>\d+)',
r'^\s+Queueing strategy: (?P<queueing_strategy>\S+)',
r'^\s+Output queue: (?P<output_queue_size>\d+)/(?P<output_queue_max>\d+) \(size/max\)',
#' L2 Switched: ucast: 324264128 pkt, 32732949957 bytes - mcast: 13703676 pkt, 1733488346 bytes',
r'^\s+L2 Switched: ucast: (?P<l2_ucast_pkt>\d+) pkt, (?P<l2_ucast_bytes>\d+) bytes - mcast: (?P<l2_mcast_pkt>\d+) pkt, (?P<l2_mcast_bytes>\d+) ',
#' L3 in Switched: ucast: 138697005099 pkt, 166511592421731 bytes - mcast: 0 pkt, 0 bytes mcast',
r'^\s+L3 in Switched: ucast: (?P<l3_in_ucast_pkt>\d+) pkt, (?P<l3_in_ucast_bytes>\d+) bytes .*mcast: (?P<l3_in_mcast_pkt>\d+) pkt, (?P<l3_in_mcast_bytes>\d+) ',
#' L3 out Switched: ucast: 112629132385 pkt, 59993629187096 bytes mcast: 0 pkt, 0 bytes',
r'^\s+L3 out Switched: ucast: (?P<l3_out_ucast_pkt>\d+) pkt, (?P<l3_out_ucast_bytes>\d+) bytes .*mcast: (?P<l3_out_mcast_pkt>\d+) pkt, (?P<l3_out_mcast_bytes>\d+) ',
#' 146265544928 packets input, 167308815553548 bytes, 0 no buffer',
r'\s+(?P<input_pkts>\d+) packets input, (?P<input_bytes>\d+) bytes, (?P<input_no_buffer>\d+) no buffer',
#' Received 14764021 broadcasts (0 IP multicasts)',
r'\s+Received (?P<input_bcasts>\d+) broadcasts \((?P<input_ip_mcast>\d+) IP multicasts',
#' 0 runts, 6380 giants, 0 throttles',
r'\s+(?P<input_runts>\d+) runts, (?P<input_giants>\d+) giants, (?P<input_throttles>\d+) throttles',
#' 0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored',
r'\s+(?P<input_errors>\d+) input errors, (?P<input_crc>\d+) CRC, (?P<input_frame_errors>\d+) frame, (?P<input_overrun>\d+) overrun, (?P<input_ignored>\d+) ignored',
#' 0 watchdog, 0 multicast, 0 pause input',
r'\s+(?P<input_watchdog>\d+) watchdog, (?P<input_mcast>\d+) multicast, (?P<input_pause>\d+) pause input',
#' 0 input packets with dribble condition detected',
r'\s+(?P<input_dribble>\d+) input packets with dribble condition detected',
# #' 113214434454 packets output, 60416695779182 bytes, 0 underruns',
r'\s+(?P<output_pkts>\d+) packets output, (?P<output_bytes>\d+) bytes, (?P<output_underruns>\d+) underruns',
# #' 0 output errors, 0 collisions, 1 interface resets',
r'\s+(?P<output_errors>\d+) output errors, ((?P<output_collisions>\d+) collisions, )?(?P<interface_resets>\d+) interface resets',
# #' 0 babbles, 0 late collision, 0 deferred',
r'\s+(?P<output_babbles>\d+) babbles, (?P<output_late_collisions>\d+) late collision, (?P<output_deferred>\d+) deferred',
# #' 0 lost carrier, 0 no carrier, 0 PAUSE output',
r'\s+(?P<lost_carrier>\d+) lost carrier, (?P<no_carrier>\d+) no carrier, (?P<pause_output>\d+) PAUSE output',
# #' 0 output buffer failures, 0 output buffers swapped out']
r'\s+(?P<output_buffer_failures>\d+) output buffer failures, (?P<output_buffers_swapped_out>\d+) output buffers swapped out',
]
def _get( self, *args, **kwargs ):
for d in self.prompt.tell_and_match_block( 'show int', self.port_regexes, output_wait=5, timeout=self.prompt.timeouts['medium'] ):
logging.debug("="*80)
if 'port' in d and d['port']:
p = truncate_physical_port( d['port'] )
d['port'] = p
yield p, d
class StatsCiscoIos( Stats ):
rfc2863 = Rfc2863CiscoIos
class CiscoIos( Device ):
"""
Device definition for a generic cisco ios switch
"""
prompt = PromptCiscoIos
system = SystemCiscoIos
stats = StatsCiscoIos
ports = PortsCiscoIos
portchannels = PortChannelsCiscoIos
layer1 = Layer1CiscoIos
layer2 = Layer2CiscoIos
layer3 = Layer3CiscoIos
def _validate( self ):
for l in self.prompt.tell( 'show version '):
# logging.error("> %s" % l)
if match( r'^Cisco ', l ) and ( search( r'IOS', l ) or search( r'Internetwork Operating System', l ) ):
# logging.error("OK")
return True
# FAT cisco access points have their own driver
elif search( r' Radio', l ) or search( r' AIR-', l ):
return False
return False
def validate( self ):
logging.debug('validating...')
if self._validate():
return True
# try again, as sometimes ios doesn't work first time
elif self._validate():
return True
raise IncompatibleDeviceException, 'not a cisco_ios'
|
SLAC-OCIO/net-config
|
lib/netconfig/drivers/cisco_ios.py
|
Python
|
gpl-2.0
| 85,292
|
[
"ASE"
] |
18afc1a93c5632bbb4575f4275148b131edcd604a119bfde65e65ed26f9c9af5
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
"""Script to call modules that generate the blog's content, then posts that
content to ulyssesredux.tumblr.com.
Each chapter is written by a different script that resides in the
chapter_scripts/ directory. Each of these scripts then draws from
texts in the corpora/ directory.
This program is licensed under the GPL v3 or, at your option, any later
version. See the file LICENSE.md for a copy of this licence.
"""
import datetime
import html
import importlib
import json
import math
import re
import sys
sys.path.append('/UlyssesRedux/scripts/')
from directory_structure import * # Gets us the listing of file and directory locations.
import utility_scripts.current_run_data_utils as cr_data
import patrick_logger, introspection # From https://github.com/patrick-brian-mooney/personal-library
from introspection import dump_str
from patrick_logger import log_it
import social_media # From https://github.com/patrick-brian-mooney/personal-library
with open('/social_media_auth.json', encoding='utf-8') as auth_file:
ulysses_client = social_media.Tumblpy_from_dict(json.loads(auth_file.read())['ulysses_client'])
recurring_tags = ['Ulysses (novel)', 'James Joyce', '1922', 'automatically generated text', 'Patrick Mooney']
ulysses_chapters = open(ulysses_chapter_titles_file).readlines()
patrick_logger.verbosity_level = 3
# First, set up parameters.
blog_url = 'http://ulyssesredux.tumblr.com/'
# Some utility routines
def out_of_content_warning():
"""Remind me that we're out of content."""
log_it("WARNING: There's work to be done! You have to reset the blog state on ulyssesredux.tumblr.com to get it working again! A full Ulysses project is done and needs to be cleared!")
log_it(" REMINDER: make this a more prominent warning!") # FIXME
sys.exit(2)
if __name__ == "__main__":
current_run_data = cr_data.read_current_run_parameters()
try:
with open('%s/index.html' % current_run_directory, 'r') as index_file:
the_lines = index_file.readlines()
which_script = 1 + len(the_lines) # If so far we've got, say, six lines in the file, we need to run script #7.
except (FileNotFoundError,):
which_script = 1
the_lines = [][:]
if which_script not in range(1,19):
out_of_content_warning()
# Post parameters
the_title = ulysses_chapters[ which_script - 1 ].strip()
recurring_tags.append(the_title)
current_chapter_temporary_tags = current_run_data['ch%02dtags' % which_script]
temporary_tags = [l.strip() for l in open('%s/temporary-tags' % current_run_directory).readlines()]
the_tags = ', '.join(recurring_tags + temporary_tags) + ', ' + current_chapter_temporary_tags
script_path = '%s.ch%02d' % (daily_scripts_directory, which_script)
print("INFO: About to run script %s.py." % script_path)
# OK, import the relevant chapter script as a module and write the story.
the_script = importlib.import_module(script_path)
the_content = the_script.write_story()
print("content generated ...\n\n ... postprocessing...")
content_lines = the_content.split("\n")
# Now, split the first paragraph into sentences, keeping the final punctuation and joining it back to the end of the sentence.
first_sentence = ''.join(list(filter(None, re.split("([!?.]+)", content_lines[0])))[0:2]) # We'll use this as the summary in the table of contents.
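    # Illustrative note, not part of the original script (the input is hypothetical):
    # re.split("([!?.]+)", "Yes. And no.") yields ['Yes', '.', ' And no', '.', ''];
    # dropping empty pieces and joining the first two recovers "Yes." as the summary.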
content_lines = [ "<p>" + the_line.strip() + "</p>" for the_line in content_lines if len(the_line.strip()) > 0 ]
the_content = '\n'.join(content_lines)
print("INFO: postprocessed content is:\n\n" + "\n".join(content_lines))
print('INFO: Chapter title is "%s."' % the_title)
print("INFO: tags are %s." % str(recurring_tags + temporary_tags))
# All right, post this content
print('\nINFO: Attempting to post the content')
the_status, the_tumblr_data = social_media.tumblr_text_post(ulysses_client, the_tags, the_title, the_content)
print('\nINFO: the_status is: ' + dump_str(the_status))
print('\nINFO: the_tumblr_data is: ' + dump_str(the_tumblr_data))
new_post_url = blog_url + "post/" + str(the_status['id'])
# Assemble some text to write to the index file
html_tags = ' | '.join([ '<a rel="me muse" href="%s">%s</a>' % (html.escape(blog_url + "tagged/" + the_tag), the_tag) for the_tag in the_tags.split(', ') ])
# Avoid using a really really long first sentence as a summary (a problem sometimes in tests with "Penelope").
while len(first_sentence) > 600 or len(first_sentence.split(' ')) > 150:
first_sentence = ' '.join(first_sentence.split(' ')[0 : math.floor(len(first_sentence.split(' ')) * 0.75)]) + '…' # Lop off the last quarter and try again.
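    # Rough sketch of the truncation above (numbers are hypothetical): a 200-word
    # first sentence is cut to its first 150 words on one pass (200 * 0.75), and the
    # loop keeps trimming until both the character and word limits are satisfied.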
the_line = '<li><a rel="me muse" href="%s">%s</a>' % (new_post_url, the_title)
the_line += ' (%s), ' % datetime.date.today().strftime("%d %B %Y")
the_line += current_run_data[ 'ch%02ddesc' % which_script ]
the_line += ': <blockquote><p>%s</p>' % first_sentence
the_line += '<p><small>tags: ' + html_tags + '</small></p>'
the_line += '</blockquote></li>\n'
# Now record the new line to the index file.
the_lines.append(the_line)
index_file = open('%s/index.html' % current_run_directory, 'w')
index_file.writelines(the_lines)
index_file.close()
|
patrick-brian-mooney/UlyssesRedux
|
daily_script.py
|
Python
|
gpl-3.0
| 5,413
|
[
"Brian"
] |
10e1d9362639262357c871d70aad5efa6b6aeae7137e317383ffb630c3bd1f25
|
# Copyright (C) 2015 OLogN Technologies AG
#
# This source file is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import antlr4
from smartanthill_zc import array
from smartanthill_zc import expression, statement, node
from smartanthill_zc.ECMAScript import ECMAScriptVisitor
from smartanthill_zc.ECMAScript.ECMAScriptLexer import ECMAScriptLexer
from smartanthill_zc.ECMAScript.ECMAScriptParser import ECMAScriptParser
from smartanthill_zc.antlr_helper import (_ProxyAntlrErrorListener,
get_token_text)
from smartanthill_zc.node import StmtListNode
from smartanthill_zc.root import SourceProgramNode
def parse_js_string(compiler, data):
'''
Parse string containing java script code
Returns an antlr parse tree
'''
# input = FileStream(argv[1])
istream = antlr4.InputStream.InputStream(data)
lexer = ECMAScriptLexer(istream)
stream = antlr4.CommonTokenStream(lexer)
parser = ECMAScriptParser(stream)
# parser.removeErrorListener()
parser.addErrorListener(_ProxyAntlrErrorListener(compiler))
tree = parser.program()
compiler.check_stage('parse_js')
return tree
def _parse_js_expression(compiler, data, ctx):
'''
Parse string containing constant expression
Returns a node tree with expression node
'''
# input = FileStream(argv[1])
istream = antlr4.InputStream.InputStream(data)
lexer = ECMAScriptLexer(istream)
stream = antlr4.CommonTokenStream(lexer)
parser = ECMAScriptParser(stream)
# parser.removeErrorListener()
parser.addErrorListener(_ProxyAntlrErrorListener(compiler))
tree = parser.singleExpression()
check = _FilterParameterExpressionVisitor(ctx, compiler)
check.visit(tree)
visitor = _JsSyntaxVisitor(compiler)
expr = visitor.visit(tree)
return expr
class _FilterParameterExpressionVisitor(ECMAScriptVisitor.ECMAScriptVisitor):
'''
    Visitor class that checks that a parameter expression uses only
    supported constructs, reporting a compiler error otherwise
    The visitor template is copied from the superclass interface
    ECMAScriptVisitor.ECMAScriptVisitor
'''
def __init__(self, ctx, compiler):
'''
Constructor
'''
self.ctx = ctx
self._compiler = compiler
def visitChildren(self, current):
'''
Overrides antlr4.ParseTreeVisitor method
Changes default action, from walking down the tree to
fail with assert, this will expose any parsed node that does not have
a valid interpretation rule here
'''
self._compiler.report_error(
self.ctx,
"Unsupported parameter value '%s'" % str(current.getText()))
# Visit a parse tree produced by ECMAScriptParser#LogicalOrExpression.
def visitLogicalOrExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
# Visit a parse tree produced by ECMAScriptParser#LogicalAndExpression.
def visitLogicalAndExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
# Visit a parse tree produced by ECMAScriptParser#LiteralExpression.
def visitLiteralExpression(self, ctx):
pass
# Visit a parse tree produced by ECMAScriptParser#NotExpression.
def visitNotExpression(self, ctx):
self.visit(ctx.singleExpression(0))
# Visit a parse tree produced by ECMAScriptParser#RelationalExpression.
def visitRelationalExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
# Visit a parse tree produced by ECMAScriptParser#ParenthesizedExpression.
def visitParenthesizedExpression(self, ctx):
self.visit(ctx.singleExpression(0))
# Visit a parse tree produced by ECMAScriptParser#EqualityExpression.
def visitEqualityExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
# Visit a parse tree produced by ECMAScriptParser#UnaryExpression.
def visitUnaryExpression(self, ctx):
self.visit(ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#AdditiveExpression.
def visitAdditiveExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
# Visit a parse tree produced by ECMAScriptParser#MultiplicativeExpression.
def visitMultiplicativeExpression(self, ctx):
self.visit(ctx.singleExpression(0))
self.visit(ctx.singleExpression(1))
def create_parameters(compiler, data, ctx):
'''
    Creates a DeclarationListNode and populates it with one
    ParameterDeclarationStmtNode per key of the data dictionary.
    Used for parameters
'''
decls = compiler.init_node(node.DeclarationListNode(), ctx)
for key in data.keys():
assert isinstance(key, str)
var = compiler.init_node(
statement.ParameterDeclarationStmtNode(), ctx)
var.txt_name = key
decls.add_declaration(var)
compiler.check_stage('parameter')
return decls
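# Illustration only (values are hypothetical, not from this module):
# create_parameters(compiler, {'x': 1, 'level': 2}, ctx) returns a
# DeclarationListNode holding one ParameterDeclarationStmtNode per key
# ('x' and 'level'), each with txt_name set to that key.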
def js_parse_tree_to_syntax_tree(compiler, js_tree):
'''
Translates an ECMAScript (js) parse tree as returned by antlr4 into a
syntax tree as used by the zepto compiler, this tree transformation
replaces syntax directed translation written directly into the grammar
as used by yacc-lex.
The antlr parser creates a parse tree from the grammar without actions,
and at this point the parse tree is transformed into the syntax tree
needed by the application.
    While this may seem more complex, it is actually almost the same;
    only where and when things take place changes
'''
visitor = _JsSyntaxVisitor(compiler)
source = visitor.visit(js_tree)
compiler.check_stage('js_syntax')
return source
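# Sketch of the intended usage, assuming a compiler object as used elsewhere in
# smartanthill_zc (the snippet below is illustrative, not part of this module):
#   tree = parse_js_string(compiler, "var a = 1 + 2;")
#   root = js_parse_tree_to_syntax_tree(compiler, tree)  # SourceProgramNode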
class _JsSyntaxVisitor(ECMAScriptVisitor.ECMAScriptVisitor):
'''
Visitor class that implements js_tree_to_syntax_tree function
The template for the visitor is copy&paste from super class interface
ECMAScriptVisitor.ECMAScriptVisitor
'''
def __init__(self, compiler):
'''
Constructor
'''
self._compiler = compiler
def visitChildren(self, current):
'''
Overrides antlr4.ParseTreeVisitor method
Changes default action, from walking down the tree to
fail with assert, this will expose any parsed node that does not have
a valid interpretation rule here
'''
self._compiler.report_error(current, "Internal Error!")
self._compiler.report_error(current, "Unmatched parser token")
assert False
def init_operator(self, op, node_ctx, op_ctx, expr_list_ctx):
'''
Initializes a very generic operator expression.
Operands go into an argument list exactly the same as methods,
        this should make argument-matching algorithms easier
'''
op = self._compiler.init_node(op, node_ctx)
text = get_token_text(self._compiler, op_ctx)
op.txt_operator = text
if text not in ['!', '&&', '||',
'*', '/', '%', '+', '-',
'<', '>', '<=', '>=', '==', '!=']:
self._compiler.report_error(
node_ctx, "Operator '%s' not supported" % text)
arg_list = self._compiler.init_node(node.ArgumentListNode(), node_ctx)
for e in expr_list_ctx:
expr = self.visit(e)
arg_list.add_argument(expr)
op.set_argument_list(arg_list)
return op
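    # Illustration (hypothetical source): for 'a && b' this yields a
    # LogicOpExprNode with txt_operator '&&' whose ArgumentListNode holds the two
    # visited operand expressions, mirroring how method calls carry their arguments.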
# Visit a parse tree produced by ECMAScriptParser#program.
def visitProgram(self, ctx):
prog = self._compiler.init_node(SourceProgramNode(), ctx)
stmt_list = self._compiler.init_node(
StmtListNode(), ctx)
elems = ctx.sourceElements()
if elems:
elem = elems.sourceElement()
for current in elem:
st = current.statement()
assert st
stmt = self.visit(st)
stmt_list.add_statement(stmt)
prog.set_statement_list(stmt_list)
return prog
# Visit a parse tree produced by ECMAScriptParser#statement.
def visitStatement(self, ctx):
return ctx.getChild(0).accept(self)
# Visit a parse tree produced by ECMAScriptParser#mcuSleepStatement.
def visitMcuSleepStatement(self, ctx):
stmt = self._compiler.init_node(statement.McuSleepStmtNode(), ctx)
args = self.visit(ctx.arguments())
stmt.set_argument_list(args)
return stmt
# Visit a parse tree produced by ECMAScriptParser#block.
def visitBlock(self, ctx):
stmt_list = self._compiler.init_node(
StmtListNode(), ctx)
st_list = ctx.statementList()
if st_list:
sts = st_list.statement()
for current in sts:
stmt = self.visit(current)
stmt_list.add_statement(stmt)
return stmt_list
# Visit a parse tree produced by ECMAScriptParser#variableStatement.
def visitVariableStatement(self, ctx):
stmt = self._compiler.init_node(
statement.VariableDeclarationStmtNode(), ctx)
var_list = ctx.variableDeclarationList().variableDeclaration()
assert len(var_list) >= 1
if len(var_list) > 1:
self._compiler.report_error(ctx, "Multiple varible declarations in"
" a single statement not supported")
stmt.txt_name = get_token_text(self._compiler,
var_list[0].Identifier())
ini = var_list[0].initialiser()
if ini:
expr = self.visit(ini.singleExpression())
stmt.set_initializer(expr)
return stmt
# Visit a parse tree produced by ECMAScriptParser#emptyStatement.
def visitEmptyStatement(self, ctx):
stmt = self._compiler.init_node(statement.NopStmtNode(), ctx)
return stmt
# Visit a parse tree produced by ECMAScriptParser#expressionStatement.
def visitExpressionStatement(self, ctx):
stmt = self._compiler.init_node(statement.ExpressionStmtNode(), ctx)
expr = self.visit(ctx.singleExpression())
stmt.set_expression(expr)
return stmt
# Visit a parse tree produced by ECMAScriptParser#ifStatement.
def visitIfStatement(self, ctx):
stmt = self._compiler.init_node(statement.IfElseStmtNode(), ctx)
expr = self.visit(ctx.singleExpression())
stmt.set_expression(expr)
body = ctx.statement()
if len(body) == 1:
body_if = statement.make_statement_list(
self._compiler, self.visit(body[0]))
stmt.set_if_branch(body_if)
elif len(body) == 2:
body_if = statement.make_statement_list(
self._compiler, self.visit(body[0]))
stmt.set_if_branch(body_if)
body_el = statement.make_statement_list(
self._compiler, self.visit(body[1]))
stmt.set_else_branch(body_el)
else:
assert False
return stmt
# Visit a parse tree produced by ECMAScriptParser#DoStatement.
def visitDoStatement(self, ctx):
self._compiler.report_error(ctx, "Loop 'do' not supported")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#WhileStatement.
def visitWhileStatement(self, ctx):
self._compiler.report_error(ctx, "Loop 'while' not supported")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#ForStatement.
def visitForStatement(self, ctx):
self._compiler.report_error(
ctx, "Loop 'for' only supported in the trivial form "
"'for(var i = ..; i < ..; i++) {..}'")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#ForVarTrivialStatement.
def visitSimpleForStatement(self, ctx):
stmt = self._compiler.init_node(statement.SimpleForStmtNode(), ctx)
id_list = ctx.Identifier()
assert len(id_list) == 3
txt0 = get_token_text(self._compiler, id_list[0])
txt1 = get_token_text(self._compiler, id_list[1])
txt2 = get_token_text(self._compiler, id_list[2])
if txt0 == txt1 and txt1 == txt2:
stmt.txt_name = txt0
else:
self._compiler.report_error(
ctx, "Loop 'for' only supported in the trivial form "
"'for(var i = ..; i < ..; i++) {..}'")
expr_list = ctx.singleExpression()
assert len(expr_list) == 2
begin = self.visit(expr_list[0])
stmt.set_begin_expression(begin)
end = self.visit(expr_list[1])
stmt.set_end_expression(end)
sl = statement.make_statement_list(
self._compiler, self.visit(ctx.statement()))
stmt.set_statement_list(sl)
return stmt
# Visit a parse tree produced by ECMAScriptParser#ForVarStatement.
def visitForVarStatement(self, ctx):
self._compiler.report_error(
ctx, "Loop 'for' only supported in the trivial form "
"'for(var i = ..; i < ..; i++) {..}'")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#ForInStatement.
def visitForInStatement(self, ctx):
self._compiler.report_error(
ctx, "Loop 'for' only supported in the trivial form "
"'for(var i = ..; i < ..; i++) {..}'")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#ForVarInStatement.
def visitForVarInStatement(self, ctx):
self._compiler.report_error(
ctx, "Loop 'for' only supported in the trivial form "
"'for(var i = ..; i < ..; i++) {..}'")
return self._compiler.init_node(statement.ErrorStmtNode(), ctx)
# Visit a parse tree produced by ECMAScriptParser#returnStatement.
def visitReturnStatement(self, ctx):
stmt = self._compiler.init_node(statement.ReturnStmtNode(), ctx)
exprCtx = ctx.singleExpression()
if exprCtx:
expr = self.visit(exprCtx)
assert expr
stmt.set_expression(expr)
return stmt
# Visit a parse tree produced by ECMAScriptParser#arguments.
def visitArguments(self, ctx):
args = self._compiler.init_node(node.ArgumentListNode(), ctx)
al = ctx.argumentList()
if al:
exprs = al.singleExpression()
for e in exprs:
expr = self.visit(e)
args.add_argument(expr)
return args
# Visit a parse tree produced by ECMAScriptParser#FunctionExpression.
def visitFunctionExpression(self, ctx):
expr = self._compiler.init_node(expression.FunctionCallExprNode(), ctx)
expr.txt_name = get_token_text(self._compiler, ctx.Identifier())
args = self.visit(ctx.arguments())
expr.set_argument_list(args)
return expr
# Visit a parse tree produced by ECMAScriptParser#AssignmentExpression.
def visitAssignmentExpression(self, ctx):
expr = self._compiler.init_node(expression.AssignmentExprNode(), ctx)
expr.txt_name = get_token_text(self._compiler, ctx.Identifier())
rhs = self.visit(ctx.singleExpression())
expr.set_rhs(rhs)
return expr
# Visit a parse tree produced by ECMAScriptParser#LogicalOrExpression.
def visitLogicalOrExpression(self, ctx):
return self.init_operator(expression.LogicOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#LogicalAndExpression.
def visitLogicalAndExpression(self, ctx):
return self.init_operator(expression.LogicOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#IdentifierExpression.
def visitIdentifierExpression(self, ctx):
expr = self._compiler.init_node(expression.VariableExprNode(), ctx)
expr.txt_name = get_token_text(self._compiler, ctx.Identifier())
return expr
# Visit a parse tree produced by ECMAScriptParser#LiteralExpression.
def visitLiteralExpression(self, ctx):
return self.visit(ctx.literal())
# Visit a parse tree produced by ECMAScriptParser#ArrayLiteralExpression.
def visitArrayLiteralExpression(self, ctx):
expr = self._compiler.init_node(array.ArrayLiteralExprNode(), ctx)
elems = ctx.arrayLiteral().elementList()
if not elems:
self._compiler.report_error(
ctx,
"Empty array expression not supported")
else:
exprs = elems.singleExpression()
assert len(exprs) >= 1
for current in exprs:
e = self.visit(current)
expr.add_expression(e)
return expr
# Visit a parse tree produced by ECMAScriptParser#MemberDotExpression.
def visitMemberDotExpression(self, ctx):
expr = self._compiler.init_node(expression.MemberAccessExprNode(), ctx)
expr.txt_member = get_token_text(self._compiler, ctx.Identifier())
e = self.visit(ctx.singleExpression())
expr.set_expression(e)
return expr
# Visit a parse tree produced by ECMAScriptParser#NotExpression.
def visitNotExpression(self, ctx):
return self.init_operator(expression.LogicOpExprNode(), ctx,
ctx.getChild(0), [ctx.singleExpression()])
# Visit a parse tree produced by ECMAScriptParser#RelationalExpression.
def visitRelationalExpression(self, ctx):
return self.init_operator(expression.ComparisonOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#ParenthesizedExpression.
def visitParenthesizedExpression(self, ctx):
return self.visit(ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#MethodExpression.
def visitMethodExpression(self, ctx):
expr = self._compiler.init_node(expression.BodyPartCallExprNode(), ctx)
expr.txt_bodypart = get_token_text(self._compiler, ctx.Identifier(0))
expr.txt_method = get_token_text(self._compiler, ctx.Identifier(1))
args = self.visit(ctx.arguments())
expr.set_argument_list(args)
return expr
# Visit a parse tree produced by ECMAScriptParser#EqualityExpression.
def visitEqualityExpression(self, ctx):
return self.init_operator(expression.ComparisonOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#UnaryExpression.
def visitUnaryExpression(self, ctx):
return self.init_operator(expression.UnaryOpExprNode(), ctx,
ctx.getChild(0), [ctx.singleExpression()])
# Visit a parse tree produced by ECMAScriptParser#AdditiveExpression.
def visitAdditiveExpression(self, ctx):
return self.init_operator(expression.ArithmeticOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#MultiplicativeExpression.
def visitMultiplicativeExpression(self, ctx):
return self.init_operator(expression.ArithmeticOpExprNode(), ctx,
ctx.getChild(1), ctx.singleExpression())
# Visit a parse tree produced by ECMAScriptParser#literal.
def visitLiteral(self, ctx):
if ctx.numericLiteral():
expr = self._compiler.init_node(
expression.NumberLiteralExprNode(), ctx)
lit = ctx.numericLiteral().DecimalLiteral()
expr.txt_literal = get_token_text(self._compiler, lit)
return expr
elif ctx.BooleanLiteral():
text = get_token_text(self._compiler, ctx.BooleanLiteral())
assert text == 'true' or text == 'false'
expr = self._compiler.init_node(
expression.BooleanLiteralExprNode(), ctx)
expr.boolean_value = True if text == 'true' else False
return expr
else:
assert False
|
smartanthill/smartanthill-zepto-compiler
|
smartanthill_zc/parse_js.py
|
Python
|
gpl-2.0
| 21,223
|
[
"VisIt"
] |
12af1c38b9d7f7650e1421aaa3f7b8013f6169191528c2bcac5cb0c1f76eff7c
|
#!/usr/bin/python
import httplib
import httplib2
import os
import random
import sys
import time
import tempfile
import subprocess
import json
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaFileUpload
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# Explicitly tell the underlying HTTP transport library not to retry, since
# we are handling retry logic ourselves.
httplib2.RETRIES = 1
# Maximum number of times to retry before giving up.
MAX_RETRIES = 10
# Always retry when these exceptions are raised.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,
httplib.IncompleteRead, httplib.ImproperConnectionState,
httplib.CannotSendRequest, httplib.CannotSendHeader,
httplib.ResponseNotReady, httplib.BadStatusLine)
# Always retry when an apiclient.errors.HttpError with one of these status
# codes is raised.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret. You can acquire an OAuth 2.0 client ID and client secret from
# the Google Developers Console at
# https://console.developers.google.com/.
# Please ensure that you have enabled the YouTube Data API for your project.
# For more information about using OAuth2 to access the YouTube Data API, see:
# https://developers.google.com/youtube/v3/guides/authentication
# For more information about the client_secrets.json file format, see:
# https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows an application to upload files to the
# authenticated user's YouTube channel, but doesn't allow other types of access.
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the Developers Console
https://console.developers.google.com/
For more information about the client_secrets.json file format, please visit:
https://developers.google.com/api-client-library/python/guide/aaa_client_secrets
""" % os.path.abspath(os.path.join(os.path.dirname(__file__),
CLIENT_SECRETS_FILE))
VALID_PRIVACY_STATUSES = ("public", "private", "unlisted")
def get_authenticated_service(args):
flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,
scope=YOUTUBE_UPLOAD_SCOPE,
message=MISSING_CLIENT_SECRETS_MESSAGE)
storage = Storage("%s-oauth2.json" % sys.argv[0])
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, storage, args)
return build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
http=credentials.authorize(httplib2.Http()))
def initialize_upload(youtube, options):
tags = None
if options.keywords:
tags = options.keywords.split(",")
body=dict(
snippet=dict(
title=options.title,
description=options.description,
tags=tags,
categoryId=options.category
),
status=dict(
privacyStatus=options.privacyStatus
)
)
# Call the API's videos.insert method to create and upload the video.
insert_request = youtube.videos().insert(
part=",".join(body.keys()),
body=body,
# The chunksize parameter specifies the size of each chunk of data, in
# bytes, that will be uploaded at a time. Set a higher value for
# reliable connections as fewer chunks lead to faster uploads. Set a lower
# value for better recovery on less reliable connections.
#
# Setting "chunksize" equal to -1 in the code below means that the entire
# file will be uploaded in a single HTTP request. (If the upload fails,
# it will still be retried where it left off.) This is usually a best
# practice, but if you're using Python older than 2.6 or if you're
# running on App Engine, you should set the chunksize to something like
# 1024 * 1024 (1 megabyte).
media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)
)
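  # Illustrative alternative (hypothetical value, following the note above): on an
  # unreliable connection or on App Engine one might instead build the media object
  # as MediaFileUpload(options.file, chunksize=1024 * 1024, resumable=True).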
resumable_upload(insert_request)
gProgramDirectory = os.path.dirname(sys.argv[0])
def sendCompletionEmail(videoID):
fullBatchPath = gProgramDirectory + '\\Python27\\python.exe ' + gProgramDirectory + '\\send_email.py '
fullBatchPath += '-VideoTitle=' + '"' + args.title + '" '
videoURL = 'https://www.youtube.com/watch?v=' + videoID + ' '
fullBatchPath += '-VideoURL=' + videoURL
tempBatFile = tempfile.NamedTemporaryFile(suffix='.bat', delete=False)
tempBatFile.write(fullBatchPath)
tempBatFile.close()
#WriteJSON(args.title, videoURL)
print('Sending confirmation email...')
subprocess.call(tempBatFile.name)
#remove temp batch file
os.remove(tempBatFile.name)
# This method implements an exponential backoff strategy to resume a
# failed upload.
def resumable_upload(insert_request):
response = None
error = None
retry = 0
while response is None:
try:
print ("Uploading file...")
status, response = insert_request.next_chunk()
if 'id' in response:
print ("Video id '%s' was successfully uploaded." % response['id'])
sendCompletionEmail(response['id'])
else:
exit("The upload failed with an unexpected response: %s" % response)
    except HttpError as e:
if e.resp.status in RETRIABLE_STATUS_CODES:
error = "A retriable HTTP error %d occurred:\n%s" % (e.resp.status,
e.content)
else:
raise
    except RETRIABLE_EXCEPTIONS as e:
error = "A retriable error occurred: %s" % e
if error is not None:
print (error)
retry += 1
if retry > MAX_RETRIES:
exit("No longer attempting to retry.")
max_sleep = 2 ** retry
sleep_seconds = random.random() * max_sleep
print ("Sleeping %f seconds and then retrying..." % sleep_seconds)
time.sleep(sleep_seconds)
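      # Backoff illustration (values follow from the code above): on retry 1 the
      # sleep is uniform in [0, 2) seconds, on retry 3 in [0, 8), doubling the
      # ceiling on each attempt until MAX_RETRIES is exceeded.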
def WriteJSON(VideoTitle, VideoURL):
data = {}
data['VideoProperties'] = []
data['VideoProperties'].append(
{
"VideoTitle" : VideoTitle,
"VideoURL" : VideoURL
})
with open ('UploadInfo.json', 'w') as outfile:
json.dump(data, outfile)
if __name__ == '__main__':
argparser.add_argument("--file", required=True, help="Video file to upload")
argparser.add_argument("--title", help="Video title", default="Test Title")
argparser.add_argument("--description", help="Video description",
default="")
argparser.add_argument("--category", default="22",
help="Numeric video category. " +
"See https://developers.google.com/youtube/v3/docs/videoCategories/list")
argparser.add_argument("--keywords", help="Video keywords, comma separated",
default="")
argparser.add_argument("--privacyStatus", choices=VALID_PRIVACY_STATUSES,
default=VALID_PRIVACY_STATUSES[2], help="Video privacy status.")
args = argparser.parse_args()
if not os.path.exists(args.file):
exit("Please specify a valid file using the --file= parameter.")
youtube = get_authenticated_service(args)
try:
initialize_upload(youtube, args)
  except HttpError as e:
print ("An HTTP error %d occurred:\n%s" % (e.resp.status, e.content))
|
sometallgit/AutoUploader
|
upload_video.py
|
Python
|
mit
| 7,700
|
[
"VisIt"
] |
883d082dbe5fa462fe4912e8f1406a250315c4759b06e1f305a04fab56a20911
|
# -*- coding: utf-8 -*-
"""Mayavi/traits GUI for setting MRI fiducials."""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD (3-clause)
import os
from mayavi.core.ui.mayavi_scene import MayaviScene
from mayavi.tools.mlab_scene_model import MlabSceneModel
import numpy as np
from pyface.api import confirm, error, FileDialog, OK, YES
from traits.api import (HasTraits, HasPrivateTraits, on_trait_change,
cached_property, DelegatesTo, Event, Instance,
Property, Array, Bool, Button, Enum)
from traitsui.api import HGroup, Item, VGroup, View, Handler, ArrayEditor
from traitsui.menu import NoButtons
from tvtk.pyface.scene_editor import SceneEditor
from ..coreg import (fid_fname, _find_fiducials_files, _find_head_bem,
get_mni_fiducials)
from ..defaults import DEFAULTS
from ..io import write_fiducials
from ..io.constants import FIFF
from ..surface import complete_surface_info, decimate_surface
from ..utils import get_subjects_dir, logger, warn
from ..viz.backends._pysurfer_mayavi import _toggle_mlab_render
from ._file_traits import (SurfaceSource, fid_wildcard, FiducialsSource,
MRISubjectSource, SubjectSelectorPanel,
Surf)
from ._viewer import (HeadViewController, PointObject, SurfaceObject,
headview_borders, _BUTTON_WIDTH,
_MRI_FIDUCIALS_WIDTH, _MM_WIDTH,
_RESET_LABEL, _RESET_WIDTH, _mm_fmt)
defaults = DEFAULTS['coreg']
class MRIHeadWithFiducialsModel(HasPrivateTraits):
"""Represent an MRI head shape (high and low res) with fiducials.
Attributes
----------
points : array (n_points, 3)
MRI head surface points.
tris : array (n_tris, 3)
Triangles based on points.
lpa : array (1, 3)
Left peri-auricular point coordinates.
nasion : array (1, 3)
Nasion coordinates.
rpa : array (1, 3)
Right peri-auricular point coordinates.
"""
subject_source = Instance(MRISubjectSource, ())
bem_low_res = Instance(SurfaceSource, ())
bem_high_res = Instance(SurfaceSource, ())
fid = Instance(FiducialsSource, ())
fid_file = DelegatesTo('fid', 'file')
fid_fname = DelegatesTo('fid', 'fname')
fid_points = DelegatesTo('fid', 'points')
subjects_dir = DelegatesTo('subject_source')
subject = DelegatesTo('subject_source')
subject_has_bem = DelegatesTo('subject_source')
lpa = Array(float, (1, 3))
nasion = Array(float, (1, 3))
rpa = Array(float, (1, 3))
reset = Event(desc="Reset fiducials to the file.")
# info
can_save = Property(depends_on=['file', 'can_save_as'])
can_save_as = Property(depends_on=['lpa', 'nasion', 'rpa'])
can_reset = Property(depends_on=['file', 'fid.points', 'lpa', 'nasion',
'rpa'])
fid_ok = Property(depends_on=['lpa', 'nasion', 'rpa'], desc="All points "
"are set")
default_fid_fname = Property(depends_on=['subjects_dir', 'subject'],
desc="the default file name for the "
"fiducials fif file")
# switch for the GUI (has no effect in the model)
lock_fiducials = Bool(False, desc="Used by GIU, has no effect in the "
"model.")
@on_trait_change('fid_points')
def reset_fiducials(self): # noqa: D102
if self.fid_points is not None:
self.lpa = self.fid_points[0:1]
self.nasion = self.fid_points[1:2]
self.rpa = self.fid_points[2:3]
def save(self, fname=None):
"""Save the current fiducials to a file.
Parameters
----------
fname : str
Destination file path. If None, will use the current fid filename
if available, or else use the default pattern.
"""
if fname is None:
fname = self.fid_file
if not fname:
fname = self.default_fid_fname
dig = [{'kind': FIFF.FIFFV_POINT_CARDINAL,
'ident': FIFF.FIFFV_POINT_LPA,
'r': np.array(self.lpa[0])},
{'kind': FIFF.FIFFV_POINT_CARDINAL,
'ident': FIFF.FIFFV_POINT_NASION,
'r': np.array(self.nasion[0])},
{'kind': FIFF.FIFFV_POINT_CARDINAL,
'ident': FIFF.FIFFV_POINT_RPA,
'r': np.array(self.rpa[0])}]
write_fiducials(fname, dig, FIFF.FIFFV_COORD_MRI)
self.fid_file = fname
@cached_property
def _get_can_reset(self):
if not self.fid_file:
return False
elif np.any(self.lpa != self.fid.points[0:1]):
return True
elif np.any(self.nasion != self.fid.points[1:2]):
return True
elif np.any(self.rpa != self.fid.points[2:3]):
return True
return False
@cached_property
def _get_can_save_as(self):
can = not (np.all(self.nasion == self.lpa) or
np.all(self.nasion == self.rpa) or
np.all(self.lpa == self.rpa))
return can
@cached_property
def _get_can_save(self):
if not self.can_save_as:
return False
elif self.fid_file:
return True
elif self.subjects_dir and self.subject:
return True
else:
return False
@cached_property
def _get_default_fid_fname(self):
fname = fid_fname.format(subjects_dir=self.subjects_dir,
subject=self.subject)
return fname
@cached_property
def _get_fid_ok(self):
return all(np.any(pt) for pt in (self.nasion, self.lpa, self.rpa))
def _reset_fired(self):
self.reset_fiducials()
# if subject changed because of a change of subjects_dir this was not
# triggered
@on_trait_change('subjects_dir,subject')
def _subject_changed(self):
subject = self.subject
subjects_dir = self.subjects_dir
if not subjects_dir or not subject:
return
# find high-res head model (if possible)
high_res_path = _find_head_bem(subject, subjects_dir, high_res=True)
low_res_path = _find_head_bem(subject, subjects_dir, high_res=False)
if high_res_path is None and low_res_path is None:
msg = 'No standard head model was found for subject %s' % subject
error(None, msg, "No head surfaces found")
raise RuntimeError(msg)
if high_res_path is not None:
self.bem_high_res.file = high_res_path
else:
self.bem_high_res.file = low_res_path
if low_res_path is None:
# This should be very rare!
warn('No low-resolution head found, decimating high resolution '
'mesh (%d vertices): %s' % (len(self.bem_high_res.surf.rr),
high_res_path,))
# Create one from the high res one, which we know we have
rr, tris = decimate_surface(self.bem_high_res.surf.rr,
self.bem_high_res.surf.tris,
n_triangles=5120)
surf = complete_surface_info(dict(rr=rr, tris=tris),
copy=False, verbose=False)
# directly set the attributes of bem_low_res
self.bem_low_res.surf = Surf(tris=surf['tris'], rr=surf['rr'],
nn=surf['nn'])
else:
self.bem_low_res.file = low_res_path
# Set MNI points
try:
fids = get_mni_fiducials(subject, subjects_dir)
except Exception: # some problem, leave at origin
self.fid.mni_points = None
else:
self.fid.mni_points = np.array([f['r'] for f in fids], float)
# find fiducials file
fid_files = _find_fiducials_files(subject, subjects_dir)
if len(fid_files) == 0:
self.fid.reset_traits(['file'])
self.lock_fiducials = False
else:
self.fid_file = fid_files[0].format(subjects_dir=subjects_dir,
subject=subject)
self.lock_fiducials = True
# does not seem to happen by itself ... so hard code it:
self.reset_fiducials()
class SetHandler(Handler):
"""Handler to change style when setting MRI fiducials."""
def object_set_changed(self, info): # noqa: D102
return self.object_locked_changed(info)
def object_locked_changed(self, info): # noqa: D102
if info.object.locked:
ss = ''
else:
ss = 'border-style: solid; border-color: red; border-width: 2px;'
# This will only work for Qt, but hopefully that's most users!
try:
_color_children(info.ui.info.ui.control, ss)
except AttributeError: # safeguard for wxpython
pass
def _color_children(obj, ss):
"""Qt helper."""
for child in obj.children():
if 'QRadioButton' in repr(child):
child.setStyleSheet(ss if child.isChecked() else '')
elif 'QLineEdit' in repr(child):
child.setStyleSheet(ss)
elif 'QWidget' in repr(child): # on Linux it's nested
_color_children(child, ss)
_SET_TOOLTIP = ('Click on the MRI image to set the position, '
'or enter values below')
class FiducialsPanel(HasPrivateTraits):
"""Set fiducials on an MRI surface."""
model = Instance(MRIHeadWithFiducialsModel)
fid_file = DelegatesTo('model')
fid_fname = DelegatesTo('model')
lpa = DelegatesTo('model')
nasion = DelegatesTo('model')
rpa = DelegatesTo('model')
can_save = DelegatesTo('model')
can_save_as = DelegatesTo('model')
can_reset = DelegatesTo('model')
fid_ok = DelegatesTo('model')
locked = DelegatesTo('model', 'lock_fiducials')
set = Enum('LPA', 'Nasion', 'RPA')
current_pos_mm = Array(float, (1, 3))
save_as = Button(label='Save as...')
save = Button(label='Save')
reset_fid = Button(label=_RESET_LABEL)
headview = Instance(HeadViewController)
hsp_obj = Instance(SurfaceObject)
picker = Instance(object)
# the layout of the dialog created
view = View(VGroup(
HGroup(Item('fid_file', width=_MRI_FIDUCIALS_WIDTH,
tooltip='MRI fiducials file'), show_labels=False),
HGroup(Item('set', width=_MRI_FIDUCIALS_WIDTH,
format_func=lambda x: x, style='custom',
tooltip=_SET_TOOLTIP), show_labels=False),
HGroup(Item('current_pos_mm',
editor=ArrayEditor(width=_MM_WIDTH, format_func=_mm_fmt),
tooltip='MRI fiducial position (mm)'), show_labels=False),
HGroup(Item('save', enabled_when='can_save',
tooltip="If a filename is currently specified, save to "
"that file, otherwise save to the default file name",
width=_BUTTON_WIDTH),
Item('save_as', enabled_when='can_save_as',
width=_BUTTON_WIDTH),
Item('reset_fid', enabled_when='can_reset', width=_RESET_WIDTH,
tooltip='Reset to file values (if available)'),
show_labels=False),
enabled_when="locked==False", show_labels=False), handler=SetHandler())
def __init__(self, *args, **kwargs): # noqa: D102
super(FiducialsPanel, self).__init__(*args, **kwargs)
@on_trait_change('current_pos_mm')
def _update_pos(self):
attr = self.set.lower()
if not np.allclose(getattr(self, attr), self.current_pos_mm * 1e-3):
setattr(self, attr, self.current_pos_mm * 1e-3)
@on_trait_change('model:lpa')
def _update_lpa(self, name):
if self.set == 'LPA':
self.current_pos_mm = self.lpa * 1000
@on_trait_change('model:nasion')
def _update_nasion(self, name):
        if self.set == 'Nasion':
self.current_pos_mm = self.nasion * 1000
@on_trait_change('model:rpa')
def _update_rpa(self, name):
        if self.set == 'RPA':
self.current_pos_mm = self.rpa * 1000
def _reset_fid_fired(self):
self.model.reset = True
def _save_fired(self):
self.model.save()
def _save_as_fired(self):
if self.fid_file:
default_path = self.fid_file
else:
default_path = self.model.default_fid_fname
dlg = FileDialog(action="save as", wildcard=fid_wildcard,
default_path=default_path)
dlg.open()
if dlg.return_code != OK:
return
path = dlg.path
if not path.endswith('.fif'):
path = path + '.fif'
if os.path.exists(path):
answer = confirm(None, "The file %r already exists. Should it "
"be replaced?", "Overwrite File?")
if answer != YES:
return
self.model.save(path)
def _on_pick(self, picker):
if self.locked:
return
self.picker = picker
n_pos = len(picker.picked_positions)
if n_pos == 0:
logger.debug("GUI: picked empty location")
return
if picker.actor is self.hsp_obj.surf.actor.actor:
idxs = []
idx = None
pt = [picker.pick_position]
elif self.hsp_obj.surf.actor.actor in picker.actors:
idxs = [i for i in range(n_pos) if picker.actors[i] is
self.hsp_obj.surf.actor.actor]
idx = idxs[-1]
pt = [picker.picked_positions[idx]]
        else:
            logger.debug("GUI: picked object other than MRI")
            return
def round_(x):
return round(x, 3)
poss = [map(round_, pos) for pos in picker.picked_positions]
pos = map(round_, picker.pick_position)
msg = ["Pick Event: %i picked_positions:" % n_pos]
line = str(pos)
if idx is None:
line += " <-pick_position"
msg.append(line)
for i, pos in enumerate(poss):
line = str(pos)
if i == idx:
line += " <- MRI mesh"
elif i in idxs:
line += " (<- also MRI mesh)"
msg.append(line)
logger.debug('\n'.join(msg))
if self.set == 'Nasion':
self.nasion = pt
elif self.set == 'LPA':
self.lpa = pt
elif self.set == 'RPA':
self.rpa = pt
else:
raise ValueError("set = %r" % self.set)
@on_trait_change('set')
def _on_set_change(self, obj, name, old, new):
if new == 'Nasion':
self.current_pos_mm = self.nasion * 1000
self.headview.front = True
elif new == 'LPA':
self.current_pos_mm = self.lpa * 1000
self.headview.left = True
elif new == 'RPA':
self.current_pos_mm = self.rpa * 1000
self.headview.right = True
# FiducialsPanel view that allows manipulating all coordinates numerically
view2 = View(VGroup(Item('fid_file', label='Fiducials File'),
Item('fid_fname', show_label=False, style='readonly'),
Item('set', style='custom'), 'lpa', 'nasion', 'rpa',
HGroup(Item('save', enabled_when='can_save'),
Item('save_as', enabled_when='can_save_as'),
Item('reset_fid', enabled_when='can_reset'),
show_labels=False),
enabled_when="locked==False"))
class FiducialsFrame(HasTraits):
"""GUI for interpolating between two KIT marker files.
Parameters
----------
subject : None | str
Set the subject which is initially selected.
subjects_dir : None | str
Override the SUBJECTS_DIR environment variable.
"""
model = Instance(MRIHeadWithFiducialsModel, ())
scene = Instance(MlabSceneModel, ())
headview = Instance(HeadViewController)
spanel = Instance(SubjectSelectorPanel)
panel = Instance(FiducialsPanel)
mri_obj = Instance(SurfaceObject)
point_scale = float(defaults['mri_fid_scale'])
lpa_obj = Instance(PointObject)
nasion_obj = Instance(PointObject)
rpa_obj = Instance(PointObject)
def _headview_default(self):
return HeadViewController(scene=self.scene, system='RAS')
def _panel_default(self):
panel = FiducialsPanel(model=self.model, headview=self.headview)
panel.trait_view('view', view2)
return panel
def _spanel_default(self):
return SubjectSelectorPanel(model=self.model.subject_source)
view = View(HGroup(Item('scene',
editor=SceneEditor(scene_class=MayaviScene),
dock='vertical'),
VGroup(headview_borders,
VGroup(Item('spanel', style='custom'),
label="Subject", show_border=True,
show_labels=False),
VGroup(Item('panel', style="custom"),
label="Fiducials", show_border=True,
show_labels=False),
show_labels=False),
show_labels=False),
resizable=True,
buttons=NoButtons)
def __init__(self, subject=None, subjects_dir=None,
**kwargs): # noqa: D102
super(FiducialsFrame, self).__init__(**kwargs)
subjects_dir = get_subjects_dir(subjects_dir)
if subjects_dir is not None:
self.spanel.subjects_dir = subjects_dir
if subject is not None:
if subject in self.spanel.subjects:
self.spanel.subject = subject
@on_trait_change('scene.activated')
def _init_plot(self):
_toggle_mlab_render(self, False)
lpa_color = defaults['lpa_color']
nasion_color = defaults['nasion_color']
rpa_color = defaults['rpa_color']
# bem
color = defaults['mri_color']
self.mri_obj = SurfaceObject(points=self.model.points, color=color,
tri=self.model.tris, scene=self.scene)
self.model.on_trait_change(self._on_mri_src_change, 'tris')
self.panel.hsp_obj = self.mri_obj
# fiducials
self.lpa_obj = PointObject(scene=self.scene, color=lpa_color,
has_norm=True,
point_scale=self.point_scale)
self.panel.sync_trait('lpa', self.lpa_obj, 'points', mutual=False)
self.sync_trait('point_scale', self.lpa_obj, mutual=False)
self.nasion_obj = PointObject(scene=self.scene, color=nasion_color,
has_norm=True,
point_scale=self.point_scale)
self.panel.sync_trait('nasion', self.nasion_obj, 'points',
mutual=False)
self.sync_trait('point_scale', self.nasion_obj, mutual=False)
self.rpa_obj = PointObject(scene=self.scene, color=rpa_color,
has_norm=True,
point_scale=self.point_scale)
self.panel.sync_trait('rpa', self.rpa_obj, 'points', mutual=False)
self.sync_trait('point_scale', self.rpa_obj, mutual=False)
self.headview.left = True
_toggle_mlab_render(self, True)
# picker
self.scene.mayavi_scene.on_mouse_pick(self.panel._on_pick, type='cell')
def _on_mri_src_change(self):
if (not np.any(self.model.points)) or (not np.any(self.model.tris)):
self.mri_obj.clear()
return
self.mri_obj.points = self.model.points
self.mri_obj.tri = self.model.tris
self.mri_obj.plot()
|
Teekuningas/mne-python
|
mne/gui/_fiducials_gui.py
|
Python
|
bsd-3-clause
| 20,158
|
[
"Mayavi"
] |
a2c0b00df501659930f36c15b8e7a983e9ba5605683a909d715318d81f63aab3
|
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from fontTools.designspaceLib import DesignSpaceDocument
from xmldiff import main, formatting
import itertools
import pytest
import fontTools.feaLib.parser
import fontTools.feaLib.ast
import glyphsLib
from glyphsLib import to_designspace, to_glyphs
from glyphsLib.util import open_ufo
def test_designspace_generation_regular_same_family_name(tmpdir, ufo_module):
ufo_Lt = ufo_module.Font()
ufo_Lt.info.familyName = "CoolFoundry Examplary Serif"
ufo_Lt.info.styleName = "Light"
ufo_Lt.info.openTypeOS2WeightClass = 300
ufo_Rg = ufo_module.Font()
ufo_Rg.info.familyName = "CoolFoundry Examplary Serif"
ufo_Rg.info.styleName = "Regular"
ufo_Rg.info.openTypeOS2WeightClass = 400
ufo_Md = ufo_module.Font()
ufo_Md.info.familyName = "CoolFoundry Examplary Serif"
ufo_Md.info.styleName = "Medium"
ufo_Md.info.openTypeOS2WeightClass = 500
ufo_Bd = ufo_module.Font()
ufo_Bd.info.familyName = "CoolFoundry Examplary Serif"
ufo_Bd.info.styleName = "Bold"
ufo_Bd.info.openTypeOS2WeightClass = 700
ufo_ExBd = ufo_module.Font()
ufo_ExBd.info.familyName = "CoolFoundry Examplary Serif"
ufo_ExBd.info.styleName = "XBold"
ufo_ExBd.info.openTypeOS2WeightClass = 800
font = to_glyphs([ufo_Lt, ufo_Rg, ufo_Md, ufo_Bd, ufo_ExBd])
designspace = to_designspace(font, ufo_module=ufo_module)
path = os.path.join(str(tmpdir), "actual.designspace")
designspace.write(path)
expected_path = os.path.join(
os.path.dirname(__file__), "..", "data", "DesignspaceGenTestRegular.designspace"
)
assert (
len(main.diff_files(path, expected_path, formatter=formatting.DiffFormatter()))
== 0
)
def test_designspace_generation_italic_same_family_name(tmpdir, ufo_module):
ufo_Lt = ufo_module.Font()
ufo_Lt.info.familyName = "CoolFoundry Examplary Serif"
ufo_Lt.info.styleName = "Light Italic"
ufo_Lt.info.openTypeOS2WeightClass = 300
ufo_Lt.info.italicAngle = -11
ufo_Rg = ufo_module.Font()
ufo_Rg.info.familyName = "CoolFoundry Examplary Serif"
ufo_Rg.info.styleName = "Regular Italic"
ufo_Rg.info.openTypeOS2WeightClass = 400
ufo_Rg.info.italicAngle = -11
ufo_Md = ufo_module.Font()
ufo_Md.info.familyName = "CoolFoundry Examplary Serif"
ufo_Md.info.styleName = "Medium Italic"
ufo_Md.info.openTypeOS2WeightClass = 500
ufo_Md.info.italicAngle = -11
ufo_Bd = ufo_module.Font()
ufo_Bd.info.familyName = "CoolFoundry Examplary Serif"
ufo_Bd.info.styleName = "Bold Italic"
ufo_Bd.info.openTypeOS2WeightClass = 700
ufo_Bd.info.italicAngle = -11
ufo_ExBd = ufo_module.Font()
ufo_ExBd.info.familyName = "CoolFoundry Examplary Serif"
ufo_ExBd.info.styleName = "XBold Italic"
ufo_ExBd.info.openTypeOS2WeightClass = 800
ufo_ExBd.info.italicAngle = -11
font = to_glyphs([ufo_Lt, ufo_Rg, ufo_Md, ufo_Bd, ufo_ExBd])
designspace = to_designspace(font, ufo_module=ufo_module)
path = os.path.join(str(tmpdir), "actual.designspace")
designspace.write(path)
expected_path = os.path.join(
os.path.dirname(__file__), "..", "data", "DesignspaceGenTestItalic.designspace"
)
assert (
len(main.diff_files(path, expected_path, formatter=formatting.DiffFormatter()))
== 0
)
def test_designspace_generation_regular_different_family_names(tmpdir, ufo_module):
ufo_Lt = ufo_module.Font()
ufo_Lt.info.familyName = "CoolFoundry Examplary Serif Light"
ufo_Lt.info.styleName = "Regular"
ufo_Lt.info.openTypeOS2WeightClass = 300
ufo_Rg = ufo_module.Font()
ufo_Rg.info.familyName = "CoolFoundry Examplary Serif"
ufo_Rg.info.styleName = "Regular"
ufo_Rg.info.openTypeOS2WeightClass = 400
# Different family names are not allowed
# REVIEW: reasonable requirement?
with pytest.raises(Exception):
to_glyphs([ufo_Lt, ufo_Rg])
def test_designspace_generation_same_weight_name(tmpdir, ufo_module):
ufo_Bd = ufo_module.Font()
ufo_Bd.info.familyName = "Test"
ufo_Bd.info.styleName = "Bold"
ufo_ExBd = ufo_module.Font()
ufo_ExBd.info.familyName = "Test"
ufo_ExBd.info.styleName = "Bold"
ufo_XExBd = ufo_module.Font()
ufo_XExBd.info.familyName = "Test"
ufo_XExBd.info.styleName = "Bold"
font = to_glyphs([ufo_Bd, ufo_ExBd, ufo_XExBd])
designspace = to_designspace(font, ufo_module=ufo_module)
assert designspace.sources[0].filename != designspace.sources[1].filename
assert designspace.sources[1].filename != designspace.sources[2].filename
assert designspace.sources[0].filename != designspace.sources[2].filename
@pytest.mark.parametrize("filename", ["BraceTestFont.glyphs", "BraceTestFontV3.glyphs"])
def test_designspace_generation_brace_layers(datadir, filename, ufo_module):
with open(str(datadir.join(filename))) as f:
font = glyphsLib.load(f)
designspace = to_designspace(font, ufo_module=ufo_module, minimal=True)
axes_order = [
(a.name, a.minimum, a.default, a.maximum, a.map) for a in designspace.axes
]
assert axes_order == [
("Width", 75, 100, 100, [(75, 50.0), (100, 100.0)]),
("Weight", 100, 100, 700, [(100, 100.0), (700, 1000.0)]),
]
source_order = [(s.filename, s.layerName, s.name) for s in designspace.sources]
assert source_order == [
("NewFont-Light.ufo", None, "New Font Light"),
("NewFont-Light.ufo", "{75}", "New Font Light {75}"),
("NewFont-Bold.ufo", None, "New Font Bold"),
("NewFont-Bold.ufo", "{75}", "New Font Bold {75}"),
("NewFont-Bold.ufo", "Test2 {90.5, 500}", "New Font Bold Test2 {90.5, 500}"),
("NewFont-Bold.ufo", "Test1 {90.5, 600}", "New Font Bold Test1 {90.5, 600}"),
("NewFont-CondensedLight.ufo", None, "New Font Condensed Light"),
("NewFont-CondensedBold.ufo", None, "New Font Condensed Bold"),
]
# Check that all sources have a font object attached and sources with the same
# filename have the same font object attached.
masters = {}
for source in designspace.sources:
assert source.font
if source.filename in masters:
assert masters[source.filename] is source.font
masters[source.filename] = source.font
# Check that brace layer glyph is created
assert len(designspace.sources[0].font.layers) == 2
@pytest.mark.parametrize("filename", ["BraceTestFont.glyphs", "BraceTestFontV3.glyphs"])
def test_designspace_generation_instances(datadir, filename, ufo_module):
with open(str(datadir.join(filename))) as f:
font = glyphsLib.load(f)
designspace = to_designspace(font, ufo_module=ufo_module)
instances_order = [
(i.name, i.styleMapStyleName, i.location) for i in designspace.instances
]
assert instances_order == [
("New Font Thin", "regular", {"Width": 100.0, "Weight": 100.0}),
("New Font Regular", "regular", {"Width": 100.0, "Weight": 500.0}),
("New Font Bold", "bold", {"Width": 100.0, "Weight": 1000.0}),
("New Font Semi Consensed", "regular", {"Width": 75.0, "Weight": 500.0}),
("New Font Thin Condensed", "regular", {"Width": 50.0, "Weight": 100.0}),
("New Font Condensed", "regular", {"Width": 50.0, "Weight": 500.0}),
("New Font Bold Condensed", "bold", {"Width": 50.0, "Weight": 1000.0}),
]
@pytest.mark.parametrize("filename", ["BraceTestFont.glyphs", "BraceTestFontV3.glyphs"])
def test_designspace_generation_on_disk(datadir, tmpdir, filename, ufo_module):
glyphsLib.build_masters(str(datadir.join(filename)), str(tmpdir))
ufo_paths = list(tmpdir.visit(fil="*.ufo"))
assert len(ufo_paths) == 4 # Source layers should not be written to disk.
for ufo_path in ufo_paths:
ufo = open_ufo(ufo_path, ufo_module.Font)
# Check that all glyphs have contours (brace layers are in "b" only, writing
# the brace layer to disk would result in empty other glyphs).
for layer in ufo.layers:
for glyph in layer:
if glyph.name == "space":
assert not glyph
else:
assert glyph
def test_designspace_generation_bracket_roundtrip(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont.glyphs"))) as f:
font = glyphsLib.load(f)
designspace = to_designspace(font, ufo_module=ufo_module)
assert designspace.rules[0].name == "BRACKET.300.600"
assert designspace.rules[0].conditionSets == [
[dict(name="Weight", minimum=300, maximum=600)]
]
assert designspace.rules[0].subs == [("x", "x.BRACKET.300")]
assert designspace.rules[1].name == "BRACKET.300.1000"
assert designspace.rules[1].conditionSets == [
[dict(name="Weight", minimum=300, maximum=1000)]
]
assert designspace.rules[1].subs == [("a", "a.BRACKET.300")]
assert designspace.rules[2].name == "BRACKET.600.1000"
assert designspace.rules[2].conditionSets == [
[dict(name="Weight", minimum=600, maximum=1000)]
]
assert designspace.rules[2].subs == [("x", "x.BRACKET.600")]
for source in designspace.sources:
assert "[300]" not in source.font.layers
assert "Something [300]" not in source.font.layers
assert "[600]" not in source.font.layers
assert "Other [600]" not in source.font.layers
g1 = source.font["x.BRACKET.300"]
assert not g1.unicodes
g2 = source.font["x.BRACKET.600"]
assert not g2.unicodes
font_rt = to_glyphs(designspace)
assert "x" in font_rt.glyphs
g1 = font_rt.glyphs["x"]
assert len(g1.layers) == 12 and {l.name for l in g1.layers} == {
"[300]",
"[600]",
"Bold",
"Condensed Bold",
"Condensed Light",
"Light",
"Other [600]",
"Something [300]",
}
g2 = font_rt.glyphs["a"]
assert len(g2.layers) == 8 and {l.name for l in g2.layers} == {
"[300]",
"Bold",
"Condensed Bold",
"Condensed Light",
"Light",
}
assert "a.BRACKET.300" not in font_rt.glyphs
assert "x.BRACKET.300" not in font_rt.glyphs
assert "x.BRACKET.600" not in font_rt.glyphs
def test_designspace_generation_bracket_roundtrip_psnames(datadir, ufo_module):
with open(str(datadir.join("PSNames.glyphs"))) as f:
font = glyphsLib.load(f)
designspace: DesignSpaceDocument = to_designspace(font, ufo_module=ufo_module)
assert designspace.findDefault().font.lib["public.postscriptNames"] == {
"a-cy": "uni0430",
"a-cy.BRACKET.18": "uni0430.BRACKET.18",
"a-cy.alt": "uni0430.alt",
}
font_rt = to_glyphs(designspace)
designspace_rt = to_designspace(font_rt, ufo_module=ufo_module)
assert designspace_rt.findDefault().font.lib["public.postscriptNames"] == {
"a-cy": "uni0430",
"a-cy.BRACKET.18": "uni0430.BRACKET.18",
"a-cy.alt": "uni0430.alt",
}
font_rt2 = to_glyphs(designspace_rt)
designspace_rt2 = to_designspace(font_rt2, ufo_module=ufo_module)
assert designspace_rt2.findDefault().font.lib["public.postscriptNames"] == {
"a-cy": "uni0430",
"a-cy.BRACKET.18": "uni0430.BRACKET.18",
"a-cy.alt": "uni0430.alt",
}
def test_designspace_generation_bracket_roundtrip_no_layername(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont.glyphs"))) as f:
font = glyphsLib.load(f)
# Remove brace layers for clean slate.
master_ids = {m.id for m in font.masters}
for g in font.glyphs:
dl = [l for l in g.layers if l.layerId not in master_ids]
for l in dl:
g.layers.remove(l)
designspace = to_designspace(font, ufo_module=ufo_module)
for source in designspace.sources:
source.font.newGlyph("b.BRACKET.100")
font_rt = to_glyphs(designspace)
for layer in font_rt.glyphs["b"].layers:
if layer.layerId not in master_ids:
assert layer.name == "[100]"
def test_designspace_generation_bracket_unbalanced_brackets(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont2.glyphs"))) as f:
font = glyphsLib.load(f)
layer_names = {l.name for l in font.glyphs["C"].layers}
assert layer_names == {"Regular", "Bold", "Bold [600]"}
designspace = to_designspace(font, ufo_module=ufo_module)
for source in designspace.sources:
assert "C.BRACKET.600" in source.font
font_rt = to_glyphs(designspace)
assert "C" in font_rt.glyphs
assert {l.name for l in font_rt.glyphs["C"].layers} == layer_names
assert "C.BRACKET.600" not in font_rt.glyphs
def test_designspace_generation_bracket_composite_glyph(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont2.glyphs"))) as f:
font = glyphsLib.load(f)
g = font.glyphs["B"]
for layer in g.layers:
assert layer.components[0].name == "A"
designspace = to_designspace(font, ufo_module=ufo_module)
for source in designspace.sources:
ufo = source.font
assert "B.BRACKET.600" in ufo
assert ufo["B"].components[0].baseGlyph == "A"
assert ufo["B.BRACKET.600"].components[0].baseGlyph == "A.BRACKET.600"
font_rt = to_glyphs(designspace)
assert "B" in font_rt.glyphs
g2 = font_rt.glyphs["B"]
for layer in g2.layers:
assert layer.components[0].name == "A"
assert "B.BRACKET.600" not in font_rt.glyphs
def test_designspace_generation_reverse_bracket_roundtrip(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont2.glyphs"))) as f:
font = glyphsLib.load(f)
g = font.glyphs["D"]
assert {"Regular ]600]", "Bold ]600]"}.intersection(l.name for l in g.layers)
designspace = to_designspace(font, ufo_module=ufo_module)
assert designspace.rules[1].name == "BRACKET.400.600"
assert designspace.rules[1].conditionSets == [
[dict(name="Weight", minimum=400, maximum=600)]
]
assert designspace.rules[1].subs == [("D", "D.REV_BRACKET.600")]
for source in designspace.sources:
ufo = source.font
assert "D.REV_BRACKET.600" in ufo
font_rt = to_glyphs(designspace)
assert "D" in font_rt.glyphs
g2 = font_rt.glyphs["D"]
assert {"Regular ]600]", "Bold ]600]"}.intersection(l.name for l in g2.layers)
assert "D.REV_BRACKET.600" not in font_rt.glyphs
def test_designspace_generation_bracket_no_export_glyph(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont2.glyphs"))) as f:
font = glyphsLib.load(f)
font.glyphs["E"].export = False
designspace = to_designspace(
font, write_skipexportglyphs=True, ufo_module=ufo_module, minimal=False
)
assert "E" in designspace.lib.get("public.skipExportGlyphs")
for source in designspace.sources:
assert "E.REV_BRACKET.570" not in source.font
assert "E.BRACKET.630" not in source.font
for rule in designspace.rules:
assert "E" not in {g for g in itertools.chain(*rule.subs)}
font_rt = to_glyphs(designspace)
assert "E" in font_rt.glyphs
assert {l.name for l in font_rt.glyphs["E"].layers} == {
"Regular",
"Regular [630]",
"Bold",
"Bold ]570]",
}
def test_designspace_generation_bracket_GDEF(datadir, ufo_module):
with open(str(datadir.join("BracketTestFont.glyphs"))) as f:
font = glyphsLib.load(f)
# add some attaching anchors to the "x" glyph and its (bracket) layers to
# trigger the generation of GDEF table
for layer in font.glyphs["x"].layers:
anchor = glyphsLib.classes.GSAnchor()
anchor.name = "top"
anchor.position = (0, 0)
layer.anchors.append(anchor)
designspace = to_designspace(font, ufo_module=ufo_module, generate_GDEF=True)
for source in designspace.sources:
ufo = source.font
features = fontTools.feaLib.parser.Parser(
io.StringIO(ufo.features.text), glyphNames=ufo.keys()
).parse()
for stmt in features.statements:
if (
isinstance(stmt, fontTools.feaLib.ast.TableBlock)
and stmt.name == "GDEF"
):
gdef = stmt
for stmt in gdef.statements:
if isinstance(stmt, fontTools.feaLib.ast.GlyphClassDefStatement):
glyph_class_defs = stmt
break
else:
pytest.fail(
f"No GDEF.GlyphClassDef statement found in {ufo!r} features:\n"
f"{ufo.features.text}"
)
break
else:
pytest.fail(
f"No GDEF table definition found in {ufo!r} features:\n"
f"{ufo.features.text}"
)
assert set(glyph_class_defs.baseGlyphs.glyphSet()) == {
"x",
"x.BRACKET.300",
"x.BRACKET.600",
}
def test_designspace_generation_bracket_glyphs3_simple(datadir, ufo_module):
with open(str(datadir.join("Alternate-g3-axis1.glyphs"))) as f:
font = glyphsLib.load(f)
designspace = to_designspace(font, ufo_module=ufo_module)
for source in designspace.sources:
assert "A.BRACKET.600" in source.font
|
googlefonts/glyphsLib
|
tests/builder/designspace_gen_test.py
|
Python
|
apache-2.0
| 17,982
|
[
"VisIt"
] |
66f28c27794fcda74798e4599bbaa36fb5b10b4efd876d1e24375d822e70f4b3
|
"""
Given two words (beginWord and endWord), and a dictionary's word list, find all shortest transformation sequence(s) from beginWord to endWord, such that:
Only one letter can be changed at a time
Each intermediate word must exist in the word list
For example,
Given:
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log"]
Return
[
["hit","hot","dot","dog","cog"],
["hit","hot","lot","log","cog"]
]
Note:
All words have the same length.
All words contain only lowercase alphabetic characters.
"""
from collections import deque
from collections import defaultdict
class Solution:
# @param start, a string
# @param end, a string
# @param dict, a set of string
# @return a list of lists of string
def findLadders(self, start, end, dict):
if len(dict) == 0 or start == end:
return
allChars = [chr(i) for i in range(ord('a'), ord('z') + 1)]
        wordQueue = deque([]) # BFS queue
        visited = set() # holds nodes already visited in the current level
        to_be_visited = set() # holds nodes that will be visited in the next level
        parents = defaultdict(set) # keeps track of the ancestors of a given word
        found = False # if True, we reached the end word and need not continue
wordQueue.append(start)
visited.add(start)
dict.add(start)
result = []
# perform bfs in level order paradigm
while wordQueue and not(found):
# remove nodes that have been visited before
# from the dictionary (to prevent loops)
for next_word in wordQueue:
dict.remove(next_word)
to_be_visited.clear()
# visit all nodes in current level
level_len = len(wordQueue)
for l in range(level_len):
word = wordQueue.popleft()
                # search for single-letter replacements
for i in range(len(word)):
for char in allChars:
if char == word[i]:
continue
newWord = word[:i] + char + word[i + 1:]
if newWord == end: # search will be terminated (min path found)
found = True
parents[newWord].add(word)
else: # check visiting the new node
if newWord in dict: # add link
parents[newWord].add(word)
if not newWord in to_be_visited: # visit the node
wordQueue.append(newWord)
to_be_visited.add(newWord)
self.find_paths(end, start, [], result, parents)
return result
def find_paths(self, current, end, path, result, parents):
        # add the current node to the path (paths are rebuilt in reverse)
path.append(current)
# reached the end
if current == end:
found_path = list(path)
found_path.reverse()
result.append(found_path)
# move up the tree
for parent in parents[current]:
self.find_paths(parent, end, path, result, parents)
# backtrack
path.pop(-1)
s = Solution()
print(s.findLadders("hit", "cog", set(["hot","dot","dog","lot","log"])))
|
Ahmed--Mohsen/leetcode
|
word_ladder_2.py
|
Python
|
mit
| 2,853
|
[
"VisIt"
] |
29a835255d6eeda4973b73d6ca895310cb7aa5edf7d915f5b00c13a79b0ab068
|
#!/usr/bin/env python3
#
# Copyright (C) 2015 INRA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__author__ = 'Maria Bernard - Sigenae AND Frederic Escudie - Plateforme bioinformatique Toulouse'
__copyright__ = 'Copyright (C) 2015 INRA'
__license__ = 'GNU General Public License'
__version__ = '1.5.0'
__email__ = 'frogs-support@inrae.fr'
__status__ = 'prod'
import os
import sys
import argparse
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
LIB_DIR = os.path.abspath(os.path.join(os.path.dirname(CURRENT_DIR), "lib"))
sys.path.append(LIB_DIR)
if os.getenv('PYTHONPATH') is None: os.environ['PYTHONPATH'] = LIB_DIR
else: os.environ['PYTHONPATH'] = os.environ['PYTHONPATH'] + os.pathsep + LIB_DIR
from frogsBiom import BiomIO
from frogsSequenceIO import *
from frogsUtils import *
##################################################################################################################################################
#
# FUNCTIONS
#
##################################################################################################################################################
def uniq( metadata_list, evaluated_tag, ambiguity_value ):
value = None
for metadata_elt in metadata_list:
if value is None:
value = metadata_elt[evaluated_tag]
elif value != metadata_elt[evaluated_tag]:
value = ambiguity_value
return value
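# Illustrative behaviour of uniq (values are hypothetical, not from a real BIOM file):
#   uniq([{"perc_identity": 98.5}, {"perc_identity": 98.5}], "perc_identity", "multi-identity") -> 98.5
#   uniq([{"perc_identity": 98.5}, {"perc_identity": 97.0}], "perc_identity", "multi-identity") -> "multi-identity"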
def observation_line_parts( observation, count_by_sample, fields, list_separator ):
no_data_str = "no data"
print(observation)
line = list()
for current_field in fields:
if current_field == '@observation_name':
line.append( str(observation['id']) )
elif current_field == '@sample_count':
line.append( "\t".join(map(str, count_by_sample)) )
elif current_field == '@observation_sum':
line.append( str(sum(count_by_sample)) )
elif current_field == "@rdp_tax_and_bootstrap":
rdp_and_bootstrap = ""
if issubclass(observation['metadata']["rdp_taxonomy"].__class__, list) :
if len(observation['metadata']["rdp_taxonomy"]) > 0 :
rdp_taxonomy = observation['metadata']["rdp_taxonomy"]
rdp_bootstrap = observation['metadata']["rdp_bootstrap"]
for i, tax in enumerate(rdp_taxonomy):
rdp_and_bootstrap += tax + ";(" + str(rdp_bootstrap[i]) + ");" # tax1;(boots1);tax2;(boots2);
else:
if observation['metadata']["rdp_taxonomy"]:
rdp_taxonomy = observation['metadata']["rdp_taxonomy"].split(";")
rdp_bootstrap = observation['metadata']["rdp_bootstrap"].split(";")
for i, tax in enumerate(rdp_taxonomy):
rdp_and_bootstrap += tax + ";(" + str(rdp_bootstrap[i]) + ");" # tax1;(boots1);tax2;(boots2);
if rdp_and_bootstrap != "" :
line.append(str(rdp_and_bootstrap))
else:
line.append(no_data_str)
elif current_field == "@blast_perc_identity":
if issubclass(observation['metadata']["blast_affiliations"].__class__, list) and len(observation['metadata']["blast_affiliations"]) > 0:
line.append( str(uniq(observation['metadata']["blast_affiliations"], "perc_identity", "multi-identity")) )
else:
line.append( no_data_str )
elif current_field == "@blast_perc_query_coverage":
if issubclass(observation['metadata']["blast_affiliations"].__class__, list) and len(observation['metadata']["blast_affiliations"]) > 0:
line.append( str(uniq(observation['metadata']["blast_affiliations"], "perc_query_coverage", "multi-coverage")) )
else:
line.append( no_data_str )
elif current_field == "@blast_evalue":
if issubclass(observation['metadata']["blast_affiliations"].__class__, list) and len(observation['metadata']["blast_affiliations"]) > 0:
line.append( str(uniq(observation['metadata']["blast_affiliations"], "evalue", "multi-evalue")) )
else:
line.append( no_data_str )
elif current_field == "@blast_subject":
if issubclass(observation['metadata']["blast_affiliations"].__class__, list) and len(observation['metadata']["blast_affiliations"]) > 0:
line.append( str(uniq(observation['metadata']["blast_affiliations"], "subject", "multi-subject")) )
else:
line.append( no_data_str )
elif current_field == "@blast_aln_length":
if issubclass(observation['metadata']["blast_affiliations"].__class__, list) and len(observation['metadata']["blast_affiliations"]) > 0:
line.append( str(uniq(observation['metadata']["blast_affiliations"], "aln_length", "multi-alignment-lg")) )
else:
line.append( no_data_str )
else: #metadata
if issubclass(observation['metadata'][current_field].__class__, list):
if len(observation['metadata'][current_field]) > 0 :
line.append( list_separator.join(observation['metadata'][current_field]) )
else:
line.append(no_data_str)
elif issubclass(observation['metadata'][current_field].__class__, str):
line.append( str(observation['metadata'][current_field]) )
else : # like blast taxonomy = None
line.append(no_data_str)
return line
def header_line_parts( fields, biom ):
header_parts = list()
for current_field in fields:
if current_field == '@observation_name':
header_parts.append( "observation_name" )
elif current_field == '@sample_count':
header_parts.append( "\t".join(biom.get_samples_names()) )
elif current_field == '@observation_sum':
header_parts.append( "observation_sum" )
elif current_field == '@rdp_tax_and_bootstrap':
header_parts.append( "rdp_tax_and_bootstrap" )
elif current_field == "@blast_perc_identity":
header_parts.append( "blast_perc_identity" )
elif current_field == "@blast_perc_query_coverage":
header_parts.append( "blast_perc_query_coverage" )
elif current_field == "@blast_evalue":
header_parts.append( "blast_evalue" )
elif current_field == "@blast_subject":
header_parts.append( "blast_subject" )
elif current_field == "@blast_aln_length":
header_parts.append( "blast_aln_length" )
elif current_field == '@seed_sequence':
header_parts.append( "seed_sequence" )
else: #metadata
header_parts.append( str(current_field) )
return header_parts
def biom_to_tsv( input_biom, output_tsv, fields, list_separator ):
"""
@summary: Convert BIOM file to TSV file.
@param input_biom: [str] Path to the BIOM file.
@param output_tsv: [str] Path to the output file (format : TSV).
    @param fields: [list] Columns and their order in output. Special columns: '@observation_name', '@observation_sum', '@sample_count', '@rdp_tax_and_bootstrap'. The other columns must be metadata titles.
@param list_separator: [str] Separator for complex metadata.
"""
biom = BiomIO.from_json( input_biom )
out_fh = open( output_tsv, "wt" )
# Header
header_parts = header_line_parts( fields, biom )
out_fh.write( "#" + "\t".join(header_parts) + "\n" )
# Data
for obs_idx, count_by_sample in enumerate(biom.to_count()):
observation_parts = observation_line_parts( biom.rows[obs_idx], count_by_sample, fields, list_separator )
out_fh.write( "\t".join(observation_parts) + "\n" )
out_fh.close()
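# Minimal usage sketch (file paths and the "blast_taxonomy" metadata name are hypothetical):
#   biom_to_tsv("clusters.biom", "clusters.tsv",
#               ["@observation_name", "@observation_sum", "@sample_count", "blast_taxonomy"],
#               ";")
# writes a "#"-prefixed header line, then one tab-separated line per observation,
# with '@sample_count' expanded into one column per sample of the BIOM file.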
def biom_fasta_to_tsv( input_biom, input_fasta, output_tsv, fields, list_separator ):
"""
@summary: Convert BIOM file to TSV file with sequence.
@param input_biom: [str] Path to the BIOM file.
@param input_fasta: [str] Path to the sequences of the observations.
@param output_tsv: [str] Path to the output file (format : TSV).
    @param fields: [list] Columns and their order in output. Special columns: '@observation_name', '@observation_sum', '@sample_count', '@rdp_tax_and_bootstrap', '@seed_sequence'. The other columns must be metadata titles.
@param list_separator: [str] Separator for complex metadata.
"""
biom = BiomIO.from_json( input_biom )
observation_list = [ name for name in biom.get_observations_names()]
out_fh = open( output_tsv, "wt" )
sequence_idx = fields.index("@seed_sequence")
# Header
header_parts = header_line_parts( fields, biom )
out_fh.write( "#" + "\t".join(header_parts) + "\n" )
# Data
    fields_without_seq = list(fields)  # copy so the caller's field list is not mutated
    del fields_without_seq[sequence_idx]
FH_in = FastaIO( input_fasta )
for record in FH_in:
try :
obs_idx = biom.find_idx("observation", record.id)
count_by_sample = biom.data.get_row_array(obs_idx)
observation_parts = observation_line_parts( biom.rows[obs_idx], count_by_sample, fields_without_seq, list_separator )
observation_parts.insert( sequence_idx, record.string )
out_fh.write( "\t".join(observation_parts) + "\n" )
observation_list.remove(record.id)
except:
pass
out_fh.close()
if len(observation_list) > 0:
raise_exception(Exception("\n\n##ERROR : your input fasta file (" + input_fasta + ") does not contain sequence for :" + ", ".join(observation_list) + "\n"))
##################################################################################################################################################
#
# MAIN
#
##################################################################################################################################################
if __name__ == "__main__":
# Manage parameters
parser = argparse.ArgumentParser( description='Convert BIOM file to TSV file.' )
parser.add_argument( '-v', '--version', action='version', version=__version__ )
    parser.add_argument( '-f', '--fields', default=['@observation_name', '@observation_sum', '@sample_count'], nargs='+', help="Columns and their order in output. Special columns: '@observation_name', '@observation_sum', '@sample_count', '@rdp_tax_and_bootstrap', '@seed_sequence'. The other columns must be metadata titles. [Default: %(default)s]")
parser.add_argument( '-s', '--list-separator', default=';', help='Separator for complex metadata. [Default: %(default)s]')
group_input = parser.add_argument_group( 'Inputs' ) # Inputs
group_input.add_argument( '-i', '--input-file', required=True, help='Path to the abundance file (format: BIOM).' )
group_input.add_argument( '-a', '--input-fasta', default=None, required=False, help='Path to the sequences file (format: fasta).' )
group_output = parser.add_argument_group( 'Outputs' ) # Outputs
group_output.add_argument( '-o', '--output-file', required=True, help='Path to the output file (format: TSV).')
args = parser.parse_args()
# Process
if args.input_fasta is not None:
biom_fasta_to_tsv( args.input_file, args.input_fasta, args.output_file, args.fields, args.list_separator )
else:
biom_to_tsv( args.input_file, args.output_file, args.fields, args.list_separator )
|
geraldinepascal/FROGS
|
libexec/biom2tsv.py
|
Python
|
gpl-3.0
| 12,058
|
[
"BLAST"
] |
3c706e199b39eeaa3bca60cbd2d85f2c72dfaf2e89d8eb8f244e1535aee0a648
|
# -*- coding: utf-8 -*-
import os
from PIL import Image
import os.path
import time
import torch
import torchvision.datasets as dset
import torchvision.transforms as trn
import torch.utils.data as data
import numpy as np
from PIL import Image
# /////////////// Distortion Helpers ///////////////
import skimage as sk
from skimage.filters import gaussian
from io import BytesIO
from wand.image import Image as WandImage
from wand.api import library as wandlibrary
import wand.color as WandColor
import ctypes
from PIL import Image as PILImage
import cv2
from scipy.ndimage import zoom as scizoom
from scipy.ndimage.interpolation import map_coordinates
import warnings
warnings.simplefilter("ignore", UserWarning)
def disk(radius, alias_blur=0.1, dtype=np.float32):
if radius <= 8:
L = np.arange(-8, 8 + 1)
ksize = (3, 3)
else:
L = np.arange(-radius, radius + 1)
ksize = (5, 5)
X, Y = np.meshgrid(L, L)
aliased_disk = np.array((X ** 2 + Y ** 2) <= radius ** 2, dtype=dtype)
aliased_disk /= np.sum(aliased_disk)
# supersample disk to antialias
return cv2.GaussianBlur(aliased_disk, ksize=ksize, sigmaX=alias_blur)
# Tell Python about the C method
wandlibrary.MagickMotionBlurImage.argtypes = (ctypes.c_void_p, # wand
ctypes.c_double, # radius
ctypes.c_double, # sigma
ctypes.c_double) # angle
# Extend wand.image.Image class to include method signature
class MotionImage(WandImage):
def motion_blur(self, radius=0.0, sigma=0.0, angle=0.0):
wandlibrary.MagickMotionBlurImage(self.wand, radius, sigma, angle)
# modification of https://github.com/FLHerne/mapgen/blob/master/diamondsquare.py
def plasma_fractal(mapsize=32, wibbledecay=3):
"""
Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats normalized to the range 0-1.
'mapsize' must be a power of two.
"""
assert (mapsize & (mapsize - 1) == 0)
maparray = np.empty((mapsize, mapsize), dtype=np.float_)
maparray[0, 0] = 0
stepsize = mapsize
wibble = 100
def wibbledmean(array):
return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)
def fillsquares():
"""For each square of points stepsize apart,
calculate middle value as mean of points + wibble"""
cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
squareaccum += np.roll(squareaccum, shift=-1, axis=1)
maparray[stepsize // 2:mapsize:stepsize,
stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)
def filldiamonds():
"""For each diamond of points stepsize apart,
calculate middle value as mean of points + wibble"""
mapsize = maparray.shape[0]
drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
ltsum = ldrsum + lulsum
maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
ttsum = tdrsum + tulsum
maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)
while stepsize >= 2:
fillsquares()
filldiamonds()
stepsize //= 2
wibble /= wibbledecay
maparray -= maparray.min()
return maparray / maparray.max()
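# Rough usage note: plasma_fractal(mapsize=32, wibbledecay=3) yields a 32x32 float
# heightmap rescaled to [0, 1]; fog() below adds it to the image as a low-frequency
# haze layer. mapsize must remain a power of two or the assert above fires.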
def clipped_zoom(img, zoom_factor):
h = img.shape[0]
# ceil crop height(= crop width)
ch = int(np.ceil(h / zoom_factor))
top = (h - ch) // 2
img = scizoom(img[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
# trim off any extra pixels
trim_top = (img.shape[0] - h) // 2
return img[trim_top:trim_top + h, trim_top:trim_top + h]
# /////////////// End Distortion Helpers ///////////////
# /////////////// Distortions ///////////////
def gaussian_noise(x, severity=1):
c = [0.04, 0.06, .08, .09, .10][severity - 1]
x = np.array(x) / 255.
return np.clip(x + np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def shot_noise(x, severity=1):
c = [500, 250, 100, 75, 50][severity - 1]
x = np.array(x) / 255.
return np.clip(np.random.poisson(x * c) / c, 0, 1) * 255
def impulse_noise(x, severity=1):
c = [.01, .02, .03, .05, .07][severity - 1]
x = sk.util.random_noise(np.array(x) / 255., mode='s&p', amount=c)
return np.clip(x, 0, 1) * 255
def speckle_noise(x, severity=1):
c = [.06, .1, .12, .16, .2][severity - 1]
x = np.array(x) / 255.
return np.clip(x + x * np.random.normal(size=x.shape, scale=c), 0, 1) * 255
def gaussian_blur(x, severity=1):
c = [.4, .6, 0.7, .8, 1][severity - 1]
x = gaussian(np.array(x) / 255., sigma=c, multichannel=True)
return np.clip(x, 0, 1) * 255
def glass_blur(x, severity=1):
# sigma, max_delta, iterations
c = [(0.05,1,1), (0.25,1,1), (0.4,1,1), (0.25,1,2), (0.4,1,2)][severity - 1]
x = np.uint8(gaussian(np.array(x) / 255., sigma=c[0], multichannel=True) * 255)
# locally shuffle pixels
for i in range(c[2]):
for h in range(32 - c[1], c[1], -1):
for w in range(32 - c[1], c[1], -1):
dx, dy = np.random.randint(-c[1], c[1], size=(2,))
h_prime, w_prime = h + dy, w + dx
# swap
x[h, w], x[h_prime, w_prime] = x[h_prime, w_prime], x[h, w]
return np.clip(gaussian(x / 255., sigma=c[0], multichannel=True), 0, 1) * 255
def defocus_blur(x, severity=1):
c = [(0.3, 0.4), (0.4, 0.5), (0.5, 0.6), (1, 0.2), (1.5, 0.1)][severity - 1]
x = np.array(x) / 255.
kernel = disk(radius=c[0], alias_blur=c[1])
channels = []
for d in range(3):
channels.append(cv2.filter2D(x[:, :, d], -1, kernel))
channels = np.array(channels).transpose((1, 2, 0)) # 3x32x32 -> 32x32x3
return np.clip(channels, 0, 1) * 255
def motion_blur(x, severity=1):
c = [(6,1), (6,1.5), (6,2), (8,2), (9,2.5)][severity - 1]
output = BytesIO()
x.save(output, format='PNG')
x = MotionImage(blob=output.getvalue())
x.motion_blur(radius=c[0], sigma=c[1], angle=np.random.uniform(-45, 45))
x = cv2.imdecode(np.fromstring(x.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED)
if x.shape != (32, 32):
return np.clip(x[..., [2, 1, 0]], 0, 255) # BGR to RGB
else: # greyscale to RGB
return np.clip(np.array([x, x, x]).transpose((1, 2, 0)), 0, 255)
def zoom_blur(x, severity=1):
c = [np.arange(1, 1.06, 0.01), np.arange(1, 1.11, 0.01), np.arange(1, 1.16, 0.01),
np.arange(1, 1.21, 0.01), np.arange(1, 1.26, 0.01)][severity - 1]
x = (np.array(x) / 255.).astype(np.float32)
out = np.zeros_like(x)
for zoom_factor in c:
out += clipped_zoom(x, zoom_factor)
x = (x + out) / (len(c) + 1)
return np.clip(x, 0, 1) * 255
def fog(x, severity=1):
c = [(.2,3), (.5,3), (0.75,2.5), (1,2), (1.5,1.75)][severity - 1]
x = np.array(x) / 255.
max_val = x.max()
x += c[0] * plasma_fractal(wibbledecay=c[1])[:32, :32][..., np.newaxis]
return np.clip(x * max_val / (max_val + c[0]), 0, 1) * 255
def frost(x, severity=1):
c = [(1, 0.2), (1, 0.3), (0.9, 0.4), (0.85, 0.4), (0.75, 0.45)][severity - 1]
idx = np.random.randint(5)
filename = ['./frost1.png', './frost2.png', './frost3.png', './frost4.jpg', './frost5.jpg', './frost6.jpg'][idx]
frost = cv2.imread(filename)
frost = cv2.resize(frost, (0, 0), fx=0.2, fy=0.2)
# randomly crop and convert to rgb
x_start, y_start = np.random.randint(0, frost.shape[0] - 32), np.random.randint(0, frost.shape[1] - 32)
frost = frost[x_start:x_start + 32, y_start:y_start + 32][..., [2, 1, 0]]
return np.clip(c[0] * np.array(x) + c[1] * frost, 0, 255)
def snow(x, severity=1):
c = [(0.1,0.2,1,0.6,8,3,0.95),
(0.1,0.2,1,0.5,10,4,0.9),
(0.15,0.3,1.75,0.55,10,4,0.9),
(0.25,0.3,2.25,0.6,12,6,0.85),
(0.3,0.3,1.25,0.65,14,12,0.8)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
snow_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1]) # [:2] for monochrome
snow_layer = clipped_zoom(snow_layer[..., np.newaxis], c[2])
snow_layer[snow_layer < c[3]] = 0
snow_layer = PILImage.fromarray((np.clip(snow_layer.squeeze(), 0, 1) * 255).astype(np.uint8), mode='L')
output = BytesIO()
snow_layer.save(output, format='PNG')
snow_layer = MotionImage(blob=output.getvalue())
snow_layer.motion_blur(radius=c[4], sigma=c[5], angle=np.random.uniform(-135, -45))
snow_layer = cv2.imdecode(np.fromstring(snow_layer.make_blob(), np.uint8),
cv2.IMREAD_UNCHANGED) / 255.
snow_layer = snow_layer[..., np.newaxis]
x = c[6] * x + (1 - c[6]) * np.maximum(x, cv2.cvtColor(x, cv2.COLOR_RGB2GRAY).reshape(32, 32, 1) * 1.5 + 0.5)
return np.clip(x + snow_layer + np.rot90(snow_layer, k=2), 0, 1) * 255
def spatter(x, severity=1):
c = [(0.62,0.1,0.7,0.7,0.5,0),
(0.65,0.1,0.8,0.7,0.5,0),
(0.65,0.3,1,0.69,0.5,0),
(0.65,0.1,0.7,0.69,0.6,1),
(0.65,0.1,0.5,0.68,0.6,1)][severity - 1]
x = np.array(x, dtype=np.float32) / 255.
liquid_layer = np.random.normal(size=x.shape[:2], loc=c[0], scale=c[1])
liquid_layer = gaussian(liquid_layer, sigma=c[2])
liquid_layer[liquid_layer < c[3]] = 0
if c[5] == 0:
liquid_layer = (liquid_layer * 255).astype(np.uint8)
dist = 255 - cv2.Canny(liquid_layer, 50, 150)
dist = cv2.distanceTransform(dist, cv2.DIST_L2, 5)
_, dist = cv2.threshold(dist, 20, 20, cv2.THRESH_TRUNC)
dist = cv2.blur(dist, (3, 3)).astype(np.uint8)
dist = cv2.equalizeHist(dist)
# ker = np.array([[-1,-2,-3],[-2,0,0],[-3,0,1]], dtype=np.float32)
# ker -= np.mean(ker)
ker = np.array([[-2, -1, 0], [-1, 1, 1], [0, 1, 2]])
dist = cv2.filter2D(dist, cv2.CV_8U, ker)
dist = cv2.blur(dist, (3, 3)).astype(np.float32)
m = cv2.cvtColor(liquid_layer * dist, cv2.COLOR_GRAY2BGRA)
m /= np.max(m, axis=(0, 1))
m *= c[4]
        # water is pale turquoise
color = np.concatenate((175 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1]),
238 / 255. * np.ones_like(m[..., :1])), axis=2)
color = cv2.cvtColor(color, cv2.COLOR_BGR2BGRA)
x = cv2.cvtColor(x, cv2.COLOR_BGR2BGRA)
return cv2.cvtColor(np.clip(x + m * color, 0, 1), cv2.COLOR_BGRA2BGR) * 255
else:
m = np.where(liquid_layer > c[3], 1, 0)
m = gaussian(m.astype(np.float32), sigma=c[4])
m[m < 0.8] = 0
# m = np.abs(m) ** (1/c[4])
# mud brown
color = np.concatenate((63 / 255. * np.ones_like(x[..., :1]),
42 / 255. * np.ones_like(x[..., :1]),
20 / 255. * np.ones_like(x[..., :1])), axis=2)
color *= m[..., np.newaxis]
x *= (1 - m[..., np.newaxis])
return np.clip(x + color, 0, 1) * 255
def contrast(x, severity=1):
c = [.75, .5, .4, .3, 0.15][severity - 1]
x = np.array(x) / 255.
means = np.mean(x, axis=(0, 1), keepdims=True)
return np.clip((x - means) * c + means, 0, 1) * 255
def brightness(x, severity=1):
c = [.05, .1, .15, .2, .3][severity - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 2] = np.clip(x[:, :, 2] + c, 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def saturate(x, severity=1):
c = [(0.3, 0), (0.1, 0), (1.5, 0), (2, 0.1), (2.5, 0.2)][severity - 1]
x = np.array(x) / 255.
x = sk.color.rgb2hsv(x)
x[:, :, 1] = np.clip(x[:, :, 1] * c[0] + c[1], 0, 1)
x = sk.color.hsv2rgb(x)
return np.clip(x, 0, 1) * 255
def jpeg_compression(x, severity=1):
c = [80, 65, 58, 50, 40][severity - 1]
output = BytesIO()
x.save(output, 'JPEG', quality=c)
x = PILImage.open(output)
return x
def pixelate(x, severity=1):
c = [0.95, 0.9, 0.85, 0.75, 0.65][severity - 1]
x = x.resize((int(32 * c), int(32 * c)), PILImage.BOX)
x = x.resize((32, 32), PILImage.BOX)
return x
# mod of https://gist.github.com/erniejunior/601cdf56d2b424757de5
def elastic_transform(image, severity=1):
IMSIZE = 32
c = [(IMSIZE*0, IMSIZE*0, IMSIZE*0.08),
(IMSIZE*0.05, IMSIZE*0.2, IMSIZE*0.07),
(IMSIZE*0.08, IMSIZE*0.06, IMSIZE*0.06),
(IMSIZE*0.1, IMSIZE*0.04, IMSIZE*0.05),
(IMSIZE*0.1, IMSIZE*0.03, IMSIZE*0.03)][severity - 1]
image = np.array(image, dtype=np.float32) / 255.
shape = image.shape
shape_size = shape[:2]
# random affine
center_square = np.float32(shape_size) // 2
square_size = min(shape_size) // 3
pts1 = np.float32([center_square + square_size,
[center_square[0] + square_size, center_square[1] - square_size],
center_square - square_size])
pts2 = pts1 + np.random.uniform(-c[2], c[2], size=pts1.shape).astype(np.float32)
M = cv2.getAffineTransform(pts1, pts2)
image = cv2.warpAffine(image, M, shape_size[::-1], borderMode=cv2.BORDER_REFLECT_101)
dx = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dy = (gaussian(np.random.uniform(-1, 1, size=shape[:2]),
c[1], mode='reflect', truncate=3) * c[0]).astype(np.float32)
dx, dy = dx[..., np.newaxis], dy[..., np.newaxis]
x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))
indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))
return np.clip(map_coordinates(image, indices, order=1, mode='reflect').reshape(shape), 0, 1) * 255
# /////////////// End Distortions ///////////////
import collections
print('Using CIFAR-10 data')
d = collections.OrderedDict()
d['Gaussian Noise'] = gaussian_noise
d['Shot Noise'] = shot_noise
d['Impulse Noise'] = impulse_noise
d['Defocus Blur'] = defocus_blur
d['Glass Blur'] = glass_blur
d['Motion Blur'] = motion_blur
d['Zoom Blur'] = zoom_blur
d['Snow'] = snow
d['Frost'] = frost
d['Fog'] = fog
d['Brightness'] = brightness
d['Contrast'] = contrast
d['Elastic'] = elastic_transform
d['Pixelate'] = pixelate
d['JPEG'] = jpeg_compression
d['Speckle Noise'] = speckle_noise
d['Gaussian Blur'] = gaussian_blur
d['Spatter'] = spatter
d['Saturate'] = saturate
test_data = dset.CIFAR10('/share/data/vision-greg/cifarpy', train=False)
convert_img = trn.Compose([trn.ToTensor(), trn.ToPILImage()])
for method_name in d.keys():
print('Creating images for the corruption', method_name)
cifar_c, labels = [], []
for severity in range(1,6):
corruption = lambda clean_img: d[method_name](clean_img, severity)
for img, label in zip(test_data.data, test_data.targets):
labels.append(label)
cifar_c.append(np.uint8(corruption(convert_img(img))))
np.save('/share/data/vision-greg2/users/dan/datasets/CIFAR-10-C/' + d[method_name].__name__ + '.npy',
np.array(cifar_c).astype(np.uint8))
np.save('/share/data/vision-greg2/users/dan/datasets/CIFAR-10-C/labels.npy',
np.array(labels).astype(np.uint8))
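# Output sketch (the save paths above are site-specific): each <corruption>.npy stacks
# the 10,000 CIFAR-10 test images at severities 1-5 in order (50,000 uint8 images of
# shape 32x32x3), and labels.npy holds the matching 50,000 labels repeated per severity.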
|
hendrycks/robustness
|
ImageNet-C/create_c/make_cifar_c.py
|
Python
|
apache-2.0
| 15,767
|
[
"Gaussian"
] |
f865ae91dffb2b4834292ce05c876bf3d77f36417f70075a2b5259348b0c83a5
|
import os, re
from subprocess import *
from time import strftime, gmtime, ctime, localtime, asctime
TERM_COLS = 110
LIBMESH_OPTIONS = {
'mesh_mode' : { 're_option' : r'#define\s+LIBMESH_ENABLE_PARMESH\s+(\d+)',
'default' : 'SERIAL',
'options' :
{
'PARALLEL' : '1',
'SERIAL' : '0'
}
},
'unique_ids' : { 're_option' : r'#define\s+LIBMESH_ENABLE_UNIQUE_ID\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'dtk' : { 're_option' : r'#define\s+LIBMESH_HAVE_DTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
},
'vtk' : { 're_option' : r'#define\s+LIBMESH_HAVE_VTK\s+(\d+)',
'default' : 'FALSE',
'options' :
{
'TRUE' : '1',
'FALSE' : '0'
}
}
}
## Run a command and return the output, or ERROR: + output if retcode != 0
def runCommand(cmd):
p = Popen([cmd],stdout=PIPE,stderr=STDOUT, close_fds=True, shell=True)
output = p.communicate()[0]
if (p.returncode != 0):
output = 'ERROR: ' + output
return output
## print an optionally colorified test result
#
# The test will not be colored if
# 1) options.colored is False,
# 2) the environment variable BITTEN_NOCOLOR is true, or
# 3) the color parameter is False.
def printResult(test_name, result, timing, start, end, options, color=True):
f_result = ''
cnt = (TERM_COLS-2) - len(test_name + result)
if color:
any_match = False
# Color leading paths
m = re.search(r'(.*):(.*)', test_name)
if m:
test_name = colorText(m.group(1), options, 'YELLOW') + ':' + m.group(2)
# Color the Caveats CYAN
m = re.search(r'(\[.*?\])', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'CYAN') + " "
        # Color Exodiff or CSVdiff tests YELLOW
m = re.search('(FAILED \((?:EXODIFF|CSVDIFF)\))', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'YELLOW')
else:
# Color remaining FAILED tests RED
m = re.search('(FAILED \(.*\))', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'RED')
# Color deleted tests RED
m = re.search('(deleted) (\(.*\))', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'RED') + ' ' + m.group(2)
# Color long running tests YELLOW
m = re.search('(RUNNING\.\.\.)', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'YELLOW')
# Color PBS status CYAN
m = re.search('((?:LAUNCHED|RUNNING(?!\.)|EXITING|QUEUED))', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'CYAN')
# Color Passed tests GREEN
m = re.search('(OK)', result)
if m:
any_match = True
f_result += colorText(m.group(1), options, 'GREEN')
if not any_match:
f_result = result
f_result = test_name + '.'*cnt + ' ' + f_result
else:
f_result = test_name + '.'*cnt + ' ' + result
# Tack on the timing if it exists
if timing:
f_result += ' [' + '%0.3f' % float(timing) + 's]'
if options.debug_harness:
f_result += ' Start: ' + '%0.3f' % start + ' End: ' + '%0.3f' % end
return f_result
## Color the error messages if the options permit, also do not color in bitten scripts because
# it messes up the trac output.
# supports simple html-like tags for more advanced coloring schemes. \verbatim<r>,<g>,<y>,<b>\endverbatim All colors are bolded.
def colorText(str, options, color, html=False):
# ANSI color codes for colored terminal output
color_codes = {'RESET':'\033[0m','BOLD':'\033[1m','RED':'\033[31m','GREEN':'\033[35m','CYAN':'\033[34m','YELLOW':'\033[33m','MAGENTA':'\033[32m'}
if options.code:
color_codes['GREEN'] = '\033[32m'
color_codes['CYAN'] = '\033[36m'
color_codes['MAGENTA'] = '\033[35m'
if options.colored and not (os.environ.has_key('BITTEN_NOCOLOR') and os.environ['BITTEN_NOCOLOR'] == 'true'):
if html:
str = str.replace('<r>', color_codes['BOLD']+color_codes['RED'])
str = str.replace('<c>', color_codes['BOLD']+color_codes['CYAN'])
str = str.replace('<g>', color_codes['BOLD']+color_codes['GREEN'])
str = str.replace('<y>', color_codes['BOLD']+color_codes['YELLOW'])
str = str.replace('<b>', color_codes['BOLD'])
str = re.sub(r'</[rcgyb]>', color_codes['RESET'], str)
else:
str = color_codes[color] + str + color_codes['RESET']
elif html:
str = re.sub(r'</?[rcgyb]>', '', str) # strip all "html" tags
return str
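# Illustrative calls (the options object is whatever the test harness passes in):
# colorText('FAILED', options, 'RED') wraps the text in the RED/RESET escape codes
# when coloring is enabled, while colorText('<r>FAILED</r>', options, None, html=True)
# expands the pseudo-html tags to bold red, or strips them when coloring is off.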
def getPlatforms():
# We'll use uname to figure this out
# Supported platforms are LINUX, DARWIN, SL, LION or ALL
platforms = set()
platforms.add('ALL')
raw_uname = os.uname()
if raw_uname[0].upper() == 'DARWIN':
platforms.add('DARWIN')
if re.match("10\.", raw_uname[2]):
platforms.add('SL')
if re.match("11\.", raw_uname[2]):
platforms.add("LION")
else:
platforms.add(raw_uname[0].upper())
return platforms
def getCompilers(libmesh_dir):
    # We'll ask libmesh's libmesh-config script which C++ compiler is in use
    # to figure this out
# Supported compilers are GCC, INTEL or ALL
compilers = set()
compilers.add('ALL')
# Get the gxx compiler. Note that the libmesh-config script
# can live in different places depending on whether libmesh is
# "installed" or not.
# Installed location of libmesh-config script
libmesh_config_installed = libmesh_dir + '/bin/libmesh-config'
# Uninstalled location of libmesh-config script
libmesh_config_uninstalled = libmesh_dir + '/contrib/bin/libmesh-config'
# The eventual variable we will use to refer to libmesh's configure script
libmesh_config = ''
if os.path.exists(libmesh_config_installed):
libmesh_config = libmesh_config_installed
elif os.path.exists(libmesh_config_uninstalled):
libmesh_config = libmesh_config_uninstalled
else:
print "Error! Could not find libmesh's config script in any of the usual locations!"
exit(1)
# Pass the --cxx option to the libmesh-config script, and check the result
command = libmesh_config + ' --cxx'
p = Popen(command, shell=True, stdout=PIPE)
mpicxx_cmd = p.communicate()[0].strip()
    # Account for usage of distcc
if "distcc" in mpicxx_cmd:
split_cmd = mpicxx_cmd.split()
mpicxx_cmd = split_cmd[-1]
    # If mpi is in the command, run -show to get the compiler
if "mpi" in mpicxx_cmd:
p = Popen(mpicxx_cmd + " -show", shell=True, stdout=PIPE)
raw_compiler = p.communicate()[0]
else:
raw_compiler = mpicxx_cmd
if re.match('icpc', raw_compiler) != None:
compilers.add("INTEL")
elif re.match('[cg]\+\+', raw_compiler) != None:
compilers.add("GCC")
elif re.match('clang\+\+', raw_compiler) != None:
compilers.add("CLANG")
return compilers
def getPetscVersion(libmesh_dir):
# We'll use PETSc's own header file to determine the PETSc version
# (major.minor). If necessary in the future we'll also detect subminor...
#
# Note: we used to find this info in Make.common, but in the
# automake version of libmesh, this information is no longer stored
# in Make.common, but rather in
#
# $LIBMESH_DIR/lib/${AC_ARCH}_${METHOD}/pkgconfig/Make.common.${METHOD}
#
# where ${AC_ARCH} is an architecture-dependent string determined by
# libmesh's config.guess. So we could try to look there, but it's
# easier and more portable to look in ${PETSC_DIR}.
# Default to something that doesn't make sense
petsc_version_major = 'x'
petsc_version_minor = 'x'
# Get user's PETSC_DIR from environment.
petsc_dir = os.environ.get('PETSC_DIR')
    # environ.get returns None if no such environment variable exists.
    if petsc_dir is None:
print "PETSC_DIR not found in environment! Cannot detect PETSc version!"
exit(1)
# FIXME: handle I/O exceptions when opening this file
f = open(petsc_dir + '/include/petscversion.h')
# The version lines are (hopefully!) always going to be of the form
# #define PETSC_VERSION_MAJOR X
# where X is some number, so in python, we can split the string and
# pop the last substring (the version) off the end.
for line in f.readlines():
if line.find('#define PETSC_VERSION_MAJOR') != -1:
petsc_version_major = line.split().pop()
elif line.find('#define PETSC_VERSION_MINOR') != -1:
petsc_version_minor = line.split().pop()
# See if we're done.
if (petsc_version_major != 'x' and petsc_version_minor != 'x'):
break
# Done with the file, so we can close it now
f.close()
# If either version was not found, then we can't continue :(
if petsc_version_major == 'x':
print("Error: could not determine valid PETSc major version.")
exit(1)
if petsc_version_minor == 'x':
print("Error: could not determine valid PETSc minor version.")
exit(1)
petsc_version = petsc_version_major + '.' + petsc_version_minor
# print "Running tests assuming PETSc version", petsc_version
return petsc_version
# Break down petsc version logic in a new define
# TODO: find a way to eval() logic instead
def checkPetscVersion(checks, test):
# If any version of petsc works, return true immediately
if 'ALL' in set(test['petsc_version']):
return (True, None, None)
# Iterate through petsc versions in test[PETSC_VERSION] and match it against check[PETSC_VERSION]
for petsc_version in test['petsc_version']:
logic, version = re.search(r'(.*?)(\d\S+)', petsc_version).groups()
# Exact match
if logic == '' or logic == '=':
if version == checks['petsc_version']:
return (True, None, version)
else:
return (False, '!=', version)
# Logical match
if logic == '>' and checks['petsc_version'][0:3] > version[0:3]:
return (True, None, version)
elif logic == '>=' and checks['petsc_version'][0:3] >= version[0:3]:
return (True, None, version)
elif logic == '<' and checks['petsc_version'][0:3] < version[0:3]:
return (True, None, version)
elif logic == '<=' and checks['petsc_version'][0:3] <= version[0:3]:
return (True, None, version)
return (False, logic, version)
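# Example of the matching logic (version strings are illustrative): with
# checks['petsc_version'] == '3.4', a test requiring ['>=3.3'] returns
# (True, None, '3.3'), while one requiring exactly ['3.3'] returns (False, '!=', '3.3').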
def getLibMeshConfigOption(libmesh_dir, option):
# Some tests work differently with parallel mesh enabled
# We need to detect this condition
option_set = set()
option_set.add('ALL')
filenames = [
libmesh_dir + '/include/base/libmesh_config.h', # Old location
libmesh_dir + '/include/libmesh/libmesh_config.h' # New location
];
success = 0
for filename in filenames:
if success == 1:
break
try:
f = open(filename)
contents = f.read()
f.close()
info = LIBMESH_OPTIONS[option]
m = re.search(info['re_option'], contents)
if m != None:
for value, option in info['options'].iteritems():
if m.group(1) == option:
option_set.add(value)
else:
option_set.add(info['default'])
success = 1
except IOError, e:
# print "Warning: I/O Error trying to read", filename, ":", e.strerror, "... Will try other locations."
pass
if success == 0:
print "Error! Could not find libmesh_config.h in any of the usual locations!"
exit(1)
return option_set
def getSharedOption(libmesh_dir):
# Some tests may only run properly with shared libraries on/off
# We need to detect this condition
shared_option = set()
shared_option.add('ALL')
# MOOSE no longer relies on Make.common being present. This gives us the
# potential to work with "uninstalled" libmesh trees, for example.
# Installed location of libmesh libtool script
libmesh_libtool_installed = libmesh_dir + '/contrib/bin/libtool'
# Uninstalled location of libmesh libtool script
libmesh_libtool_uninstalled = libmesh_dir + '/libtool'
# The eventual variable we will use to refer to libmesh's libtool script
libmesh_libtool = ''
if os.path.exists(libmesh_libtool_installed):
libmesh_libtool = libmesh_libtool_installed
elif os.path.exists(libmesh_libtool_uninstalled):
libmesh_libtool = libmesh_libtool_uninstalled
else:
print "Error! Could not find libmesh's libtool script in any of the usual locations!"
exit(1)
# Now run the libtool script (in the shell) to see if shared libraries were built
command = libmesh_libtool + " --config | grep build_libtool_libs | cut -d'=' -f2"
# Note: the strip() command removes the trailing newline
p = Popen(command, shell=True, stdout=PIPE)
result = p.communicate()[0].strip()
if re.search('yes', result) != None:
shared_option.add('DYNAMIC')
elif re.search('no', result) != None:
shared_option.add('STATIC')
else:
# Neither no nor yes? Not possible!
print "Error! Could not determine whether shared libraries were built."
exit(1)
return shared_option
|
amburan/moose
|
framework/scripts/TestHarness/util.py
|
Python
|
lgpl-2.1
| 13,238
|
[
"MOOSE",
"VTK"
] |
4a8daf036dfff14a6e933431e0516a064e88528edde1c020c721a773a7b75d50
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
# Credit to Dr. Shyue Ping Ong for the template of the calculator
"""
This module implements a TEM pattern calculator.
"""
import json
import os
from collections import namedtuple
from fractions import Fraction
from functools import lru_cache
from typing import Dict, List, Tuple, cast, Union
import numpy as np
import pandas as pd
import plotly.graph_objs as go
import scipy.constants as sc
from pymatgen.analysis.diffraction.core import AbstractDiffractionPatternCalculator
from pymatgen.core.structure import Structure
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.util.string import latexify_spacegroup, unicodeify_spacegroup
with open(os.path.join(os.path.dirname(__file__), "atomic_scattering_params.json")) as f:
ATOMIC_SCATTERING_PARAMS = json.load(f)
__author__ = "Frank Wan, Jason Liang"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.22"
__maintainer__ = "Jason Liang"
__email__ = "fwan@berkeley.edu, yhljason@berkeley.edu"
__date__ = "03/31/2020"
class TEMCalculator(AbstractDiffractionPatternCalculator):
"""
Computes the TEM pattern of a crystal structure for multiple Laue zones.
Code partially inspired from XRD calculation implementation. X-ray factor to electron factor
conversion based on the International Table of Crystallography.
#TODO: Could add "number of iterations", "magnification", "critical value of beam",
"twin direction" for certain materials, "sample thickness", and "excitation error s"
"""
def __init__(
self,
symprec: float = None,
voltage: float = 200,
beam_direction: Tuple[int, int, int] = (0, 0, 1),
camera_length: int = 160,
debye_waller_factors: Dict[str, float] = None,
cs: float = 1,
) -> None:
"""
Args:
symprec (float): Symmetry precision for structure refinement. If
set to 0, no refinement is done. Otherwise, refinement is
performed using spglib with provided precision.
voltage (float): The wavelength is a function of the TEM microscope's
voltage. By default, set to 200 kV. Units in kV.
beam_direction (tuple): The direction of the electron beam fired onto the sample.
By default, set to [0,0,1], which corresponds to the normal direction
of the sample plane.
camera_length (int): The distance from the sample to the projected diffraction pattern.
By default, set to 160 cm. Units in cm.
debye_waller_factors ({element symbol: float}): Allows the
specification of Debye-Waller factors. Note that these
factors are temperature dependent.
            cs (float): The spherical aberration coefficient. Set by default to 1 mm.
"""
self.symprec = symprec
self.voltage = voltage
self.beam_direction = beam_direction
self.camera_length = camera_length
self.debye_waller_factors = debye_waller_factors or {}
self.cs = cs
@lru_cache(1)
def wavelength_rel(self) -> float:
"""
Calculates the wavelength of the electron beam with relativistic kinematic effects taken
into account.
Args:
none
Returns:
Relativistic Wavelength (in angstroms)
"""
wavelength_rel = (
sc.h
/ np.sqrt(
2 * sc.m_e * sc.e * 1000 * self.voltage * (1 + (sc.e * 1000 * self.voltage) / (2 * sc.m_e * sc.c ** 2))
)
* (10 ** 10)
)
return wavelength_rel
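    # Sanity check (approximate, not asserted anywhere): at the default 200 kV this
    # relativistic wavelength evaluates to roughly 0.0251 angstrom (2.51 pm), versus
    # about 0.0274 angstrom from the non-relativistic h / sqrt(2 * m_e * e * V).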
@classmethod
def generate_points(cls, coord_left: int = -10, coord_right: int = 10) -> np.ndarray:
"""
Generates a bunch of 3D points that span a cube.
Args:
coord_left (int): The minimum coordinate value.
coord_right (int): The maximum coordinate value.
Returns:
Numpy 2d array
"""
points = [0, 0, 0]
coord_values = np.arange(coord_left, coord_right + 1)
points[0], points[1], points[2] = np.meshgrid(coord_values, coord_values, coord_values)
points_matrix = (np.ravel(points[i]) for i in range(0, 3))
result = np.vstack(list(points_matrix)).transpose()
return result
def zone_axis_filter(
self, points: Union[List[Tuple[int, int, int]], np.ndarray], laue_zone: int = 0
) -> Union[List[Tuple[int, int, int]]]:
"""
Filters out all points that exist within the specified Laue zone according to the zone axis rule.
Args:
points (np.ndarray): The list of points to be filtered.
laue_zone (int): The desired Laue zone.
Returns:
list of 3-tuples
"""
if any(isinstance(n, tuple) for n in points):
return list(points)
if len(points) == 0:
return []
filtered = np.where(np.dot(np.array(self.beam_direction), np.transpose(points)) == laue_zone)
result = points[filtered]
result_tuples = cast(List[Tuple[int, int, int]], [tuple(x) for x in result.tolist()])
return result_tuples
def get_interplanar_spacings(
self, structure: Structure, points: Union[List[Tuple[int, int, int]], np.ndarray]
) -> Dict[Tuple[int, int, int], float]:
"""
Args:
structure (Structure): the input structure.
points (tuple): the desired hkl indices.
Returns:
Dict of hkl to its interplanar spacing, in angstroms (float).
"""
points_filtered = self.zone_axis_filter(points)
if (0, 0, 0) in points_filtered:
points_filtered.remove((0, 0, 0))
interplanar_spacings_val = np.array(list(map(lambda x: structure.lattice.d_hkl(x), points_filtered)))
interplanar_spacings = dict(zip(points_filtered, interplanar_spacings_val))
return interplanar_spacings
def bragg_angles(
self, interplanar_spacings: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Gets the Bragg angles for every hkl point passed in (where n = 1).
Args:
interplanar_spacings (dict): dictionary of hkl to interplanar spacing
Returns:
dict of hkl plane (3-tuple) to Bragg angle in radians (float)
"""
plane = list(interplanar_spacings.keys())
interplanar_spacings_val = np.array(list(interplanar_spacings.values()))
bragg_angles_val = np.arcsin(self.wavelength_rel() / (2 * interplanar_spacings_val))
bragg_angles = dict(zip(plane, bragg_angles_val))
return bragg_angles
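    # Worked number (illustrative): with the ~0.0251 angstrom wavelength at 200 kV and
    # an interplanar spacing d = 2.0 angstrom, theta = arcsin(lambda / (2 d)) is about
    # 6.3e-3 rad (~0.36 degrees), so TEM Bragg angles are tiny compared to XRD.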
def get_s2(self, bragg_angles: Dict[Tuple[int, int, int], float]) -> Dict[Tuple[int, int, int], float]:
"""
Calculates the s squared parameter (= square of sin theta over lambda) for each hkl plane.
Args:
bragg_angles (Dict): The bragg angles for each hkl plane.
Returns:
Dict of hkl plane to s2 parameter, calculates the s squared parameter
(= square of sin theta over lambda).
"""
plane = list(bragg_angles.keys())
bragg_angles_val = np.array(list(bragg_angles.values()))
s2_val = (np.sin(bragg_angles_val) / self.wavelength_rel()) ** 2
s2 = dict(zip(plane, s2_val))
return s2
def x_ray_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates x-ray factors, which are required to calculate atomic scattering factors. Method partially inspired
by the equivalent process in the xrd module.
Args:
structure (Structure): The input structure.
bragg_angles (Dict): Dictionary of hkl plane to Bragg angle.
Returns:
dict of atomic symbol to another dict of hkl plane to x-ray factor (in angstroms).
"""
x_ray_factors = {}
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
scattering_factors_for_atom = {}
for atom in atoms:
coeffs = np.array(ATOMIC_SCATTERING_PARAMS[atom.symbol])
for plane in bragg_angles:
scattering_factor_curr = atom.Z - 41.78214 * s2[plane] * np.sum(
coeffs[:, 0] * np.exp(-coeffs[:, 1] * s2[plane]), axis=None
)
scattering_factors_for_atom[plane] = scattering_factor_curr
x_ray_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return x_ray_factors
def electron_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[str, Dict[Tuple[int, int, int], float]]:
"""
Calculates atomic scattering factors for electrons using the Mott-Bethe formula (1st order Born approximation).
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict from atomic symbol to another dict of hkl plane to factor (in angstroms)
"""
electron_scattering_factors = {}
x_ray_factors = self.x_ray_factors(structure, bragg_angles)
s2 = self.get_s2(bragg_angles)
atoms = structure.composition.elements
prefactor = 0.023934
scattering_factors_for_atom = {}
for atom in atoms:
for plane in bragg_angles:
scattering_factor_curr = prefactor * (atom.Z - x_ray_factors[atom.symbol][plane]) / s2[plane]
scattering_factors_for_atom[plane] = scattering_factor_curr
electron_scattering_factors[atom.symbol] = scattering_factors_for_atom
scattering_factors_for_atom = {}
return electron_scattering_factors
def cell_scattering_factors(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], int]:
"""
Calculates the scattering factor for the whole cell.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane (3-tuple) to scattering factor (in angstroms).
"""
cell_scattering_factors = {}
electron_scattering_factors = self.electron_scattering_factors(structure, bragg_angles)
scattering_factor_curr = 0
for plane in bragg_angles:
for site in structure:
for sp, occu in site.species.items():
g_dot_r = np.dot(np.array(plane), np.transpose(site.frac_coords))
scattering_factor_curr += electron_scattering_factors[sp.symbol][plane] * np.exp(
2j * np.pi * g_dot_r
)
cell_scattering_factors[plane] = scattering_factor_curr
scattering_factor_curr = 0
return cell_scattering_factors
def cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Calculates cell intensity for each hkl plane. For simplicity's sake, take I = |F|**2.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to cell intensity
"""
csf = self.cell_scattering_factors(structure, bragg_angles)
plane = bragg_angles.keys()
csf_val = np.array(list(csf.values()))
cell_intensity_val = (csf_val * csf_val.conjugate()).real
cell_intensity = dict(zip(plane, cell_intensity_val))
return cell_intensity
def get_pattern(
self,
structure: Structure,
scaled: bool = None,
two_theta_range: Tuple[float, float] = None,
) -> pd.DataFrame:
"""
Returns all relevant TEM DP info in a pandas dataframe.
Args:
structure (Structure): The input structure.
scaled (boolean): Required value for inheritance, does nothing in TEM pattern
two_theta_range (Tuple): Required value for inheritance, does nothing in TEM pattern
Returns:
PandasDataFrame
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
field_names = [
"Position",
"(hkl)",
"Intensity (norm)",
"Film radius",
"Interplanar Spacing",
]
rows_list = []
for dot in tem_dots:
            dict1 = {
                "Position": dot.position,
                "(hkl)": dot.hkl,
                "Intensity (norm)": dot.intensity,
                "Film radius": dot.film_radius,
                "Interplanar Spacing": dot.d_spacing,
            }
rows_list.append(dict1)
df = pd.DataFrame(rows_list, columns=field_names)
return df
def normalized_cell_intensity(
self, structure: Structure, bragg_angles: Dict[Tuple[int, int, int], float]
) -> Dict[Tuple[int, int, int], float]:
"""
Normalizes the cell_intensity dict to 1, for use in plotting.
Args:
structure (Structure): The input structure.
bragg_angles (dict of 3-tuple to float): The Bragg angles for each hkl plane.
Returns:
dict of hkl plane to normalized cell intensity
"""
normalized_cell_intensity = {}
cell_intensity = self.cell_intensity(structure, bragg_angles)
max_intensity = max(cell_intensity.values())
norm_factor = 1 / max_intensity
for plane in cell_intensity:
normalized_cell_intensity[plane] = cell_intensity[plane] * norm_factor
return normalized_cell_intensity
def is_parallel(
self,
structure: Structure,
plane: Tuple[int, int, int],
other_plane: Tuple[int, int, int],
) -> bool:
"""
Checks if two hkl planes are parallel in reciprocal space.
Args:
structure (Structure): The input structure.
plane (3-tuple): The first plane to be compared.
other_plane (3-tuple): The other plane to be compared.
Returns:
boolean
"""
phi = self.get_interplanar_angle(structure, plane, other_plane)
return phi in (180, 0) or np.isnan(phi)
def get_first_point(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], float]:
"""
Gets the first point to be plotted in the 2D DP, corresponding to maximum d/minimum R.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of a hkl plane to max interplanar distance.
"""
max_d = -100.0
max_d_plane = (0, 0, 1)
points = self.zone_axis_filter(points)
spacings = self.get_interplanar_spacings(structure, points)
for plane in sorted(spacings.keys()):
if spacings[plane] > max_d:
max_d_plane = plane
max_d = spacings[plane]
return {max_d_plane: max_d}
@classmethod
def get_interplanar_angle(cls, structure: Structure, p1: Tuple[int, int, int], p2: Tuple[int, int, int]) -> float:
"""
Returns the interplanar angle (in degrees) between the normal of two crystal planes.
Formulas from International Tables for Crystallography Volume C pp. 2-9.
Args:
structure (Structure): The input structure.
p1 (3-tuple): plane 1
p2 (3-tuple): plane 2
Returns:
float
"""
a, b, c = structure.lattice.a, structure.lattice.b, structure.lattice.c
alpha, beta, gamma = (
np.deg2rad(structure.lattice.alpha),
np.deg2rad(structure.lattice.beta),
np.deg2rad(structure.lattice.gamma),
)
v = structure.lattice.volume
a_star = b * c * np.sin(alpha) / v
b_star = a * c * np.sin(beta) / v
c_star = a * b * np.sin(gamma) / v
cos_alpha_star = (np.cos(beta) * np.cos(gamma) - np.cos(alpha)) / (np.sin(beta) * np.sin(gamma))
cos_beta_star = (np.cos(alpha) * np.cos(gamma) - np.cos(beta)) / (np.sin(alpha) * np.sin(gamma))
cos_gamma_star = (np.cos(alpha) * np.cos(beta) - np.cos(gamma)) / (np.sin(alpha) * np.sin(beta))
r1_norm = np.sqrt(
p1[0] ** 2 * a_star ** 2
+ p1[1] ** 2 * b_star ** 2
+ p1[2] ** 2 * c_star ** 2
+ 2 * p1[0] * p1[1] * a_star * b_star * cos_gamma_star
+ 2 * p1[0] * p1[2] * a_star * c_star * cos_beta_star
            + 2 * p1[1] * p1[2] * b_star * c_star * cos_alpha_star
)
r2_norm = np.sqrt(
p2[0] ** 2 * a_star ** 2
+ p2[1] ** 2 * b_star ** 2
+ p2[2] ** 2 * c_star ** 2
+ 2 * p2[0] * p2[1] * a_star * b_star * cos_gamma_star
+ 2 * p2[0] * p2[2] * a_star * c_star * cos_beta_star
            + 2 * p2[1] * p2[2] * b_star * c_star * cos_alpha_star
)
r1_dot_r2 = (
p1[0] * p2[0] * a_star ** 2
+ p1[1] * p2[1] * b_star ** 2
+ p1[2] * p2[2] * c_star ** 2
+ (p1[0] * p2[1] + p2[0] * p1[1]) * a_star * b_star * cos_gamma_star
            + (p1[0] * p2[2] + p2[0] * p1[2]) * a_star * c_star * cos_beta_star
+ (p1[1] * p2[2] + p2[1] * p1[2]) * b_star * c_star * cos_alpha_star
)
phi = np.arccos(r1_dot_r2 / (r1_norm * r2_norm))
return np.rad2deg(phi)
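    # Quick consistency check (hypothetical cubic cell): with a = b = c and all cell
    # angles at 90 degrees the cos* terms vanish, and the angle between (1, 0, 0) and
    # (1, 1, 0) comes out as 45 degrees, as expected.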
@classmethod
def get_plot_coeffs(
cls,
p1: Tuple[int, int, int],
p2: Tuple[int, int, int],
p3: Tuple[int, int, int],
) -> np.ndarray:
"""
Calculates coefficients of the vector addition required to generate positions for each DP point
by the Moore-Penrose inverse method.
Args:
p1 (3-tuple): The first point. Fixed.
p2 (3-tuple): The second point. Fixed.
            p3 (3-tuple): The point whose coefficients are to be calculated.
Returns:
Numpy array
"""
a = np.array([[p1[0], p2[0]], [p1[1], p2[1]], [p1[2], p2[2]]])
b = np.array([[p3[0], p3[1], p3[2]]]).T
a_pinv = np.linalg.pinv(a)
x = np.dot(a_pinv, b)
return np.ravel(x)
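    # Worked example (illustrative sketch): with p1 = (1, 0, 0), p2 = (0, 1, 0)
    # and p3 = (2, 3, 0), the pseudo-inverse of the 3x2 matrix built from p1 and
    # p2 yields the coefficients [2, 3], i.e. p3 = 2*p1 + 3*p2; a p3 outside the
    # span of p1 and p2 instead gets its least-squares best fit.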
def get_positions(self, structure: Structure, points: list) -> Dict[Tuple[int, int, int], np.ndarray]:
"""
Calculates all the positions of each hkl point in the 2D diffraction pattern by vector addition.
Distance in centimeters.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
dict of hkl plane to xy-coordinates.
"""
positions = {}
points = self.zone_axis_filter(points)
# first is the max_d, min_r
first_point_dict = self.get_first_point(structure, points)
for point in first_point_dict:
first_point = point
first_d = first_point_dict[point]
spacings = self.get_interplanar_spacings(structure, points)
# second is the first non-parallel-to-first-point vector when sorted.
# note 000 is "parallel" to every plane vector.
for plane in sorted(spacings.keys()):
second_point, second_d = plane, spacings[plane]
if not self.is_parallel(structure, first_point, second_point):
break
p1 = first_point
p2 = second_point
if (0, 0, 0) in points:
points.remove((0, 0, 0))
points.remove(first_point)
points.remove(second_point)
positions[(0, 0, 0)] = np.array([0, 0])
r1 = self.wavelength_rel() * self.camera_length / first_d
positions[first_point] = np.array([r1, 0])
r2 = self.wavelength_rel() * self.camera_length / second_d
phi = np.deg2rad(self.get_interplanar_angle(structure, first_point, second_point))
positions[second_point] = np.array([r2 * np.cos(phi), r2 * np.sin(phi)])
for plane in points:
coeffs = self.get_plot_coeffs(p1, p2, plane)
pos = np.array(
[
coeffs[0] * positions[first_point][0] + coeffs[1] * positions[second_point][0],
coeffs[0] * positions[first_point][1] + coeffs[1] * positions[second_point][1],
]
)
positions[plane] = pos
points.append((0, 0, 0))
points.append(first_point)
points.append(second_point)
return positions
def tem_dots(self, structure: Structure, points) -> List:
"""
Generates all TEM_dot as named tuples that will appear on the 2D diffraction pattern.
Args:
structure (Structure): The input structure.
points (list): All points to be checked.
Returns:
list of TEM_dots
"""
dots = []
interplanar_spacings = self.get_interplanar_spacings(structure, points)
bragg_angles = self.bragg_angles(interplanar_spacings)
cell_intensity = self.normalized_cell_intensity(structure, bragg_angles)
positions = self.get_positions(structure, points)
for plane in cell_intensity.keys():
dot = namedtuple("dot", ["position", "hkl", "intensity", "film_radius", "d_spacing"])
position = positions[plane]
hkl = plane
intensity = cell_intensity[plane]
film_radius = 0.91 * (10 ** -3 * self.cs * self.wavelength_rel() ** 3) ** Fraction("1/4")
d_spacing = interplanar_spacings[plane]
tem_dot = dot(position, hkl, intensity, film_radius, d_spacing)
dots.append(tem_dot)
return dots
def get_plot_2d(self, structure: Structure) -> go.Figure:
"""
Generates the 2D diffraction pattern of the input structure.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(str(dot.hkl))
intensities.append(dot.intensity)
hkls = list(map(unicodeify_spacegroup, list(map(latexify_spacegroup, hkls))))
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
hoverinfo="text",
mode="markers",
marker=dict(
size=8,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, "black"], [1.0, "white"]],
),
showlegend=False,
),
go.Scatter(
x=[0],
y=[0],
text="(0, 0, 0): Direct beam",
hoverinfo="text",
mode="markers",
marker=dict(size=14, cmax=1, cmin=0, color="white"),
showlegend=False,
),
]
layout = go.Layout(
title="2D Diffraction Pattern<br>Beam Direction: " + "".join(str(e) for e in self.beam_direction),
font=dict(size=14, color="#7f7f7f"),
hovermode="closest",
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
width=550,
height=550,
paper_bgcolor="rgba(100,110,110,0.5)",
plot_bgcolor="black",
)
fig = go.Figure(data=data, layout=layout)
return fig
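    # Usage sketch (illustrative only; assumes this class is the module's TEM
    # calculator and that a pymatgen Structure is at hand, e.g. read from file):
    #
    #     calc = TEMCalculator()                    # hypothetical instantiation
    #     struct = Structure.from_file("POSCAR")    # hypothetical input file
    #     calc.get_plot_2d(struct).show()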
def get_plot_2d_concise(self, structure: Structure) -> go.Figure:
"""
        Generates a concise 2D diffraction pattern of the input structure, rendered at a smaller size and without the full layout.
Does not display.
Args:
structure (Structure): The input structure.
Returns:
Figure
"""
if self.symprec:
finder = SpacegroupAnalyzer(structure, symprec=self.symprec)
structure = finder.get_refined_structure()
points = self.generate_points(-10, 11)
tem_dots = self.tem_dots(structure, points)
xs = []
ys = []
hkls = []
intensities = []
for dot in tem_dots:
if dot.hkl != (0, 0, 0):
xs.append(dot.position[0])
ys.append(dot.position[1])
hkls.append(dot.hkl)
intensities.append(dot.intensity)
data = [
go.Scatter(
x=xs,
y=ys,
text=hkls,
mode="markers",
hoverinfo="skip",
marker=dict(
size=4,
cmax=1,
cmin=0,
color=intensities,
colorscale=[[0, "black"], [1.0, "white"]],
),
showlegend=False,
)
]
layout = go.Layout(
xaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
yaxis=dict(
range=[-4, 4],
showgrid=False,
zeroline=False,
showline=False,
ticks="",
showticklabels=False,
),
plot_bgcolor="black",
margin={"l": 0, "r": 0, "t": 0, "b": 0},
width=121,
height=121,
)
fig = go.Figure(data=data, layout=layout)
fig.layout.update(showlegend=False)
return fig
|
davidwaroquiers/pymatgen
|
pymatgen/analysis/diffraction/tem.py
|
Python
|
mit
| 27,148
|
[
"CRYSTAL",
"pymatgen"
] |
15859085a2b7902491590660630d36607b354cf735048b895fdb9c17bd243c11
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2016-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Python implementation of the tiled Q-transform scan.
This is a re-implementation of the original Q-transform scan from the Omega
pipeline; all credit for the original algorithm goes to its
authors.
"""
import warnings
from math import (log, ceil, pi, isinf, exp)
import numpy
from numpy import fft as npfft
from ..utils import round_to_power
from ..segments import Segment
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
__credits__ = 'Scott Coughlin <scott.coughlin@ligo.org>, ' \
'Alex Urban <alexander.urban@ligo.org>'
__all__ = ['QTiling', 'QPlane', 'QTile', 'QGram', 'q_scan']
# q-transform defaults
DEFAULT_FRANGE = (0, float('inf'))
DEFAULT_MISMATCH = 0.2
DEFAULT_QRANGE = (4, 64)
# -- object class definitions -------------------------------------------------
class QObject(object):
"""Base class for Q-transform objects
This object exists just to provide basic methods for all other
Q-transform objects.
"""
# pylint: disable=too-few-public-methods
def __init__(self, duration, sampling, mismatch=DEFAULT_MISMATCH):
self.duration = float(duration)
self.sampling = float(sampling)
self.mismatch = float(mismatch)
@property
def deltam(self):
"""Fractional mismatch between neighbouring tiles
:type: `float`
"""
return 2 * (self.mismatch / 3.) ** (1/2.)
class QBase(QObject):
"""Base class for Q-transform objects with fixed Q
This class just provides a property for Q-prime = Q / sqrt(11)
"""
def __init__(self, q, duration, sampling, mismatch=DEFAULT_MISMATCH):
super().__init__(duration, sampling, mismatch=mismatch)
self.q = float(q)
@property
def qprime(self):
"""Normalized Q `(q/sqrt(11))`
"""
return self.q / 11**(1/2.)
class QTiling(QObject):
"""Iterable constructor of `QPlane` objects
For a given Q-range, each of the resulting `QPlane` objects can
be iterated over.
Parameters
----------
duration : `float`
the duration of the data to be Q-transformed
qrange : `tuple` of `float`
`(low, high)` pair of Q extrema
frange : `tuple` of `float`
`(low, high)` pair of frequency extrema
sampling : `float`
sampling rate (in Hertz) of data to be Q-transformed
mismatch : `float`
maximum fractional mismatch between neighbouring tiles
"""
def __init__(self, duration, sampling,
qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE,
mismatch=DEFAULT_MISMATCH):
super().__init__(duration, sampling, mismatch=mismatch)
self.qrange = (float(qrange[0]), float(qrange[1]))
self.frange = [float(frange[0]), float(frange[1])]
qlist = list(self._iter_qs())
if self.frange[0] == 0: # set non-zero lower frequency
self.frange[0] = 50 * max(qlist) / (2 * pi * self.duration)
maxf = self.sampling / 2 / (1 + 11**(1/2.) / min(qlist))
if isinf(self.frange[1]):
self.frange[1] = maxf
elif self.frange[1] > maxf: # truncate upper frequency to maximum
warnings.warn('upper frequency of %.2f is too high for the given '
'Q range, resetting to %.2f'
% (self.frange[1], maxf))
self.frange[1] = maxf
@property
def qs(self): # pylint: disable=invalid-name
"""Array of Q values for this `QTiling`
:type: `numpy.ndarray`
"""
return numpy.array(list(self._iter_qs()))
@property
def whitening_duration(self):
"""The recommended data duration required for whitening
"""
return max(t.whitening_duration for t in self)
def _iter_qs(self):
"""Iterate over the Q values
"""
# work out how many Qs we need
cumum = log(self.qrange[1] / self.qrange[0]) / 2**(1/2.)
nplanes = int(max(ceil(cumum / self.deltam), 1))
dq = cumum / nplanes # pylint: disable=invalid-name
for i in range(nplanes):
yield self.qrange[0] * exp(2**(1/2.) * dq * (i + .5))
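    # Worked example (illustrative sketch): with the defaults qrange=(4, 64) and
    # mismatch=0.2, deltam ~= 0.516 and cumum ~= 1.961, so nplanes = 4 and the
    # yielded Q values are roughly 5.7, 11.3, 22.6 and 45.3, i.e. a logarithmic
    # covering of the range that never includes the endpoints themselves.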
def __iter__(self):
"""Iterate over this `QTiling`
Yields a `QPlane` at each Q value
"""
for q in self._iter_qs():
yield QPlane(q, self.frange, self.duration, self.sampling,
mismatch=self.mismatch)
def transform(self, fseries, **kwargs):
"""Compute the time-frequency plane at fixed Q with the most
significant tile
Parameters
----------
fseries : `~gwpy.timeseries.FrequencySeries`
the complex FFT of a time-series data set
**kwargs
other keyword arguments to pass to `QPlane.transform`
Returns
-------
out : `QGram`
signal energies over the time-frequency plane containing the most
significant tile
N : `int`
estimated number of statistically independent tiles
See also
--------
QPlane.transform
compute the Q-transform over a single time-frequency plane
"""
if not numpy.isfinite(fseries).all():
raise ValueError('Input signal contains non-numerical values')
weight = 1 + numpy.log10(self.qrange[1]/self.qrange[0]) / numpy.sqrt(2)
nind, nplanes, peak, result = (0, 0, 0, None)
# identify the plane with the loudest tile
for plane in self:
nplanes += 1
nind += sum([1 + row.ntiles * row.deltam for row in plane])
result = plane.transform(fseries, **kwargs)
if result.peak['energy'] > peak:
out = result
peak = out.peak['energy']
return (out, nind * weight / nplanes)
class QPlane(QBase):
"""Iterable representation of a Q-transform plane
For a given Q, an array of frequencies can be iterated over, yielding
a `QTile` each time.
Parameters
----------
q : `float`
the Q-value for this plane
frange : `tuple` of `float`
`(low, high)` range of frequencies for this plane
duration : `float`
the duration of the data to be Q-transformed
sampling : `float`
sampling rate (in Hertz) of data to be Q-transformed
mismatch : `float`
maximum fractional mismatch between neighbouring tiles
"""
def __init__(self, q, frange, duration, sampling,
mismatch=DEFAULT_MISMATCH):
super().__init__(q, duration, sampling, mismatch=mismatch)
self.frange = [float(frange[0]), float(frange[1])]
if self.frange[0] == 0: # set non-zero lower frequency
self.frange[0] = 50 * self.q / (2 * pi * self.duration)
if isinf(self.frange[1]): # set non-infinite upper frequency
self.frange[1] = self.sampling / 2 / (1 + 1/self.qprime)
def __iter__(self):
"""Iterate over this `QPlane`
Yields a `QTile` at each frequency
"""
# for each frequency, yield a QTile
for freq in self._iter_frequencies():
yield QTile(self.q, freq, self.duration, self.sampling,
mismatch=self.mismatch)
def _iter_frequencies(self):
"""Iterate over the frequencies of this `QPlane`
"""
# work out how many frequencies we need
minf, maxf = self.frange
fcum_mismatch = log(maxf / minf) * (2 + self.q**2)**(1/2.) / 2.
nfreq = int(max(1, ceil(fcum_mismatch / self.deltam)))
fstep = fcum_mismatch / nfreq
fstepmin = 1 / self.duration
# for each frequency, yield a QTile
last = None
for i in range(nfreq):
this = (
minf * exp(2 / (2 + self.q**2)**(1/2.) * (i + .5) * fstep)
// fstepmin * fstepmin
)
if this != last: # yield only unique elements
yield this
last = this
@property
def frequencies(self):
"""Array of central frequencies for this `QPlane`
:type: `numpy.ndarray`
"""
return numpy.array(list(self._iter_frequencies()))
@property
def farray(self):
"""Array of frequencies for the lower-edge of each frequency bin
:type: `numpy.ndarray`
"""
bandwidths = 2 * pi ** (1/2.) * self.frequencies / self.q
return self.frequencies - bandwidths / 2.
@property
def whitening_duration(self):
"""The recommended data duration required for whitening
"""
return round_to_power(self.q / (2 * self.frange[0]),
base=2, which=None)
def transform(self, fseries, norm=True, epoch=None, search=None):
"""Calculate the energy `TimeSeries` for the given `fseries`
Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
the complex FFT of a time-series data set
norm : `bool`, `str`, optional
normalize the energy of the output by the median (if `True` or
            ``'median'``) or the ``'mean'``; if `False`, the output
            is the complex `~numpy.fft.ifft` output of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
the epoch of these data, only used for metadata in the output
            `TimeSeries`, and not required if the input `fseries` has the
epoch populated.
search : `~gwpy.segments.Segment`, optional
search window of interest to determine the loudest Q-plane
Returns
-------
results : `QGram`
the complex energies of the Q-transform of the input `fseries`
at each frequency
See also
--------
QTile.transform
for details on the transform over a row of `(Q, frequency)` tiles
QGram
an object with energies populated over time-frequency tiles
"""
out = []
for qtile in self:
# get energy from transform
out.append(qtile.transform(fseries, norm=norm, epoch=epoch))
return QGram(self, out, search)
class QTile(QBase):
"""Representation of a tile with fixed Q and frequency
"""
def __init__(self, q, frequency, duration, sampling,
mismatch=DEFAULT_MISMATCH):
super().__init__(q, duration, sampling, mismatch=mismatch)
self.frequency = frequency
@property
def bandwidth(self):
"""The bandwidth for tiles in this row
:type: `float`
"""
return 2 * pi ** (1/2.) * self.frequency / self.q
@property
def ntiles(self):
"""The number of tiles in this row
:type: `int`
"""
tcum_mismatch = self.duration * 2 * pi * self.frequency / self.q
return round_to_power(tcum_mismatch / self.deltam,
base=2, which='upper')
@property
def windowsize(self):
"""The size of the frequency-domain window for this row
:type: `int`
"""
return 2 * int(self.frequency / self.qprime * self.duration) + 1
def _get_indices(self):
half = int((self.windowsize - 1) / 2)
return numpy.arange(-half, half + 1)
def get_window(self):
"""Generate the bi-square window for this row
Returns
-------
window : `numpy.ndarray`
"""
# real frequencies
wfrequencies = self._get_indices() / self.duration
# dimensionless frequencies
xfrequencies = wfrequencies * self.qprime / self.frequency
# normalize and generate bi-square window
norm = self.ntiles / (self.duration * self.sampling) * (
315 * self.qprime / (128 * self.frequency)) ** (1/2.)
return (1 - xfrequencies ** 2) ** 2 * norm
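    # Note (illustrative): the bi-square factor (1 - x**2)**2 used above equals
    # 1 at the central frequency (x = 0) and falls smoothly to 0 towards the
    # band edges (x = +/-1), so each row only weights frequencies in its band.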
def get_data_indices(self):
"""Returns the index array of interesting frequencies for this row
"""
return numpy.round(
self._get_indices() + 1 + self.frequency * self.duration,
).astype(int)
@property
def padding(self):
"""The `(left, right)` padding required for the IFFT
:type: `tuple` of `int`
"""
pad = self.ntiles - self.windowsize
return (int((pad - 1)/2.), int((pad + 1)/2.))
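    # Worked example (illustrative sketch): for ntiles = 64 and windowsize = 17,
    # pad = 47 and the padding is (23, 24), so the padded array has length
    # 17 + 23 + 24 = 64 = ntiles before the inverse FFT.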
def transform(self, fseries, norm=True, epoch=None):
"""Calculate the energy `TimeSeries` for the given fseries
Parameters
----------
fseries : `~gwpy.frequencyseries.FrequencySeries`
the complex FFT of a time-series data set
norm : `bool`, `str`, optional
normalize the energy of the output by the median (if `True` or
            ``'median'``) or the ``'mean'``; if `False`, the output
            is the energy (power) of the Q-transform
epoch : `~gwpy.time.LIGOTimeGPS`, `float`, optional
the epoch of these data, only used for metadata in the output
            `TimeSeries`, and not required if the input `fseries` has the
epoch populated.
Returns
-------
energy : `~gwpy.timeseries.TimeSeries`
a `TimeSeries` of the energy from the Q-transform of
this tile against the data.
"""
from ..timeseries import TimeSeries
windowed = fseries[self.get_data_indices()] * self.get_window()
# pad data, move negative frequencies to the end, and IFFT
padded = numpy.pad(windowed, self.padding, mode='constant')
wenergy = npfft.ifftshift(padded)
# return a `TimeSeries`
if epoch is None:
epoch = fseries.epoch
tdenergy = npfft.ifft(wenergy)
cenergy = TimeSeries(tdenergy, x0=epoch,
dx=self.duration/tdenergy.size, copy=False)
energy = type(cenergy)(
cenergy.value.real ** 2. + cenergy.value.imag ** 2.,
x0=cenergy.x0, dx=cenergy.dx, copy=False)
if norm:
norm = norm.lower() if isinstance(norm, str) else norm
if norm in (True, 'median'):
narray = energy / energy.median()
elif norm in ('mean',):
narray = energy / energy.mean()
else:
raise ValueError("Invalid normalisation %r" % norm)
return narray.astype("float32", casting="same_kind", copy=False)
return energy
class QGram(object):
"""Store tile energies over an irregularly gridded plane
Parameters
----------
plane : `QPlane`
the time-frequency plane over which to populate
energies : `list` of `TimeSeries`
a list of signal energies for each row of tiles
search : `~gwpy.segments.Segment`, optional
search window of interest to determine the loudest tile
"""
def __init__(self, plane, energies, search):
self.plane = plane
self.energies = energies
self.peak = self._find_peak(search)
def _find_peak(self, search):
peak = {'energy': 0, 'snr': None, 'time': None, 'frequency': None}
for freq, energy in zip(self.plane.frequencies, self.energies):
if search is not None:
energy = energy.crop(*search)
maxidx = energy.value.argmax()
maxe = energy.value[maxidx]
if maxe > peak['energy']:
peak.update({
'energy': maxe,
'snr': (2 * maxe) ** (1/2.),
'time': energy.t0.value + energy.dt.value * maxidx,
'frequency': freq,
})
return peak
def interpolate(self, tres="<default>", fres="<default>", logf=False,
outseg=None):
"""Interpolate this `QGram` over a regularly-gridded spectrogram
Parameters
----------
tres : `float`, optional
desired time resolution (seconds) of output `Spectrogram`,
default is `abs(outseg) / 1000.`
fres : `float`, `int`, `None`, optional
desired frequency resolution (Hertz) of output `Spectrogram`,
or, if ``logf=True``, the number of frequency samples;
give `None` to skip this step and return the original resolution,
default is 0.5 Hz or 500 frequency samples
logf : `bool`, optional
boolean switch to enable (`True`) or disable (`False`) use of
log-sampled frequencies in the output `Spectrogram`
outseg : `~gwpy.segments.Segment`, optional
GPS `[start, stop)` segment for output `Spectrogram`,
default is the full duration of the input
Returns
-------
out : `~gwpy.spectrogram.Spectrogram`
output `Spectrogram` of normalised Q energy
See also
--------
scipy.interpolate
this method uses `~scipy.interpolate.InterpolatedUnivariateSpline`
to cast all frequency rows to a common time-axis, and then
`~scipy.interpolate.interp2d` to apply the desired frequency
resolution across the band
Notes
-----
This method will return a `Spectrogram` of dtype ``float32`` if
``norm`` is given, and ``float64`` otherwise.
To optimize plot rendering with `~matplotlib.axes.Axes.pcolormesh`,
the output `~gwpy.spectrogram.Spectrogram` can be given a log-sampled
frequency axis by passing `logf=True` at runtime. The `fres` argument
is then the number of points on the frequency axis. Note, this is
incompatible with `~matplotlib.axes.Axes.imshow`.
It is also highly recommended to use the `outseg` keyword argument
when only a small window around a given GPS time is of interest.
"""
from scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)
from ..spectrogram import Spectrogram
if outseg is None:
outseg = self.energies[0].span
frequencies = self.plane.frequencies
dtype = self.energies[0].dtype
# build regular Spectrogram from peak-Q data by interpolating each
# (Q, frequency) `TimeSeries` to have the same time resolution
if tres == "<default>":
tres = abs(Segment(outseg)) / 1000.
xout = numpy.arange(*outseg, step=tres)
nx = xout.size
ny = frequencies.size
out = Spectrogram(numpy.empty((nx, ny), dtype=dtype),
t0=outseg[0], dt=tres, frequencies=frequencies)
# record Q in output
out.q = self.plane.q
# interpolate rows
for i, row in enumerate(self.energies):
xrow = numpy.arange(row.x0.value, (row.x0 + row.duration).value,
row.dx.value)
interp = InterpolatedUnivariateSpline(xrow, row.value)
out[:, i] = interp(xout).astype(dtype, casting="same_kind",
copy=False)
if fres is None:
return out
# interpolate the spectrogram to increase its frequency resolution
# --- this is done because Duncan doesn't like interpolated images
# since they don't support log scaling
interp = interp2d(xout, frequencies, out.value.T, kind='cubic')
if not logf:
if fres == "<default>":
fres = .5
outfreq = numpy.arange(
self.plane.frange[0], self.plane.frange[1], fres,
dtype=dtype)
else:
if fres == "<default>":
fres = 500
outfreq = numpy.geomspace(
self.plane.frange[0],
self.plane.frange[1],
num=int(fres),
)
new = type(out)(
interp(xout, outfreq).T.astype(
dtype, casting="same_kind", copy=False),
t0=outseg[0], dt=tres, frequencies=outfreq,
)
new.q = self.plane.q
return new
def table(self, snrthresh=5.5):
"""Represent this `QPlane` as an `EventTable`
Parameters
----------
snrthresh : `float`, optional
lower inclusive threshold on individual tile SNR to keep in the
table, default: 5.5
Returns
-------
out : `~gwpy.table.EventTable`
a table of time-frequency tiles on this `QPlane`
Notes
-----
Only tiles with signal energy greater than or equal to
`snrthresh ** 2 / 2` will be stored in the output `EventTable`.
"""
from ..table import EventTable
# get plane properties
freqs = self.plane.frequencies
bws = 2 * (freqs - self.plane.farray)
# collect table data as a recarray
names = ('time', 'frequency', 'duration', 'bandwidth', 'energy')
rec = numpy.recarray((0,), names=names, formats=['f8'] * len(names))
for f, bw, row in zip(freqs, bws, self.energies):
ind, = (row.value >= snrthresh ** 2 / 2.).nonzero()
new = ind.size
if new > 0:
rec.resize((rec.size + new,), refcheck=False)
rec['time'][-new:] = row.times.value[ind]
rec['frequency'][-new:] = f
rec['duration'][-new:] = row.dt.to('s').value
rec['bandwidth'][-new:] = bw
rec['energy'][-new:] = row.value[ind]
# save to a table
out = EventTable(rec, copy=False)
out.meta['q'] = self.plane.q
return out
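    # Worked example (illustrative): with the default snrthresh = 5.5, only
    # tiles whose normalised energy reaches 5.5**2 / 2 = 15.125 appear in the
    # output table.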
# -- utilities ----------------------------------------------------------------
def q_scan(data, mismatch=DEFAULT_MISMATCH, qrange=DEFAULT_QRANGE,
frange=DEFAULT_FRANGE, duration=None, sampling=None,
**kwargs):
"""Transform data by scanning over a `QTiling`
This utility is provided mainly to allow direct manipulation of the
`QTiling.transform` output. Most users probably just want to use
:meth:`~gwpy.timeseries.TimeSeries.q_transform`, which wraps around this.
Parameters
----------
data : `~gwpy.timeseries.TimeSeries` or `ndarray`
the time- or frequency-domain input data
mismatch : `float`, optional
maximum allowed fractional mismatch between neighbouring tiles
qrange : `tuple` of `float`, optional
`(low, high)` range of Qs to scan
frange : `tuple` of `float`, optional
`(low, high)` range of frequencies to scan
duration : `float`, optional
duration (seconds) of input, required if `data` is not a `TimeSeries`
sampling : `float`, optional
sample rate (Hertz) of input, required if `data` is not a `TimeSeries`
**kwargs
other keyword arguments to be passed to :meth:`QTiling.transform`,
including ``'epoch'`` and ``'search'``
Returns
-------
qgram : `QGram`
the raw output of :meth:`QTiling.transform`
far : `float`
expected false alarm rate (Hertz) of white Gaussian noise with the
same peak energy and total duration as `qgram`
"""
from gwpy.timeseries import TimeSeries
# prepare input
if isinstance(data, TimeSeries):
duration = abs(data.span)
sampling = data.sample_rate.to('Hz').value
kwargs.update({'epoch': data.t0.value})
data = data.fft().value
# return a raw Q-transform and its significance
qgram, N = QTiling(duration, sampling, mismatch=mismatch, qrange=qrange,
frange=frange).transform(data, **kwargs)
far = 1.5 * N * numpy.exp(-qgram.peak['energy']) / duration
return (qgram, far)
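# Usage sketch (illustrative only, not part of the original module), assuming a
# short stretch of white noise wrapped in a `TimeSeries`:
#
#     >>> import numpy
#     >>> from gwpy.timeseries import TimeSeries
#     >>> data = TimeSeries(numpy.random.normal(size=4096), sample_rate=1024)
#     >>> qgram, far = q_scan(data)
#     >>> spec = qgram.interpolate()  # regularly-gridded Spectrogram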
|
areeda/gwpy
|
gwpy/signal/qtransform.py
|
Python
|
gpl-3.0
| 24,431
|
[
"Gaussian"
] |
9bd76be55ca96e8f3c06f820ddefc7e67fe210c2c4be9e4391670c57e14f581e
|
#!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources.py31compat
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
"""
Determine if two paths reference the same file.
Augments os.path.samefile to work on Windows and
suppresses errors if the path doesn't exist.
"""
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if six.PY2:
def _to_ascii(s):
return s
def isascii(s):
try:
six.text_type(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ')
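# Worked example (illustrative sketch): _one_liner collapses an indented
# multi-line snippet into a single ';'-joined statement string, e.g.
#     _one_liner("""
#         import os
#         os.makedirs('build')
#     """)
# returns "import os; os.makedirs('build')".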
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('install-layout=', None, "installation layout to choose (known values: deb)"),
('force-installation-into-system-dir', '0', "force installation into /usr"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# enable custom installation, known values: deb
self.install_layout = None
self.force_installation_into_system_dir = None
self.multiarch = None
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = sys.version[:3]
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
if self.install_layout:
if not self.install_layout.lower() in ['deb']:
raise DistutilsOptionError("unknown value for --install-layout")
self.install_layout = self.install_layout.lower()
import sysconfig
if sys.version_info[:2] >= (3, 3):
self.multiarch = sysconfig.get_config_var('MULTIARCH')
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
if self.prefix == '/usr' and not self.force_installation_into_system_dir:
raise DistutilsOptionError("""installation into /usr
Trying to install into the system managed parts of the file system. Please
consider installing to another location, or use the option
--force-installation-into-system-dir to override this warning.
""")
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = list(sorted(self.outputs))
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
if instdir not in map(normalize_path, _pythonpath()):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip()
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip()
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip()
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
pkg_resources.py31compat.makedirs(dirname, exist_ok=True)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write(tmpl.format(**locals()))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-"))
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
finally:
os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
def easy_install(self, spec, deps=False):
if not self.editable:
self.install_site_py()
with self._tmpdir() as tmpdir:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
# Installation is also needed if file in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e))
except VersionConflict as e:
raise DistutilsError(e.report())
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
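    # Decision summary (illustrative, paraphrasing the logic above): --zip-ok /
    # --always-unzip take priority; otherwise an egg is unpacked when it is
    # marked not-zip-safe or carries no safety flag at all, and is kept zipped
    # only when it is explicitly marked zip-safe.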
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_ascii(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
if self.dry_run:
return
mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.whl'):
return [self.install_wheel(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
(m + " %s to %s") % (
os.path.basename(egg_path),
os.path.dirname(destination)
),
)
update_dist_caches(
destination,
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf, 'w')
f.write('Metadata-Version: 1.0\n')
for k, v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
f.close()
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
f = open(txt, 'w')
f.write('\n'.join(locals()[name]) + '\n')
f.close()
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
destination = os.path.join(self.install_dir, wheel.egg_name())
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
self.execute(
wheel.install_as_egg,
(destination,),
("Installing %s to %s") % (
os.path.basename(wheel_path),
os.path.dirname(destination)
),
)
finally:
update_dist_caches(destination, fix_zipimporter_caches=False)
self.add_output(destination)
return self.egg_distribution(destination)
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip()
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""")
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
            'find_links', 'site_dirs', 'index_url', 'optimize',
            'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location) + '\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.""").lstrip()
def no_default_version_msg(self):
template = self.__no_default_msg
return template % (self.install_dir, os.environ.get('PYTHONPATH', ''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
source = source.decode('utf-8')
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
with io.open(sitepy) as strm:
current = strm.read()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
with io.open(sitepy, 'w', encoding='utf-8') as strm:
strm.write(source)
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__:
sitedir_name = 'site-packages'
else:
sitedir_name = 'dist-packages'
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
        unix_local=dict(
            install_dir='$base/local/lib/python$py_version_short/%s' % sitedir_name,
            script_dir='$base/local/bin',
        ),
        posix_local=dict(
            install_dir='$base/local/lib/python$py_version_short/%s' % sitedir_name,
            script_dir='$base/local/bin',
        ),
        deb_system=dict(
            install_dir='$base/lib/python3/%s' % sitedir_name,
            script_dir='$base/bin',
        ),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix or self.install_layout:
if self.install_layout and self.install_layout in ['deb']:
scheme_name = "deb_system"
self.prefix = '/usr'
elif self.prefix or 'real_prefix' in sys.__dict__:
scheme_name = os.name
else:
scheme_name = "posix_local"
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
            scheme = self.INSTALL_SCHEMES.get(scheme_name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
"""
Return a list of 'site' dirs
"""
sitedirs = []
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
"local/lib",
"python" + sys.version[:3],
"dist-packages",
),
os.path.join(
prefix,
"lib",
"python" + sys.version[:3],
"dist-packages",
),
os.path.join(prefix, "lib", "site-python"),
])
else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
home_sp = os.path.join(
home,
'Library',
'Python',
sys.version[:3],
'site-packages',
)
sitedirs.append(home_sp)
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname, name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(six.StringIO(config))
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
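# Illustrative sketch (not part of setuptools itself; the installer name below
# is hypothetical): extract_wininst_cfg lets you inspect the metadata embedded
# in a bdist_wininst installer before converting it to an egg, e.g.
#
#     cfg = extract_wininst_cfg('example-1.0.win-amd64-py2.7.exe')
#     if cfg is not None:
#         print(cfg.get('metadata', 'name'), cfg.get('metadata', 'version'))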
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if six.PY3:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
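# Illustrative sketch (values are hypothetical): the table returned by
# get_exe_prefixes maps archive prefixes inside the .exe onto egg-relative
# paths, with the most specific (longest) prefixes first, roughly:
#
#     [('scripts/', 'EGG-INFO/scripts/'), ('purelib/', ''), ('platlib/', ''), ...]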
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep == '/' and '/' or os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
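    # Illustrative sketch (paths are hypothetical): with basedir
    # '/opt/site-packages', make_relative('/opt/site-packages/foo.egg')
    # returns './foo.egg', while a path outside basedir (e.g.
    # '/elsewhere/bar.egg') is returned unchanged.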
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
prelude = _one_liner("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _one_liner("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
PthDistributions = RewritePthDistributions
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
if func in [os.unlink, os.remove] and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg))))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
read a 'bad local file header', or even worse, it may fail silently &
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
tracking down such remaining stale data, we can not catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
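# Illustrative sketch (path is hypothetical): after replacing an installed
# egg, install_egg()/install_wheel() call something like
#
#     update_dist_caches('/opt/site-packages/foo-1.0-py2.7.egg',
#                        fix_zipimporter_caches=True)
#
# so that stale sys.path_importer_cache finders and zipimport directory
# caches no longer describe the old archive layout.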
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
Given updater is a callable taking a cache entry key and the original entry
(after already removing the entry from the cache), and expected to update
the entry and possibly return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
return '#!' + cmdline + '\n'
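# Illustrative sketch (interpreter path is hypothetical): a CommandSpec
# renders the #! header used for generated scripts, e.g.
#
#     spec = CommandSpec.from_string('/usr/bin/python -E')
#     spec.install_options('')   # no options embedded in the script text
#     spec.as_header()           # -> '#!/usr/bin/python -E\n'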
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent(r"""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", DeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_header", DeprecationWarning)
if wininst:
executable = "python.exe"
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
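# Illustrative sketch (the dist object is assumed to be a pkg_resources
# Distribution with console_scripts/gui_scripts entry points): easy_install
# writes wrapper scripts roughly like
#
#     writer = ScriptWriter.best()
#     for args in writer.get_args(dist, header=writer.get_header()):
#         write_script(*args)   # each tuple: (name, contents[, mode[, blockers]])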
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", DeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
        Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-windows systems, always use. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if six.PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
|
bjornlevi/5thpower
|
nefndaralit/env/lib/python3.6/site-packages/setuptools/command/easy_install.py
|
Python
|
mit
| 89,413
|
[
"VisIt"
] |
bd19ca7c02768604f2b7838e2d29dae01144c927e3f7e283e201880337c2ea2f
|
"""
Main functions for interacting with LAtools.
(c) Oscar Branson : https://github.com/oscarbranson
"""
import configparser
import itertools
import inspect
import json
import os
import re
import time
import warnings
import dateutil
import textwrap
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import pkg_resources as pkgrs
import uncertainties as unc
import uncertainties.unumpy as un
from sklearn.preprocessing import minmax_scale, scale
from sklearn.cluster import KMeans
from scipy.optimize import curve_fit
from .helpers import plot
from .filtering import filters
from .filtering.classifier_obj import classifier
from .processes import read_data
from .preprocessing.split import long_file
from .D_obj import D
from .helpers import Bunch
from .helpers.plot import rangecalc
from .helpers.signal import rolling_window, enumerate_bool, calc_grads
from .helpers import logging
from .helpers.logging import _log
from .helpers.config import read_configuration, config_locator
from .helpers.stat_fns import *
from .helpers import utils
from .helpers import srm as srms
from .helpers.progressbars import progressbar
from .helpers.chemistry import analyte_mass, decompose_molecule
from .helpers.analytes import get_analyte_name, analyte_2_massname, pretty_element, unitpicker, analyte_sort_fn, analyte_checker, split_analyte_ratios
from .helpers.io import get_date
idx = pd.IndexSlice # multi-index slicing!
# deactivate IPython deprecations warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)
# deactivate numpy invalid comparison warnings
np.seterr(invalid='ignore')
# TODO: Allow full sklearn integration by allowing sample-wise application of custom classifiers. i.e. Provide data collection (get_data) and filter addition API.
# Especially: PCA, Gaussian Mixture Models
# TODO: Move away from single `internal_standard` specification towards specifying multiple internal standards.
# TODO: Add 'smooth all' function.
class analyse(object):
"""
For processing and analysing whole LA - ICPMS datasets.
Parameters
----------
data_path : str
The path to a directory containing multiple data files.
errorhunt : bool
If True, latools prints the name of each file before it
imports the data. This is useful for working out which
data file is causing the import to fail.
config : str
The name of the configuration to use for the analysis.
This determines which configuration set from the
latools.cfg file is used, and overrides the default
        configuration setup. You might specify this if your lab
routinely uses two different instruments.
dataformat : str or dict
Either a path to a data format file, or a
dataformat dict. See documentation for more details.
extension : str
The file extension of your data files. Defaults to
'.csv'.
srm_identifier : str
A string used to separate samples and standards. srm_identifier
must be present in all standard measurements. Defaults to
'STD'.
cmap : dict
A dictionary of {analyte: colour} pairs. Colour can be any valid
matplotlib colour string, RGB or RGBA sequence, or hex string.
time_format : str
A regex string identifying the time format, used by pandas when
        creating a universal time scale. If unspecified (None), pandas
attempts to infer the time format, but in some cases this might
not work.
internal_standard : str
The name of the analyte used as an internal standard throughout
analysis.
file_structure : str
        This specifies whether latools should expect multiple files in a folder ('multi')
or a single file containing multiple analyses ('long'). Default is 'multi'.
names : str or array-like
If file_structure is 'multi', this should be either:
* 'file_names' : use the file names as labels (default)
          * 'metadata_names' : use the 'names' attribute of metadata as the name
anything else : use numbers.
If file_structure is 'long', this should be a list of names for the ablations
in the file. The wildcards '+' and '*' are supported in file names, and are used
when the number of ablations does not match the number of sample names provided.
If a sample name contains '+', all ablations that are not specified in the list
are combined into a single file and given this name. If a sample name contains '*'
        these analyses are numbered sequentially and split into separate files.
        For example, if you have 5 ablations with one standard at the start and end you
could provide one of:
* names = ['std', 'sample+', 'std'], which would divide the long file into [std, sample (containing three ablations), std].
          * names = ['std', 'sample*', 'std'], which would divide the long file into [std, sample0, sample1, sample2, std], where each
name is associated with a single ablation.
split_kwargs : dict
Arguments to pass to latools.split.long_file()
Attributes
----------
path : str
Path to the directory containing the data files, as
specified by `data_path`.
dirname : str
The name of the directory containing the data files,
without the entire path.
files : array_like
A list of all files in `folder`.
param_dir : str
The directory where parameters are stored.
report_dir : str
The directory where plots are saved.
data : dict
A dict of `latools.D` data objects, labelled by sample
name.
samples : array_like
A list of samples.
analytes : array_like
A list of analytes measured.
stds : array_like
        A list of the `latools.D` objects containing the SRM
data. These must contain srm_identifier in the file name.
srm_identifier : str
A string present in the file names of all standards.
cmaps : dict
An analyte - specific colour map, used for plotting.
"""
def __init__(self, data_path, errorhunt=False, config='DEFAULT',
dataformat=None, extension='.csv', srm_identifier='STD',
cmap=None, time_format=None, internal_standard='Ca43',
file_structure='multi', names='file_names', srm_file=None, pbar=None, split_kwargs={}):
"""
For processing and analysing whole LA - ICPMS datasets.
"""
# initialise log
params = {k: v for k, v in locals().items() if k not in ['self', 'pbar']}
self.log = ['__init__ :: args=() kwargs={}'.format(str(params))]
# assign file paths
self.path = os.path.realpath(data_path)
self.parent_folder = os.path.dirname(self.path)
# set line length for outputs
self._line_width = 80
# make output directories
self.report_dir = re.sub('//', '/',
os.path.join(self.parent_folder,
os.path.splitext(os.path.basename(self.path))[0] + '_reports/'))
if not os.path.isdir(self.report_dir):
os.mkdir(self.report_dir)
self.export_dir = re.sub('//', '/',
os.path.join(self.parent_folder,
os.path.splitext(os.path.basename(self.path))[0] + '_export/'))
if not os.path.isdir(self.export_dir):
os.mkdir(self.export_dir)
# set up file paths
self._file_internal_standard_massfrac = os.path.join(self.export_dir, 'internal_standard_massfrac.csv')
# load configuration parameters
self.config = read_configuration(config)
# print some info about the analysis and setup.
startmsg = self._fill_line('-') + 'Starting analysis:'
if srm_file is None or dataformat is None:
startmsg += '\n Using {} configuration'.format(self.config['config'])
if config == 'DEFAULT':
startmsg += ' (default).'
else:
startmsg += '.'
pretext = ' with'
else:
pretext = 'Using'
if srm_file is not None:
startmsg += '\n ' + pretext + ' custom srm_file ({})'.format(srm_file)
if isinstance(dataformat, str):
startmsg += '\n ' + pretext + ' custom dataformat file ({})'.format(dataformat)
elif isinstance(dataformat, dict):
startmsg += '\n ' + pretext + ' custom dataformat dict'
print(startmsg)
self._load_srmfile(srm_file)
self._load_dataformat(dataformat)
# link up progress bars
if pbar is None:
self.pbar = progressbar()
else:
self.pbar = pbar
if file_structure == 'multi':
self.files = np.array([f for f in os.listdir(self.path)
if extension in f])
# load data into list (initialise D objects)
with self.pbar.set(total=len(self.files), desc='Loading Data') as prog:
data = [None] * len(self.files)
for i, f in enumerate(self.files):
data_passthrough = read_data(data_file=os.path.join(self.path, f), dataformat=self.dataformat, name_mode=names)
data[i] = D(passthrough=(f, *data_passthrough))
# data[i] = (D(os.path.join(self.path, f),
# dataformat=self.dataformat,
# errorhunt=errorhunt,
# cmap=cmap,
# internal_standard=internal_standard,
# name=names))
prog.update()
elif file_structure == 'long':
data = []
print(self.path)
for data_passthrough in long_file(data_file=self.path, dataformat=self.dataformat, sample_list=names, passthrough=True, **split_kwargs):
data.append(D(passthrough=data_passthrough))
# create universal time scale
if 'date' in data[0].meta.keys():
if (time_format is None) and ('time_format' in self.dataformat.keys()):
time_format = self.dataformat['time_format']
start_times = []
for d in data:
start_times.append(get_date(d.meta['date'], time_format))
min_time = min(start_times)
for d, st in zip(data, start_times):
d.uTime = d.Time + (st - min_time).seconds
else:
ts = 0
for d in data:
d.uTime = d.Time + ts
ts += d.Time[-1]
msg = self._wrap_text(
"Time not determined from dataformat. Universal time scale " +
"approximated as continuously measured samples. " +
"Samples might not be in the right order. "
"Background correction and calibration may not behave " +
"as expected.")
warnings.warn(self._wrap_msg(msg, '*'))
self.max_time = max([d.uTime.max() for d in data])
# sort data by uTime
data.sort(key=lambda d: d.uTime[0])
# process sample names
if (names == 'file_names') | (names == 'metadata_names'):
samples = np.array([s.sample for s in data], dtype=object) # get all sample names
# if duplicates, rename them
usamples, ucounts = np.unique(samples, return_counts=True)
if usamples.size != samples.size:
dups = usamples[ucounts > 1] # identify duplicates
nreps = ucounts[ucounts > 1] # identify how many times they repeat
for d, n in zip(dups, nreps): # cycle through duplicates
new = [d + '_{}'.format(i) for i in range(n)] # append number to duplicate names
ind = samples == d
samples[ind] = new # rename in samples
for s, ns in zip([data[i] for i in np.where(ind)[0]], new):
s.sample = ns # rename in D objects
elif file_structure == 'long':
samples = np.array([s.sample for s in data], dtype=object)
else:
samples = np.arange(len(data)) # assign a range of numbers
for i, s in enumerate(samples):
data[i].sample = s
self.samples = samples
# copy colour map to top level
self.cmaps = data[0].cmap
# get analytes
# TODO: does this preserve the *order* of the analytes?
all_analytes = set()
extras = set()
for d in data:
all_analytes.update(d.analytes)
extras.update(all_analytes.symmetric_difference(d.analytes))
self.analytes = all_analytes.difference(extras)
mismatch = []
if self.analytes != all_analytes:
smax = 0
for d in data:
if d.analytes != self.analytes:
mismatch.append((d.sample, d.analytes.difference(self.analytes)))
if len(d.sample) > smax:
smax = len(d.sample)
msg = (self._fill_line('*') +
                   'Not all data files contain the same analytes.\n' +
'Only analytes present in all files will be processed.\n' +
'In the following files, these analytes will be excluded:\n')
for s, a in mismatch:
msg += (' {0: <' + '{:}'.format(smax + 2) + '}: ').format(s) + str(a) + '\n'
msg += self._fill_line('*')
warnings.warn(msg)
# set for recording calculated ratios
self.analyte_ratios = set()
self.uncalibrated = set()
if len(self.analytes) == 0:
            raise ValueError('No analyte names identified. Please check the \ncolumn_id > pattern RegEx in your dataformat file.')
if internal_standard in self.analytes:
self.internal_standard = internal_standard
else:
self.internal_standard = None
warnings.warn(
self._wrap_text(f'The specified internal_standard {internal_standard} is not in the list of analytes ({self.analytes}). You will have to specify a valid analyte when calling the `ratio()` function later in the analysis.')
)
self.internal_standard_concs = None
self.minimal_analytes = set()
# record which analytes are needed for calibration
self.calibration_analytes = set()
# keep record of which stages of processing have been performed
self.stages_complete = set(['rawdata'])
# From this point on, data stored in dicts
self.data = Bunch(zip(self.samples, data))
# remove mismatch analytes - QUICK-FIX - SHOULD BE DONE HIGHER UP?
for s, a in mismatch:
self.data[s].analytes = self.data[s].analytes.difference(a)
# get SRM info
self.srm_identifier = srm_identifier
self.stds = [] # make this a dict
_ = [self.stds.append(s) for s in self.data.values()
if self.srm_identifier in s.sample]
self.srms_ided = False
# set up focus_stage recording
self.focus_stage = 'rawdata'
self.stat_focus_stage = None
self.focus = Bunch()
# set up subsets
self.clear_subsets()
# remove any analytes for which all counts are zero
# self.get_focus()
# for a in self.analytes:
# if np.nanmean(self.focus[a] == 0):
# self.analytes.remove(a)
# warnings.warn('{} contains no data - removed from analytes')
# initialise classifiers
self.classifiers = Bunch()
# report
print(('Loading Data:\n {:d} Data Files Loaded: {:d} standards, {:d} '
'samples').format(len(self.data),
len(self.stds),
len(self.data) - len(self.stds)))
astr = self._wrap_text('Analytes: ' + ' '.join(self.analytes_sorted()))
print(astr)
print(' Internal Standard: {}'.format(self.internal_standard))
def _fill_line(self, char, newline=True):
"""Generate a full line of given character"""
if newline:
return char * self._line_width + '\n'
else:
return char * self._line_width
def _wrap_text(self, text):
"""Splits text over multiple lines to fit within self._line_width"""
return '\n'.join(textwrap.wrap(text, width=self._line_width,
break_long_words=False))
def _wrap_msg(self, msg, char):
return self._fill_line(char) + msg + '\n' + self._fill_line(char, False)
def _load_dataformat(self, dataformat):
"""
Load in dataformat.
Check dataformat file exists, and store it in a class attribute.
If dataformat is not provided during initialisation, assign it
        from the configuration file.
"""
if dataformat is None:
if os.path.exists(self.config['dataformat']):
dataformat = self.config['dataformat']
elif os.path.exists(pkgrs.resource_filename('latools',
self.config['dataformat'])):
dataformat = pkgrs.resource_filename('latools',
self.config['dataformat'])
else:
config_file = config_locator()
raise ValueError(('The dataformat file specified in the ' +
self.config['config'] + ' configuration cannot be found.\n'
                                  'Please make sure the file exists, and that '
'the path in the config file is correct.\n'
'Your configurations can be found here:'
' {}\n'.format(config_file)))
self.dataformat_file = dataformat
else:
self.dataformat_file = 'None: dict provided'
# if it's a string, check the file exists and import it.
if isinstance(dataformat, str):
if os.path.exists(dataformat):
# self.dataformat = eval(open(dataformat).read())
self.dataformat = json.load(open(dataformat))
else:
warnings.warn(("The dataformat file (" + dataformat +
") cannot be found.\nPlease make sure the file "
"exists, and that the path is correct.\n\nFile "
"Path: " + dataformat))
# if it's a dict, just assign it straight away.
elif isinstance(dataformat, dict):
self.dataformat = dataformat
def _load_srmfile(self, srm_file):
"""
Check srmfile exists, and store it in a class attribute.
"""
if srm_file is not None:
if os.path.exists(srm_file):
self.srmfile = srm_file
else:
raise ValueError(('Cannot find the specified SRM file:\n ' +
srm_file +
                                 '\nPlease check that the file location is correct.'))
else:
if os.path.exists(self.config['srmfile']):
self.srmfile = self.config['srmfile']
elif os.path.exists(pkgrs.resource_filename('latools',
self.config['srmfile'])):
self.srmfile = pkgrs.resource_filename('latools',
self.config['srmfile'])
else:
config_file = config_locator()
raise ValueError(('The SRM file specified in the ' + self.config['config'] +
' configuration cannot be found.\n'
'Please make sure the file exists, and that the '
'path in the config file is correct.\n'
'Your configurations can be found here:'
' {}\n'.format(config_file)))
def _get_samples(self, subset=None):
"""
Helper function to get sample names from subset.
Parameters
----------
subset : str
Subset name. If None, returns all samples.
Returns
-------
List of sample names
"""
if subset is None:
samples = self.subsets['All_Samples']
else:
try:
samples = self.subsets[subset]
except KeyError:
raise KeyError(("Subset '{:s}' does not ".format(subset) +
"exist.\nUse 'make_subset' to create a" +
"subset."))
return samples
def _log_header(self):
return ['# LATOOLS analysis log saved at {}'.format(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_path :: {}'.format(self.path),
'# Analysis Log Start: \n'
]
def _analyte_checker(self, analytes=None, check_ratios=True, single=False, focus_stage=None):
"""
Return valid analytes depending on the analysis stage
"""
return analyte_checker(self, analytes=analytes, check_ratios=check_ratios, single=single, focus_stage=focus_stage)
def analytes_sorted(self, analytes=None, check_ratios=True, single=False, focus_stage=None):
return sorted(self._analyte_checker(analytes=analytes, check_ratios=check_ratios, single=single, focus_stage=focus_stage), key=analyte_sort_fn)
@_log
def basic_processing(self,
noise_despiker=True, despike_win=3, despike_nlim=12., # despike args
despike_maxiter=4,
autorange_analyte='total_counts', autorange_gwin=5, autorange_swin=3, autorange_win=20, # autorange args
autorange_on_mult=[1., 1.5], autorange_off_mult=[1.5, 1],
autorange_transform='log',
bkg_weight_fwhm=300., # bkg_calc_weightedmean
bkg_n_min=20, bkg_n_max=None, bkg_cstep=None,
bkg_filter=False, bkg_f_win=7, bkg_f_n_lim=3,
bkg_errtype='stderr', # bkg_sub
calib_drift_correct=True, # calibrate
calib_srms_used=['NIST610', 'NIST612', 'NIST614'],
calib_zero_intercept=True, calib_n_min=10,
plots=True):
self.despike(noise_despiker=noise_despiker,
win=despike_win, nlim=despike_nlim,
maxiter=despike_maxiter)
self.autorange(analyte=autorange_analyte, gwin=autorange_gwin, swin=autorange_swin,
win=autorange_win, on_mult=autorange_on_mult,
off_mult=autorange_off_mult,
transform=autorange_transform)
if plots:
self.trace_plots(ranges=True)
self.bkg_calc_weightedmean(weight_fwhm=bkg_weight_fwhm, n_min=bkg_n_min, n_max=bkg_n_max,
cstep=bkg_cstep, bkg_filter=bkg_filter, f_win=bkg_f_win, f_n_lim=bkg_f_n_lim)
if plots:
self.bkg_plot()
self.bkg_subtract(errtype=bkg_errtype)
self.ratio()
self.calibrate(drift_correct=calib_drift_correct, srms_used=calib_srms_used,
zero_intercept=calib_zero_intercept, n_min=calib_n_min)
if plots:
self.calibration_plot()
return
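    # Illustrative sketch (SRM names are examples only): a call such as
    #
    #     eg.basic_processing(despike_win=3, despike_nlim=12.,
    #                         calib_srms_used=['NIST610', 'NIST612'],
    #                         plots=False)
    #
    # is roughly equivalent to running despike, autorange,
    # bkg_calc_weightedmean, bkg_subtract, ratio and calibrate in sequence
    # with the corresponding keyword arguments.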
@_log
def autorange(self, analyte='total_counts', gwin=5, swin=3, win=20,
on_mult=[1., 1.5], off_mult=[1.5, 1],
transform='log', ploterrs=True, focus_stage='despiked', **kwargs):
"""
Automatically separates signal and background data regions.
Automatically detect signal and background regions in the laser
data, based on the behaviour of a single analyte. The analyte used
should be abundant and homogeneous in the sample.
**Step 1: Thresholding.**
The background signal is determined using a gaussian kernel density
estimator (kde) of all the data. Under normal circumstances, this
kde should find two distinct data distributions, corresponding to
'signal' and 'background'. The minima between these two distributions
is taken as a rough threshold to identify signal and background
regions. Any point where the trace crosses this threshold is identified
as a 'transition'.
**Step 2: Transition Removal.**
The width of the transition regions between signal and background are
then determined, and the transitions are excluded from analysis. The
width of the transitions is determined by fitting a gaussian to the
smoothed first derivative of the analyte trace, and determining its
width at the point where the gaussian intensity is at `conf` times the
gaussian maximum. These gaussians are fit to subsets of the data
centered around the transition regions determined in Step 1, +/- `win`
data points. The peak is further isolated by finding the minima and
maxima of a second derivative within this window, and the gaussian is
fit to the isolated peak.
Parameters
----------
analyte : str
The analyte that autorange should consider. For best results,
choose an analyte that is present homogeneously in high
concentrations.
This can also be 'total_counts' to use the sum of all analytes.
gwin : int
The smoothing window used for calculating the first derivative.
Must be odd.
win : int
Determines the width (c +/- win) of the transition data subsets.
swin : int
The smoothing window used for calculating the second derivative.
Must be odd.
conf : float
The proportional intensity of the fitted gaussian tails that
determines the transition width cutoff (lower = wider transition
regions excluded).
trans_mult : array_like, len=2
Multiples of the peak FWHM to add to the transition cutoffs, e.g.
if the transitions consistently leave some bad data following the
transition, set trans_mult to [0, 0.5] to add 0.5 * the FWHM to the
right hand side of the limit.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked', or 'rawdata' if not despiked. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
Outputs added as instance attributes. Returns None.
bkg, sig, trn : iterable, bool
Boolean arrays identifying background, signal and transition
regions
bkgrng, sigrng and trnrng : iterable
(min, max) pairs identifying the boundaries of contiguous
True regions in the boolean arrays.
"""
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
if analyte in self.analytes:
self.minimal_analytes.update([analyte])
fails = {} # list for catching failures.
with self.pbar.set(total=len(self.data), desc='AutoRange') as prog:
for s, d in self.data.items():
f = d.autorange(analyte=analyte, gwin=gwin, swin=swin, win=win,
on_mult=on_mult, off_mult=off_mult,
ploterrs=ploterrs, transform=transform, **kwargs)
if f is not None:
fails[s] = f
prog.update() # advance progress bar
# handle failures
if len(fails) > 0:
wstr = ('\n\n' + '*' * 41 + '\n' +
' WARNING\n' + '*' * 41 + '\n' +
'Autorange failed for some samples:\n')
kwidth = max([len(k) for k in fails.keys()]) + 1
fstr = ' {:' + '{}'.format(kwidth) + 's}: '
for k in sorted(fails.keys()):
wstr += fstr.format(k) + ', '.join(['{:.1f}'.format(f) for f in fails[k][-1]]) + '\n'
wstr += ('\n*** THIS IS NOT NECESSARILY A PROBLEM ***\n' +
'But please check the plots below to make\n' +
'sure they look OK. Failures are marked by\n' +
'dashed vertical red lines.\n\n' +
'To examine an autorange failure in more\n' +
'detail, use the `autorange_plot` method\n' +
'of the failing data object, e.g.:\n' +
"dat.data['Sample'].autorange_plot(params)\n" +
'*' * 41 + '\n')
warnings.warn(wstr)
self.stages_complete.update(['autorange'])
return
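# Usage sketch for autorange (analyte and window values are illustrative; `eg` is
# an analyse instance):
#
#   eg.autorange(analyte='total_counts', gwin=5, swin=3, win=20,
#                on_mult=[1., 1.5], off_mult=[1.5, 1.], transform='log')
#
# If a sample is reported in the failure warning, that sample's data object and
# its `autorange_plot` method can be used to examine the fit, as described in the
# warning text above.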
def find_expcoef(self, nsd_below=0., plot=False,
trimlim=None, autorange_kwargs={}):
"""
Determines exponential decay coefficient for despike filter.
Fits an exponential decay function to the washout phase of standards
to determine the washout time of your laser cell. The exponential
coefficient reported is `nsd_below` standard deviations below the
fitted exponent, to ensure that no real data is removed.
Total counts are used in fitting, rather than a specific analyte.
Parameters
----------
nsd_below : float
The number of standard deviations to subtract from the fitted
coefficient when calculating the filter exponent.
plot : bool or str
If True, creates a plot of the fit; if a str, the plot is saved to
the location specified by the string.
trimlim : float
A threshold limit used in determining the start of the
exponential decay region of the washout. Defaults to half
the increase in signal over background. If the data in
the plot don't fall on an exponential decay line, change
this number. Normally you'll need to increase it.
Returns
-------
None
"""
print('Calculating exponential decay coefficient\nfrom SRM washouts...')
def findtrim(tr, lim=None):
trr = np.roll(tr, -1)
trr[-1] = 0
if lim is None:
lim = 0.5 * np.nanmax(tr - trr)
ind = (tr - trr) >= lim
return np.arange(len(ind))[ind ^ np.roll(ind, -1)][0]
if not hasattr(self.stds[0], 'trnrng'):
for s in self.stds:
s.autorange(**autorange_kwargs, ploterrs=False)
trans = []
times = []
for v in self.stds:
for trnrng in v.trnrng[-1::-2]:
tr = minmax_scale(v.data['total_counts'][(v.Time > trnrng[0]) & (v.Time < trnrng[1])])
sm = np.apply_along_axis(np.nanmean, 1,
rolling_window(tr, 3, pad=0))
sm[0] = sm[1]
trim = findtrim(sm, trimlim) + 2
trans.append(minmax_scale(tr[trim:]))
times.append(np.arange(tr[trim:].size) *
np.diff(v.Time[1:3]))
times = np.concatenate(times)
times = np.round(times, 2)
trans = np.concatenate(trans)
ti = []
tr = []
for t in np.unique(times):
ti.append(t)
tr.append(np.nanmin(trans[times == t]))
def expfit(x, e):
"""
Exponential decay function.
"""
return np.exp(e * x)
ep, ecov = curve_fit(expfit, ti, tr, p0=(-1.))
eeR2 = R2calc(trans, expfit(times, ep))
if plot:
fig, ax = plt.subplots(1, 1, figsize=[6, 4])
ax.scatter(times, trans, alpha=0.2, color='k', marker='x', zorder=-2)
ax.scatter(ti, tr, alpha=1, color='k', marker='o')
fitx = np.linspace(0, max(ti))
ax.plot(fitx, expfit(fitx, ep), color='r', label='Fit')
ax.plot(fitx, expfit(fitx, ep - nsd_below * np.diag(ecov)**.5, ),
color='b', label='Used')
ax.text(0.95, 0.75,
('y = $e^{%.2f \pm %.2f * x}$\n$R^2$= %.2f \nCoefficient: '
'%.2f') % (ep,
np.diag(ecov)**.5,
eeR2,
ep - nsd_below * np.diag(ecov)**.5),
transform=ax.transAxes, ha='right', va='top', size=12)
ax.set_xlim(0, ax.get_xlim()[-1])
ax.set_xlabel('Time (s)')
ax.set_ylim(-0.05, 1.05)
ax.set_ylabel('Proportion of Signal')
plt.legend()
if isinstance(plot, str):
fig.savefig(plot)
self.expdecay_coef = ep - nsd_below * np.diag(ecov)**.5
print(' {:0.2f}'.format(self.expdecay_coef[0]))
return
@_log
def despike(self, expdecay_despiker=False, exponent=None,
noise_despiker=True, win=3, nlim=12., exponentrace_plot=False,
maxiter=4, autorange_kwargs={}, focus_stage='rawdata'):
"""
Despikes data with exponential decay and noise filters.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expcoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
exponentrace_plot : bool
Whether or not to show a plot of the automatically determined
exponential decay exponent.
maxiter : int
The maximum number of times that the filter is applied.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'rawdata'. Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
None
"""
if focus_stage != self.focus_stage:
self.set_focus(focus_stage)
if expdecay_despiker and exponent is None:
if not hasattr(self, 'expdecay_coef'):
self.find_expcoef(plot=exponentrace_plot,
autorange_kwargs=autorange_kwargs)
exponent = self.expdecay_coef
time.sleep(0.1)
with self.pbar.set(total=len(self.data), desc='Despiking') as prog:
for d in self.data.values():
d.despike(expdecay_despiker, exponent,
noise_despiker, win, nlim, maxiter)
prog.update()
self.stages_complete.update(['despiked'])
self.focus_stage = 'despiked'
return
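# Usage sketch for despike (parameter values are illustrative; `eg` is an analyse
# instance):
#
#   eg.despike(noise_despiker=True, win=3, nlim=12., maxiter=4)
#
# With expdecay_despiker=True and exponent=None, the decay coefficient is first
# determined from SRM washouts via find_expcoef:
#   eg.despike(expdecay_despiker=True, noise_despiker=True)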
# functions for background correction
def get_background(self, n_min=10, n_max=None, focus_stage='despiked', bkg_filter=False, f_win=5, f_n_lim=3):
"""
Extract all background data from all samples on universal time scale.
Used by both 'polynomial' and 'weightedmean' methods.
Parameters
----------
n_min : int
The minimum number of points a background region must
have to be included in calculation.
n_max : int
The maximum number of points a background region must
have to be included in calculation.
bkg_filter : bool
If True, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. Two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Returns
-------
pandas.DataFrame object containing background data.
"""
allbkgs = {'uTime': [],
'ns': []}
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
for a in self.analytes:
allbkgs[a] = []
n0 = 0
for s in self.data.values():
if sum(s.bkg) > 0:
allbkgs['uTime'].append(s.uTime[s.bkg])
allbkgs['ns'].append(enumerate_bool(s.bkg, n0)[s.bkg])
n0 = allbkgs['ns'][-1][-1]
for a in self.analytes:
allbkgs[a].append(s.data[focus_stage][a][s.bkg])
allbkgs.update((k, np.concatenate(v)) for k, v in allbkgs.items())
bkgs = pd.DataFrame(allbkgs) # using pandas here because it's much more efficient than loops.
self.bkg = Bunch()
# extract background data from whole dataset
if n_max is None:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: len(x) > n_min)
else:
self.bkg['raw'] = bkgs.groupby('ns').filter(lambda x: (len(x) > n_min) & (len(x) < n_max))
# calculate per-background-region stats
self.bkg['summary'] = self.bkg['raw'].groupby('ns').aggregate([np.mean, np.std, stderr])
# sort summary by uTime
self.bkg['summary'].sort_values(('uTime', 'mean'), inplace=True)
# self.bkg['summary'].index = np.arange(self.bkg['summary'].shape[0])
# self.bkg['summary'].index.name = 'ns'
if bkg_filter:
# calculate rolling mean and std from summary
t = self.bkg['summary'].loc[:, idx[:, 'mean']]
r = t.rolling(f_win).aggregate([np.nanmean, np.nanstd])
# calculate upper threshold
upper = r.loc[:, idx[:, :, 'nanmean']] + f_n_lim * r.loc[:, idx[:, :, 'nanstd']].values
# calculate which are over upper threshold
over = r.loc[:, idx[:, :, 'nanmean']] > np.roll(upper.values, 1, 0)
# identify them
ns_drop = over.loc[over.apply(any, 1), :].index.values
# drop them from summary
self.bkg['summary'].drop(ns_drop, inplace=True)
# remove them from raw
ind = np.ones(self.bkg['raw'].shape[0], dtype=bool)
for ns in ns_drop:
ind = ind & (self.bkg['raw'].loc[:, 'ns'] != ns)
self.bkg['raw'] = self.bkg['raw'].loc[ind, :]
return
@_log
def bkg_calc_weightedmean(self, analytes=None, weight_fwhm=600,
n_min=20, n_max=None, cstep=None, errtype='stderr',
bkg_filter=False, f_win=7, f_n_lim=3, focus_stage='despiked'):
"""
Background calculation using a gaussian weighted mean.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
weight_fwhm : float
The full-width-at-half-maximum of the gaussian used
to calculate the weighted average.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If True, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. Two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
# Gaussian-weighted average
if 'calc' not in self.bkg.keys():
# create time points to calculate background
if cstep is None:
cstep = weight_fwhm / 20
elif cstep > weight_fwhm:
warnings.warn("\ncstep should be less than weight_fwhm. Your backgrounds\n" +
"might not behave as expected.\n")
bkg_t = np.linspace(0,
self.max_time,
int(self.max_time // cstep))
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
# TODO : calculation then dict assignment is clumsy...
mean, std, stderr = gauss_weighted_stats(self.bkg['raw'].uTime,
self.bkg['raw'].loc[:, analytes].values,
self.bkg['calc']['uTime'],
fwhm=weight_fwhm)
self.bkg_interps = {}
for i, a in enumerate(analytes):
self.bkg['calc'][a] = {'mean': mean[i],
'std': std[i],
'stderr': stderr[i]}
self.bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
y=un.uarray(self.bkg['calc'][a]['mean'],
self.bkg['calc'][a][errtype]))
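# Usage sketch for the background workflow (FWHM and n_min values are illustrative;
# `eg` is an analyse instance):
#
#   eg.bkg_calc_weightedmean(weight_fwhm=300., n_min=20)   # or eg.bkg_calc_interp1d(...)
#   eg.bkg_plot()        # optional visual check of the calculated background
#   eg.bkg_subtract(errtype='stderr')
#
# One of the bkg_calc_* methods must be run before bkg_subtract, which relies on
# the interpolators stored in self.bkg_interps.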
@_log
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None, cstep=30,
bkg_filter=False, f_win=7, f_n_lim=3, errtype='stderr', focus_stage='despiked'):
"""
Background calculation using a 1D interpolation.
scipy.interpolate.interp1D is used for interpolation.
Parameters
----------
analytes : str or iterable
Which analyte or analytes to calculate.
kind : str or int
Integer specifying the order of the spline interpolation
used, or string specifying a type of interpolation.
Passed to `scipy.interpolate.interp1D`.
n_min : int
Background regions with fewer than n_min points
will not be included in the fit.
cstep : float or None
The interval between calculated background points.
bkg_filter : bool
If True, apply a rolling filter to the isolated background regions
to exclude regions with anomalously high values. Two parameters
alter the filter's behaviour:
f_win : int
The size of the rolling window
f_n_lim : float
The number of standard deviations above the rolling mean
to set the threshold.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
if analytes is None:
analytes = self.analytes
self.bkg = Bunch()
elif isinstance(analytes, str):
analytes = [analytes]
self.get_background(n_min=n_min, n_max=n_max,
bkg_filter=bkg_filter,
f_win=f_win, f_n_lim=f_n_lim, focus_stage=focus_stage)
def pad(a, lo=None, hi=None):
if lo is None:
lo = [a[0]]
if hi is None:
hi = [a[-1]]
return np.concatenate((lo, a, hi))
if 'calc' not in self.bkg.keys():
# create time points to calculate background
bkg_t = pad(np.ravel(self.bkg.raw.loc[:, ['uTime', 'ns']].groupby('ns').aggregate([min, max])))
bkg_t = np.unique(np.sort(np.concatenate([bkg_t, np.arange(0, self.max_time, cstep)])))
self.bkg['calc'] = Bunch()
self.bkg['calc']['uTime'] = bkg_t
d = self.bkg['summary']
self.bkg_interps = {}
with self.pbar.set(total=len(analytes), desc='Calculating Analyte Backgrounds') as prog:
for a in analytes:
fill_vals = (un.uarray(d.loc[:, (a, 'mean')].iloc[0], d.loc[:, (a, errtype)].iloc[0]),
un.uarray(d.loc[:, (a, 'mean')].iloc[-1], d.loc[:, (a, errtype)].iloc[-1]))
p = un_interp1d(x=d.loc[:, ('uTime', 'mean')],
y=un.uarray(d.loc[:, (a, 'mean')],
d.loc[:, (a, errtype)]),
kind=kind, bounds_error=False, fill_value=fill_vals)
self.bkg_interps[a] = p
self.bkg['calc'][a] = {'mean': p.new_nom(self.bkg['calc']['uTime']),
errtype: p.new_std(self.bkg['calc']['uTime'])}
prog.update()
# self.bkg['calc']
return
@_log
def bkg_subtract(self, analytes=None, errtype='stderr', focus_stage='despiked'):
"""
Subtract calculated background from data.
Must run bkg_calc first!
Parameters
----------
analytes : str or iterable
Which analyte(s) to subtract.
errtype : str
Which type of error to propagate. default is 'stderr'.
focus_stage : str
Which stage of analysis to apply processing to.
Defaults to 'despiked' if present, or 'rawdata' if not.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
"""
analytes = self._analyte_checker(analytes)
if focus_stage == 'despiked':
if 'despiked' not in self.stages_complete:
focus_stage = 'rawdata'
# make uncertainty-aware background interpolators
# bkg_interps = {}
# for a in analytes:
# bkg_interps[a] = un_interp1d(x=self.bkg['calc']['uTime'],
# y=un.uarray(self.bkg['calc'][a]['mean'],
# self.bkg['calc'][a][errtype]))
# self.bkg_interps = bkg_interps
# apply background corrections
with self.pbar.set(total=len(self.data), desc='Background Subtraction') as prog:
for d in self.data.values():
# [d.bkg_subtract(a, bkg_interps[a].new(d.uTime), None, focus_stage=focus_stage) for a in analytes]
[d.bkg_subtract(a, self.bkg_interps[a].new(d.uTime), ~d.sig, focus_stage=focus_stage) for a in analytes]
d.setfocus('bkgsub')
prog.update()
self.stages_complete.update(['bkgsub'])
self.focus_stage = 'bkgsub'
return
@_log
def correct_spectral_interference(self, target_analyte, source_analyte, f):
"""
Correct spectral interference.
Subtract interference counts from target_analyte, based on the
intensity of a source_analyte and a known fractional contribution (f).
Correction takes the form:
target_analyte -= source_analyte * f
Only operates on background-corrected data ('bkgsub'). To undo a correction,
rerun `self.bkg_subtract()`.
Example
-------
To correct 44Ca+ for an 88Sr++ interference, where both 43.5 and 44 Da
peaks are known:
f = abundance(88Sr) / abundance(87Sr)
counts(44Ca) = counts(44 Da) - counts(43.5 Da) * f
Parameters
----------
target_analyte : str
The name of the analyte to modify.
source_analyte : str
The name of the analyte to base the correction on.
f : float
The fraction of the intensity of the source_analyte to
subtract from the target_analyte. Correction is:
target_analyte - source_analyte * f
Returns
-------
None
"""
if target_analyte not in self.analytes:
raise ValueError('target_analyte: {:} not in available analytes ({:})'.format(target_analyte, ', '.join(self.analytes)))
if source_analyte not in self.analytes:
raise ValueError('source_analyte: {:} not in available analytes ({:})'.format(source_analyte, ', '.join(self.analytes)))
with self.pbar.set(total=len(self.data), desc='Interference Correction') as prog:
for d in self.data.values():
d.correct_spectral_interference(target_analyte, source_analyte, f)
prog.update()
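# Usage sketch for correct_spectral_interference, following the 88Sr++ on 44Ca+
# example in the docstring. Analyte names depend on how they appear in your data
# files, and the isotopic abundances below are approximate (`eg` is an analyse
# instance):
#
#   f = 0.8258 / 0.0700   # abundance(88Sr) / abundance(87Sr)
#   eg.correct_spectral_interference(target_analyte='Ca44', source_analyte='Ca43_5', f=f)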
@_log
def bkg_plot(self, analytes=None, figsize=None, yscale='log',
ylim=None, err='stderr', save=True):
"""
Plot the calculated background.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot.
figsize : tuple
The (width, height) of the figure, in inches.
If None, calculated based on number of samples.
yscale : str
'log' (default) or 'linear'.
ylim : tuple
Manually specify the y scale.
err : str
What type of error to plot. Default is stderr.
save : bool
If True, figure is saved.
Returns
-------
fig, ax : matplotlib.figure, matplotlib.axes
"""
# if not hasattr(self, 'bkg'):
# raise ValueError("\nPlease calculate a background before attempting to\n" +
# "plot it... either:\n" +
# " bkg_calc_interp1d\n" +
# " bkg_calc_weightedmean\n")
if not hasattr(self, 'bkg'):
self.get_background()
analytes = self._analyte_checker(analytes)
if figsize is None:
if len(self.samples) > 50:
figsize = (len(self.samples) * 0.2, 5)
else:
figsize = (10, 5)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes([.07, .1, .84, .8])
with self.pbar.set(total=len(analytes), desc='Plotting backgrounds') as prog:
for a in analytes:
# draw data points
ax.scatter(self.bkg['raw'].uTime, self.bkg['raw'].loc[:, a],
alpha=0.5, s=3, c=self.cmaps[a],
lw=0.5)
# draw STD boxes
for i, r in self.bkg['summary'].iterrows():
x = (r.loc['uTime', 'mean'] - r.loc['uTime', 'std'] * 2,
r.loc['uTime', 'mean'] + r.loc['uTime', 'std'] * 2)
yl = [r.loc[a, 'mean'] - r.loc[a, err]] * 2
yu = [r.loc[a, 'mean'] + r.loc[a, err]] * 2
ax.fill_between(x, yl, yu, alpha=0.8, lw=0.5, color=self.cmaps[a], zorder=1)
prog.update()
if yscale == 'log':
ax.set_yscale('log')
if ylim is not None:
ax.set_ylim(ylim)
else:
ax.set_ylim(ax.get_ylim() * np.array([1, 10])) # x10 to make sample names readable.
if 'calc' in self.bkg:
for a in analytes:
# draw confidence intervals of calculated
x = self.bkg['calc']['uTime']
y = self.bkg['calc'][a]['mean']
yl = self.bkg['calc'][a]['mean'] - self.bkg['calc'][a][err]
yu = self.bkg['calc'][a]['mean'] + self.bkg['calc'][a][err]
# trim values below zero if log scale
if yscale == 'log':
yl[yl < ax.get_ylim()[0]] = ax.get_ylim()[0]
ax.plot(x, y,
c=self.cmaps[a], zorder=2, label=pretty_element(a))
ax.fill_between(x, yl, yu,
color=self.cmaps[a], alpha=0.3, zorder=-1)
else:
for a in analytes:
ax.plot([], [], c=self.cmaps[a], label=pretty_element(a))
ax.set_xlabel('Time (s)')
ax.set_ylabel('Background Counts')
ax.set_title('Points = raw data; Bars = {:s}; Lines = Calculated Background; Envelope = Background {:s}'.format(err, err),
fontsize=10)
ha, la = ax.get_legend_handles_labels()
ax.legend(labels=la[:len(analytes)], handles=ha[:len(analytes)], bbox_to_anchor=(1, 1))
# set x axis to span the full analysis time range
xlim = [0, max([d.uTime[-1] for d in self.data.values()])]
ax.set_xlim(xlim)
# add sample labels
for s, d in self.data.items():
ax.axvline(d.uTime[0], alpha=0.2, color='k', zorder=-1)
ax.text(d.uTime[0], ax.get_ylim()[1], s, rotation=90,
va='top', ha='left', zorder=-1, fontsize=7)
if save:
fig.savefig(self.report_dir + '/background.png', dpi=200)
return fig, ax
# functions for calculating ratios
@_log
def ratio(self, internal_standard=None, analytes=None, focus_stage=None):
"""
Calculates the ratio of all analytes to a single analyte.
Parameters
----------
internal_standard : str
The name of the analyte to divide all other analytes
by.
Returns
-------
None
"""
if focus_stage is None:
focus_stage = self.focus_stage
if 'bkgsub' not in self.stages_complete:
raise RuntimeError('Cannot calculate ratios before background subtraction.')
analytes = self._analyte_checker(analytes, focus_stage=focus_stage)
if internal_standard is not None:
self.internal_standard = internal_standard
if self.internal_standard in self.analytes.union(self.analyte_ratios):
self.minimal_analytes.update([internal_standard])
self.calibration_analytes.update([internal_standard])
self.calibration_analytes.update(analytes)
else:
raise ValueError('The internal standard ({}) is not amongst the '.format(internal_standard) +
'analytes in\nyour data files. Please make sure it is specified correctly.')
# check internal_standard is valid
internal_standard = self._analyte_checker(self.internal_standard, focus_stage=focus_stage).pop()
with self.pbar.set(total=len(self.data), desc='Ratio Calculation') as prog:
for s in self.data.values():
s.ratio(internal_standard=internal_standard, analytes=analytes, focus_stage=focus_stage)
self.analyte_ratios.update(s.analyte_ratios)
self.cmaps.update(s.cmap)
prog.update()
if self.focus_stage not in ['ratios', 'calibrated', 'mass_fraction']:
self.stages_complete.update(['ratios'])
self.focus_stage = 'ratios'
return
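# Usage sketch for ratio (the internal standard name is illustrative, and must be
# one of the measured analytes; `eg` is an analyse instance):
#
#   eg.ratio(internal_standard='Ca43')
#
# Afterwards the focus stage is 'ratios' and data are expressed as
# analyte/internal_standard signal ratios.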
def srm_load_database(self, srms_used=None, reload=False):
if not hasattr(self, 'srmdat') or reload:
# load SRM info
srmdat = srms.read_table(self.srmfile)
srmdat = srmdat.loc[srms_used]
srmdat.reset_index(inplace=True)
srmdat.set_index(['SRM', 'Item'], inplace=True)
# calculate ratios to internal_standard for calibration ratios
analyte_srm_link = {}
warns = {}
self.uncalibrated = set()
self._analytes_missing_from_srm = set()
# create an empty SRM table
srmtab = pd.DataFrame(index=srms_used, columns=pd.MultiIndex.from_product([self.analyte_ratios, ['mean', 'err']]))
for srm in srms_used:
srm_nocal = set()
srmsub = srmdat.loc[srm]
# determine analyte - Item pairs in table
ad = {}
for ar in self.analyte_ratios:
a_num, a_denom = ar.split('_') # separate numerator and denominator
for a in [a_num, a_denom]:
if a in ad.keys():
continue
# check if there's an exact match of form [Mass][Element] in srmdat
mna = analyte_2_massname(a)
if mna in srmsub.index:
ad[a] = mna
else:
# if not, match by element name.
item = srmsub.index[srmsub.index.str.contains(get_analyte_name(a))].values
if len(item) > 1:
item = item[item == get_analyte_name(a)]
if len(item) == 1:
ad[a] = item[0]
else:
if srm not in warns:
warns[srm] = []
warns[srm].append(a)
srm_nocal.update([ar])
analyte_srm_link[srm] = ad
# build calibration database for given ratios
for a in self.analyte_ratios.difference(srm_nocal):
a_num, a_denom = a.split('_')
# calculate SRM polyatom multiplier (multiplier to account for stoichiometry,
# e.g. if internal standard is Na, N will be 2 if measured in SRM as Na2O)
N_denom = float(decompose_molecule(ad[a_denom])[get_analyte_name(a_denom)])
N_num = float(decompose_molecule(ad[a_num])[get_analyte_name(a_num)])
# calculate molar ratio
srmtab.loc[srm, (a, 'mean')] = ((srmdat.loc[(srm, ad[a_num]), 'mol/g'] * N_num) /
(srmdat.loc[(srm, ad[a_denom]), 'mol/g'] * N_denom))
srmtab.loc[srm, (a, 'err')] = (srmtab.loc[srm, (a, 'mean')] *
((srmdat.loc[(srm, ad[a_num]), 'mol/g_err'] / (srmdat.loc[(srm, ad[a_num]), 'mol/g']))**2 +
(srmdat.loc[(srm, ad[a_denom]), 'mol/g_err'] / (srmdat.loc[(srm, ad[a_denom]), 'mol/g']))**2)**0.5)
# where uncertainties are missing, replace with zeros
srmtab[srmtab.loc[:, idx[:, 'err']].isnull()] = 0
# record outputs
self.srmdat = srmdat # the full SRM table
self._analyte_srmdat_link = analyte_srm_link # dict linking analyte names to rows in srmdat
self.srmtab = srmtab.astype(float) # a summary of relevant mol/mol values only
# record which analytes have missing CRM data
means = self.srmtab.loc[:, idx[:, 'mean']]
means.columns = means.columns.droplevel(1)
self._analytes_missing_srm = means.columns.values[means.isnull().any()] # analyte ratios missing from SRM table
self._srm_id_analyte_ratios = means.columns.values[~means.isnull().any()] # analyte ratios present in all SRMs
# self._calib_analyte_ratios = means.columns.values[~means.isnull().all()]
self.uncalibrated.intersection_update(srm_nocal)
self._analytes_missing_from_srm.update(srm_nocal)
# Print any warnings
if len(warns) > 0:
print('WARNING: Some analytes are not present in the SRM database for some standards:')
for srm, a in warns.items():
print(f' {srm}: ' + ', '.join(self.analytes_sorted(a, focus_stage='bkgsub')))
if len(self.uncalibrated) > 0:
self.analyte_ratios.difference_update(self.uncalibrated)
print('WARNING: Some analytes are not present in the SRM database for ANY standards:')
print(f'{self.uncalibrated} have been removed from further analysis.')
def srm_compile_measured(self, n_min=10, focus_stage='ratios'):
"""
Compile mean and standard errors of measured SRMs
Parameters
----------
n_min : int
The minimum number of points to consider as a valid measurement.
Default = 10.
"""
warns = []
# compile mean and standard errors of samples
for s in self.stds:
s_stdtab = pd.DataFrame(columns=pd.MultiIndex.from_product([s.analyte_ratios, ['err', 'mean']]))
s_stdtab.index.name = 'uTime'
if not s.n > 0:
s.stdtab = s_stdtab
continue
for n in range(1, s.n + 1):
ind = s.ns == n
if sum(ind) >= n_min:
for a in s.analyte_ratios:
aind = ind & ~np.isnan(nominal_values(s.data[focus_stage][a]))
s_stdtab.loc[np.nanmean(s.uTime[s.ns == n]),
(a, 'mean')] = np.nanmean(nominal_values(s.data[focus_stage][a][aind]))
s_stdtab.loc[np.nanmean(s.uTime[s.ns == n]),
(a, 'err')] = np.nanstd(nominal_values(s.data[focus_stage][a][aind])) / np.sqrt(sum(aind))
else:
warns.append(' Ablation {:} of SRM measurement {:} ({:} points)'.format(n, s.sample, sum(ind)))
# sort column multiindex
s_stdtab = s_stdtab.loc[:, s_stdtab.columns.sort_values()]
# sort row index
s_stdtab.sort_index(inplace=True)
# create 'SRM' column for naming SRM
s_stdtab.loc[:, 'STD'] = s.sample
s.stdtab = s_stdtab
if len(warns) > 0:
print('WARNING: Some SRM ablations have been excluded because they do not contain enough data:')
print('\n'.join(warns))
print("To *include* these ablations, reduce the value of n_min (currently {:})".format(n_min))
# compile them into a table
stdtab = pd.concat([s.stdtab for s in self.stds]).apply(pd.to_numeric, 1, errors='ignore')
stdtab = stdtab.reindex(self.analytes_sorted(self.analyte_ratios, focus_stage=focus_stage) + ['STD'], level=0, axis=1)
# identify groups of consecutive SRMs
ts = stdtab.index.values
start_times = [s.uTime[0] for s in self.data.values()]
lastpos = sum(ts[0] > start_times)
group = [1]
for t in ts[1:]:
pos = sum(t > start_times)
rpos = pos - lastpos
if rpos <= 1:
group.append(group[-1])
else:
group.append(group[-1] + 1)
lastpos = pos
stdtab.loc[:, 'group'] = group
# calculate centre time for the groups
stdtab.loc[:, 'gTime'] = np.nan
for g, d in stdtab.groupby('group'):
ind = stdtab.group == g
stdtab.loc[ind, 'gTime'] = stdtab.loc[ind].index.values.mean()
self.stdtab = stdtab
def srm_id_auto(self, srms_used=['NIST610', 'NIST612', 'NIST614'], analytes=None, n_min=10, reload_srm_database=False):
"""
Function for automatically identifying SRMs using KMeans clustering.
KMeans is performed on the log of SRM composition, which aids separation
of relatively similar SRMs within a large compositional range.
Parameters
----------
srms_used : iterable
Which SRMs have been used. Must match SRM names
in SRM database *exactly* (case sensitive!).
analytes : array_like
Which analyte ratios to base the identification on. If None,
all analyte ratios are used (default).
n_min : int
The minimum number of data points a SRM measurement
must contain to be included.
reload_srm_database : bool
Whether or not to re-load the SRM database before running the function.
"""
# TODO: srm_id_plot!
if isinstance(srms_used, str):
srms_used = [srms_used]
# reload SRM database (if reload_srm_database=True)
self.srm_load_database(srms_used, reload_srm_database)
analytes = self._analyte_checker(analytes)
analytes.difference_update(self._analytes_missing_srm)
analytes = list(analytes)
# get and scale mean srm values for all analytes
srmid = self.srmtab.loc[:, idx[analytes, 'mean']]
_srmid = scale(np.log(srmid))
srm_labels = srmid.index.values
# get and scale measured srm values for all analytes
stdid = self.stdtab.loc[:, idx[analytes, 'mean']]
_stdid = scale(np.log(stdid))
_stdid[np.isnan(_stdid)] = -12
# fit KMeans classifier to srm database
classifier = KMeans(len(srms_used)).fit(_srmid)
# apply classifier to measured data
std_classes = classifier.predict(_stdid)
# get srm names from classes
std_srm_labels = np.array([srm_labels[np.argwhere(classifier.labels_ == i)][0][0] for i in std_classes])
self.stdtab.loc[:, 'SRM'] = std_srm_labels
self._srm_key_dict = {k: v for k, v in zip(self.stdtab.STD, self.stdtab.SRM)}
self.srms_ided = True
self.srm_build_calib_table()
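# Usage sketch for srm_id_auto (SRM names must match the SRM database exactly;
# `eg` is an analyse instance):
#
#   eg.srm_id_auto(srms_used=['NIST610', 'NIST612', 'NIST614'], n_min=10)
#
# Identification clusters the scaled log of the SRM database compositions with
# KMeans, assigns each measured SRM ablation to the nearest cluster, and stores
# the labels in self.stdtab['SRM'] before building self.caltab.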
def srm_build_calib_table(self):
"""
Combine SRM database values and identified measured values into a calibration database.
"""
caltab = self.stdtab.reset_index()
caltab.set_index(['gTime', 'uTime'], inplace=True)
levels = ['meas_' + c if c != '' else c for c in caltab.columns.levels[1]]
caltab.columns.set_levels(levels, 1, inplace=True)
for a in self.analyte_ratios:
caltab.loc[:, (a, 'srm_mean')] = self.srmtab.loc[caltab.SRM, (a, 'mean')].values
caltab.loc[:, (a, 'srm_err')] = self.srmtab.loc[caltab.SRM, (a, 'err')].values
self.caltab = caltab.reindex(self.stdtab.columns.levels[0], axis=1, level=0)
def clear_calibration(self):
if self.srms_ided:
del self.stdtab
del self.srmdat
del self.srmtab
self.srms_ided = False
if 'calibrated' in self.stages_complete:
del self.calib_params
del self.calib_ps
self.stages_complete.remove('calibrated')
self.focus_stage = 'ratios'
self.set_focus('ratios')
# apply calibration to data
@_log
def calibrate(self, analytes=None, drift_correct=True,
srms_used=['NIST610', 'NIST612', 'NIST614'],
zero_intercept=True, n_min=10, reload_srm_database=False):
"""
Calibrates the data to measured SRM values.
By default (zero_intercept=True), assumes that the y intercept is zero.
Parameters
----------
analytes : str or iterable
Which analytes you'd like to calibrate. Defaults to all.
drift_correct : bool
Whether to pool all SRM measurements into a single calibration,
or vary the calibration through the run, interpolating
coefficients between measured SRMs.
srms_used : str or iterable
Which SRMs have been measured. Must match names given in
SRM data file *exactly*.
n_min : int
The minimum number of data points an SRM measurement
must have to be included.
Returns
-------
None
"""
# load SRM database
self.srm_load_database(srms_used, reload_srm_database)
# compile measured SRM data
self.srm_compile_measured(n_min)
analytes = self._analyte_checker(analytes)
if isinstance(srms_used, str):
srms_used = [srms_used]
if not hasattr(self, 'srmtabs'):
self.srm_id_auto(srms_used=srms_used, n_min=n_min, reload_srm_database=reload_srm_database)
# make container for calibration params
gTime = np.asanyarray(self.caltab.index.levels[0])
if not hasattr(self, 'calib_params'):
self.calib_params = pd.DataFrame(columns=pd.MultiIndex.from_product([analytes, ['m']]),
index=gTime)
if zero_intercept:
fn = lambda x, m: x * m
else:
fn = lambda x, m, c: x * m + c
for a in analytes:
if zero_intercept:
if (a, 'c') in self.calib_params:
self.calib_params.drop((a, 'c'), 1, inplace=True)
else:
self.calib_params.loc[:, (a, 'c')] = 0
self.calib_params.loc[:, (a, 'c')] = self.calib_params[(a, 'c')].astype(object, copy=False) # set new column to object type
if drift_correct:
for g in gTime:
if self.caltab.loc[g].size == 0:
continue
meas = self.caltab.loc[g, (a, 'meas_mean')].values
srm = self.caltab.loc[g, (a, 'srm_mean')].values
viable = ~np.isnan(meas + srm) # remove any nan values
meas = meas[viable]
srm = srm[viable]
meas_err = self.caltab.loc[g, (a, 'meas_err')].values[viable]
srm_err = self.caltab.loc[g, (a, 'srm_err')].values[viable]
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation?
sigma = np.sqrt(meas_err**2 + srm_err**2)
if len(meas) > 1:
# multiple SRMs - do a regression
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[g, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = pe[1]
else:
# deal with case where there's only one datum
self.calib_params.loc[g, (a, 'm')] = (un.uarray(srm, srm_err) /
un.uarray(meas, meas_err))[0]
if not zero_intercept:
self.calib_params.loc[g, (a, 'c')] = 0
else:
meas = self.caltab.loc[:, (a, 'meas_mean')].values
srm = self.caltab.loc[:, (a, 'srm_mean')].values
viable = ~np.isnan(meas + srm) # remove any nan values
meas = meas[viable]
srm = srm[viable]
meas_err = self.caltab.loc[:, (a, 'meas_err')].values[viable]
srm_err = self.caltab.loc[:, (a, 'srm_err')].values[viable]
# TODO: replace curve_fit with Sambridge's 2D likelihood function for better uncertainty incorporation?
sigma = np.sqrt(meas_err**2 + srm_err**2)
if sum(viable) > 1:
p, cov = curve_fit(fn, meas, srm, sigma=sigma)
pe = unc.correlated_values(p, cov)
self.calib_params.loc[:, (a, 'm')] = pe[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = pe[1]
else:
self.calib_params.loc[:, (a, 'm')] = (un.uarray(srm, srm_err) /
un.uarray(meas, meas_err))[0]
if not zero_intercept:
self.calib_params.loc[:, (a, 'c')] = 0
if self.calib_params.index.min() == 0:
self.calib_params.drop(0, inplace=True)
self.calib_params.drop(self.calib_params.index.max(), inplace=True)
self.calib_params.loc[0, :] = self.calib_params.loc[self.calib_params.index.min(), :]
maxuT = np.max([d.uTime.max() for d in self.data.values()]) # calculate max uTime
self.calib_params.loc[maxuT, :] = self.calib_params.loc[self.calib_params.index.max(), :]
# sort indices for slice access
self.calib_params.sort_index(1, inplace=True)
self.calib_params.sort_index(0, inplace=True)
# calculate interpolators for applying calibrations
self.calib_ps = Bunch()
for a in analytes:
# TODO: revisit un_interp1d to see whether it plays well with correlated values.
# Possible re-write to deal with covariance matrices?
self.calib_ps[a] = {'m': un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'm')].values)}
if not zero_intercept:
self.calib_ps[a]['c'] = un_interp1d(self.calib_params.index.values,
self.calib_params.loc[:, (a, 'c')].values)
with self.pbar.set(total=len(self.data), desc='Applying Calibrations') as prog:
for d in self.data.values():
d.calibrate(self.calib_ps, analytes)
d.uncalibrated = self.uncalibrated
prog.update()
# record SRMs used for plotting
markers = 'osDsv<>PX' # for future implementation of SRM-specific markers.
if not hasattr(self, 'srms_used'):
self.srms_used = set(srms_used)
else:
self.srms_used.update(srms_used)
self.srm_mdict = {k: markers[i] for i, k in enumerate(self.srms_used)}
self.stages_complete.update(['calibrated'])
self.focus_stage = 'calibrated'
return
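# Usage sketch for calibrate (SRM names are illustrative and must match the SRM
# file; `eg` is an analyse instance):
#
#   eg.calibrate(drift_correct=True, zero_intercept=True,
#                srms_used=['NIST610', 'NIST612', 'NIST614'])
#   eg.calibration_plot()   # optional visual check
#
# With zero_intercept=True, each analyte ratio is scaled by a single factor m
# (srm = m * measured); otherwise an intercept c is also fitted.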
# data filtering
# TODO: Re-factor filtering to use 'classifier' objects?
# functions for calculating mass fraction (ppm)
def get_sample_list(self, save_as=None, overwrite=False):
"""
Save a csv list of all samples, to be populated with internal standard concentrations.
Parameters
----------
save_as : str
Location to save the file. Defaults to the export directory.
"""
if save_as is None:
save_as = self._file_internal_standard_massfrac
else:
self._file_internal_standard_massfrac = save_as
if os.path.exists(save_as):
if not overwrite:
raise IOError(f'File {save_as} exists. Please change the save location or specify overwrite=True')
empty = pd.DataFrame(index=self.samples, columns=['int_stand_massfrac'])
empty.to_csv(save_as)
print(self._wrap_text(f'Sample List saved to {save_as} \nPlease modify and re-import using read_internal_standard_concs()'))
def read_internal_standard_concs(self, sample_conc_file=None):
"""
Load in a per-sample list of internal sample concentrations.
Parameters
----------
sample_conc_file : str
Path to csv file containing internal standard mass fractions.
Must contain the sample names in the first column, column names
in the first row, and contain a column called 'int_stand_massfrac'.
If in doubt, use the `get_sample_list` function to generate a
blank template for your samples.
"""
if sample_conc_file is None:
sample_conc_file = self._file_internal_standard_massfrac
else:
self._file_internal_standard_massfrac = sample_conc_file
self.internal_standard_concs = pd.read_csv(sample_conc_file, index_col=0)
return self.internal_standard_concs
@_log
def calculate_mass_fraction(self, internal_standard_concs=None, analytes=None, analyte_masses=None):
"""
Convert calibrated molar ratios to mass fraction.
Parameters
----------
internal_standard_concs : float or str
The concentration of the internal standard in your samples.
If a string, should be the file name pointing towards the
[completed] output of get_sample_list().
analytes : str or array_like
The analytes you want to calculate.
analyte_masses : dict
A dict containing the masses to use for each analyte.
If None and the analyte names contain a number, that number
is used as the mass. If None and the analyte names do *not*
contain a number, the average mass for the element is used.
"""
analytes = self._analyte_checker(analytes, focus_stage='calibrated')
if analyte_masses is None:
analyte_masses = analyte_mass(self.analytes, False)
if isinstance(internal_standard_concs, str):
self.internal_standard_concs = self.read_internal_standard_concs(sample_conc_file=internal_standard_concs)
elif isinstance(internal_standard_concs, float):
self.internal_standard_concs = internal_standard_concs
elif not isinstance(self.internal_standard_concs, pd.DataFrame):
self.internal_standard_concs = self.read_internal_standard_concs()
isc = self.internal_standard_concs
if not isinstance(isc, pd.core.frame.DataFrame):
with self.pbar.set(total=len(self.data), desc='Calculating Mass Fractions') as prog:
for d in self.data.values():
d.calc_mass_fraction(isc, analytes, analyte_masses)
prog.update()
else:
with self.pbar.set(total=len(self.data), desc='Calculating Mass Fractions') as prog:
for k, d in self.data.items():
if k in isc.index:
d.calc_mass_fraction(isc.loc[k, 'int_stand_massfrac'], analytes, analyte_masses)
else:
d.calc_mass_fraction(np.nan, analytes, analyte_masses)
prog.update()
self.stages_complete.update(['mass_fraction'])
self.focus_stage = 'mass_fraction'
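# Usage sketch for the mass-fraction workflow (file name and value are
# illustrative; `eg` is an analyse instance):
#
#   eg.get_sample_list('internal_standard_concs.csv')       # write a template to fill in
#   eg.read_internal_standard_concs('internal_standard_concs.csv')
#   eg.calculate_mass_fraction()
#
# A single float also works if every sample has the same internal standard mass
# fraction, e.g. eg.calculate_mass_fraction(internal_standard_concs=0.4)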
@_log
def clear_subsets(self):
"""
Clears all subsets
"""
self._has_subsets = False
self._subset_names = []
self.subsets = Bunch()
self.subsets['All_Analyses'] = self.samples
self.subsets[self.srm_identifier] = [s for s in self.samples if self.srm_identifier in s]
self.subsets['All_Samples'] = [s for s in self.samples if self.srm_identifier not in s]
self.subsets['not_in_set'] = self.subsets['All_Samples'].copy()
@_log
def make_subset(self, samples=None, name=None, force=False, silent=False):
"""
Creates a subset of samples, which can be treated independently.
Parameters
----------
samples : str or array_like
Name of sample, or list of sample names.
name : (optional) str or number
The name of the sample group. Defaults to n + 1, where n is
the highest existing group number
force : bool
If there is an existing subset that contains the same samples,
a new set is not created unless `force=True`. Default is False.
"""
if isinstance(samples, str):
samples = [samples]
# Check if a subset containing the same samples already exists.
already_present = False
existing_name = ''
for k, v in self.subsets.items():
if set(v) == set(samples) and k != 'not_in_set':
already_present = True
existing_name = k
if already_present:
if not silent:
print('***NOPE***')
print(self._wrap_text(
f"A subset containing those samples already exists, and is called '{existing_name}'. A new subset has not been created. I suggest you use the existing one. If you'd like to go ahead anyway, set `force=True` to make a new subset with your provided name."
))
if not force:
return
not_exists = [s for s in samples if s not in self.subsets['All_Analyses']]
if len(not_exists) > 0:
raise ValueError(', '.join(not_exists) + ' not in the list of sample names.\nPlease check your sample names.\nNote: Sample names are stored in the .samples attribute of your analysis.')
if name is None:
name = max([-1] + [x for x in self.subsets.keys() if isinstance(x, int)]) + 1
self._subset_names.append(name)
if samples is not None:
self.subsets[name] = samples
for s in samples:
try:
self.subsets['not_in_set'].remove(s)
except ValueError:
pass
self._has_subsets = True
# for subset in np.unique(list(self.subsets.values())):
# self.subsets[subset] = sorted([k for k, v in self.subsets.items() if str(v) == subset])
if not silent:
print(f'Subset created called {name}.')
return name
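# Usage sketch for make_subset (sample and subset names are illustrative; `eg` is
# an analyse instance):
#
#   eg.make_subset(samples=['Sample_1', 'Sample_2'], name='group_A')
#
# Filters and plots that accept a `subset` argument can then target the group, e.g.
#   eg.filter_threshold('Al27', 100e-6, subset='group_A')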
@_log
def zeroscreen(self, focus_stage=None):
"""
Remove all points containing data below zero (which are impossible!)
"""
if focus_stage is None:
focus_stage = self.focus_stage
for s in self.data.values():
ind = np.ones(len(s.Time), dtype=bool)
for v in s.data[focus_stage].values():
ind = ind & (nominal_values(v) > 0)
for k in s.data[focus_stage].keys():
s.data[focus_stage][k][~ind] = unc.ufloat(np.nan, np.nan)
self.set_focus(focus_stage)
return
@_log
def filter_threshold(self, analyte, threshold,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
threshold : float
The threshold value.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Threshold Filter') as prog:
for s in samples:
self.data[s].filter_threshold(analyte, threshold)
prog.update()
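# Usage sketch for filter_threshold (analyte and threshold are illustrative; the
# threshold is in the units of the current focus stage, e.g. mol/mol ratios after
# calibration; `eg` is an analyse instance):
#
#   eg.filter_threshold(analyte='Al27', threshold=100e-6)
#
# This creates paired '_below' and '_above' filters on each sample, which are then
# switched on or off with the filter management methods defined elsewhere in this
# class (e.g. filter_on / filter_off).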
@_log
def filter_threshold_percentile(self, analyte, percentiles, level='population', filt=False,
samples=None, subset=None):
"""
Applies a threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
percentiles : float or iterable of len=2
The percentile values.
level : str
Whether to calculate percentiles from the entire dataset
('population') or for each individual sample ('individual')
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# Get all samples
self.get_focus(filt=filt, subset=subset, nominal=True)
dat = self.focus[analyte][~np.isnan(self.focus[analyte])]
# calculate filter limits
lims = np.percentile(dat, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile threshold filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = d.focus[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-pcnt_below'.format(percentiles[0]),
below,
'Values below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-pcnt_above'.format(percentiles[0]),
above,
'Values above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-pcnt_inside',
inside,
'Values between ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-pcnt_outside',
outside,
'Values outside ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
prog.update()
return
@_log
def filter_gradient_threshold(self, analyte, threshold, win=15,
recalc=True, win_mode='mid', win_exclude_outside=True, absolute_gradient=True,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
threshold : float
The threshold value.
recalc : bool
Whether or not to re-calculate the gradients.
win_mode : str
Whether the rolling window should sit to the left of, be centred on,
or sit to the right of the returned value. Can be 'left', 'mid' or 'right'.
win_exclude_outside : bool
If True, regions at the start and end where the gradient cannot be calculated
(depending on win_mode setting) will be excluded by the filter.
absolute_gradient : bool
If True, the filter is applied to the absolute gradient (i.e. always positive),
allowing the selection of 'flat' vs 'steep' regions regardless of slope direction.
If False, the sign of the gradient matters, allowing the selection of positive or
negative slopes only.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
with self.pbar.set(total=len(samples), desc='Gradient Threshold Filter') as prog:
for s in samples:
self.data[s].filter_gradient_threshold(analyte=analyte, win=win, threshold=threshold, recalc=recalc,
win_mode=win_mode, win_exclude_outside=win_exclude_outside,
absolute_gradient=absolute_gradient)
prog.update()
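# Usage sketch for filter_gradient_threshold (analyte, window and threshold are
# illustrative; the threshold is a rate of change in focus-stage units per second;
# `eg` is an analyse instance):
#
#   eg.filter_gradient_threshold(analyte='Al27', threshold=1e-6, win=15,
#                                win_mode='mid', absolute_gradient=True)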
@_log
def filter_gradient_threshold_percentile(self, analyte, percentiles, level='population', win=15, filt=False,
samples=None, subset=None):
"""
Calculate a gradient threshold filter to the data.
Generates two filters above and below the threshold value for a
given analyte.
Parameters
----------
analyte : str
The analyte that the filter applies to.
win : int
The window over which to calculate the moving gradient
percentiles : float or iterable of len=2
The percentile values.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
params = locals()
del(params['self'])
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte, single=True)
self.minimal_analytes.update([analyte])
# Calculate gradients of all samples
self.get_gradients(analytes=[analyte], win=win, filt=filt, subset=subset)
grad = self.gradients[analyte][~np.isnan(self.gradients[analyte])]
if isinstance(percentiles, (int, float)):
percentiles = [percentiles]
if level == 'population':
# calculate filter limits
lims = np.percentile(grad, percentiles)
# Calculate filter for individual samples
with self.pbar.set(total=len(samples), desc='Percentile Threshold Filter') as prog:
for s in samples:
d = self.data[s]
setn = d.filt.maxset + 1
g = calc_grads(d.Time, d.focus, [analyte], win)[analyte]
if level == 'individual':
gt = nominal_values(g)
lims = np.percentile(gt[~np.isnan(gt)], percentiles)
if len(lims) == 1:
above = g >= lims[0]
below = g < lims[0]
d.filt.add(analyte + '_{:.1f}-grd-pcnt_below'.format(percentiles[0]),
below,
'Gradients below {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
d.filt.add(analyte + '_{:.1f}-grd-pcnt_above'.format(percentiles[0]),
above,
'Gradients above {:.1f}th {:} percentile ({:.2e})'.format(percentiles[0], analyte, lims[0]),
params, setn=setn)
elif len(lims) == 2:
inside = (g >= min(lims)) & (g <= max(lims))
outside = (g < min(lims)) | (g > max(lims))
lpc = '-'.join(['{:.1f}'.format(p) for p in percentiles])
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_inside',
inside,
'Gradients between ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
d.filt.add(analyte + '_' + lpc + '-grd-pcnt_outside',
outside,
'Gradients outside ' + lpc + ' ' + analyte + ' percentiles',
params, setn=setn)
prog.update()
return
@_log
def filter_clustering(self, analytes, filt=False, normalise=True,
method='kmeans', include_time=False, samples=None,
sort=True, subset=None, level='sample', min_data=10, **kwargs):
"""
Applies an n-dimensional clustering filter to the data.
Parameters
----------
analytes : str
The analyte(s) that the filter applies to.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
normalise : bool
Whether or not to normalise the data to zero mean and unit
variance. Recommended if clustering based on more than 1 analyte.
Uses `sklearn.preprocessing.scale`.
method : str
Which clustering algorithm to use:
* 'meanshift': The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
* 'kmeans': The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
level : str
Whether to conduct the clustering analysis at the 'sample' or
'population' level.
include_time : bool
Whether or not to include the Time variable in the clustering
analysis. Useful if you're looking for spatially continuous
clusters in your data, i.e. this will identify each spot in your
analysis as an individual cluster.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
sort : bool
Whether or not you want the cluster labels to
be sorted by the mean magnitude of the signals
they are based on (0 = lowest)
min_data : int
The minimum number of data points that should be considered by
the filter. Default = 10.
**kwargs
Parameters passed to the clustering algorithm specified by
`method`.
Meanshift Parameters
bandwidth : str or float
The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analytes = self.analytes_sorted(analytes)
self.minimal_analytes.update(analytes)
if level == 'sample':
with self.pbar.set(total=len(samples), desc='Clustering Filter') as prog:
for s in samples:
self.data[s].filter_clustering(analytes=analytes, filt=filt,
normalise=normalise,
method=method,
include_time=include_time,
min_data=min_data,
sort=sort,
**kwargs)
prog.update()
if level == 'population':
if isinstance(sort, bool):
sort_by = 0
else:
sort_by = sort
name = '_'.join(analytes) + '_{}'.format(method)
self.fit_classifier(name=name, analytes=analytes, method=method,
subset=subset, filt=filt, sort_by=sort_by, **kwargs)
self.apply_classifier(name=name, subset=subset)
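    # Illustrative usage sketch (not part of the original source). Assumes an
    # existing analyse object `eg`; analyte names are hypothetical.
    #
    #     eg.filter_clustering(analytes=['Al27', 'Ba138'], method='kmeans',
    #                          n_clusters=2, level='sample')
    #     eg.filter_status()  # inspect the new cluster filters before turning them on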
@_log
def fit_classifier(self, name, analytes, method, samples=None,
subset=None, filt=True, sort_by=0, **kwargs):
"""
Create a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier.
analytes : str or iterable
Which analytes the clustering algorithm should consider.
method : str
Which clustering algorithm to use. Can be:
'meanshift'
The `sklearn.cluster.MeanShift` algorithm.
Automatically determines number of clusters
in data based on the `bandwidth` of expected
variation.
'kmeans'
The `sklearn.cluster.KMeans` algorithm. Determines
the characteristics of a known number of clusters
within the data. Must provide `n_clusters` to specify
the expected number of clusters.
samples : iterable
list of samples to consider. Overrides 'subset'.
subset : str
The subset of samples used to fit the classifier. Ignored if
'samples' is specified.
sort_by : int
Which analyte the resulting clusters should be sorted
by - defaults to 0, which is the first analyte.
**kwargs :
method-specific keyword parameters - see below.
Meanshift Parameters
bandwidth : str or float
            The bandwidth (float) or bandwidth method ('scott' or 'silverman')
used to estimate the data bandwidth.
bin_seeding : bool
Modifies the behaviour of the meanshift algorithm. Refer to
sklearn.cluster.meanshift documentation.
        K-Means Parameters
n_clusters : int
The number of clusters expected in the data.
Returns
-------
name : str
"""
# isolate data
if samples is not None:
subset = self.make_subset(samples, silent=True)
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
self.minimal_analytes.update(analytes)
self.get_focus(subset=subset, filt=filt)
        # create classifier
c = classifier(analytes,
sort_by)
# fit classifier
c.fit(data=self.focus,
method=method,
**kwargs)
self.classifiers[name] = c
return name
@_log
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name
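    # Illustrative usage sketch (not part of the original source): fit a classifier
    # on one subset, then apply it. Assumes an analyse object `eg`; analyte and
    # subset names are hypothetical.
    #
    #     cname = eg.fit_classifier(name='2cluster', analytes=['Al27', 'Mn55'],
    #                               method='kmeans', n_clusters=2, subset='forams')
    #     eg.apply_classifier(cname, subset='forams')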
@_log
def filter_correlation(self, x_analyte, y_analyte, window=None,
r_threshold=0.9, p_threshold=0.05, filt=True,
samples=None, subset=None):
"""
Applies a correlation filter to the data.
Calculates a rolling correlation between every `window` points of
two analytes, and excludes data where their Pearson's R value is
above `r_threshold` and statistically significant.
Data will be excluded where their absolute R value is greater than
        `r_threshold` AND the p-value associated with the correlation is
less than `p_threshold`. i.e. only correlations that are statistically
significant are considered.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
r_threshold : float
The correlation index above which to exclude data.
            Note: the absolute Pearson R value is considered, so
negative correlations below -`r_threshold` will also
be excluded.
p_threshold : float
            The significance level below which data are excluded.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
x_analyte = self._analyte_checker(x_analyte, single=True)
y_analyte = self._analyte_checker(y_analyte, single=True)
self.minimal_analytes.update([x_analyte, y_analyte])
with self.pbar.set(total=len(samples), desc='Correlation Filter') as prog:
for s in samples:
self.data[s].filter_correlation(x_analyte, y_analyte,
window=window,
r_threshold=r_threshold,
p_threshold=p_threshold,
filt=filt)
prog.update()
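    # Illustrative usage sketch (not part of the original source): exclude data
    # where two hypothetical analytes co-vary over a 51-point rolling window.
    #
    #     eg.filter_correlation('Al27', 'Ca43', window=51,
    #                           r_threshold=0.9, p_threshold=0.05)
    #     eg.filter_status()  # the new filter is created, but not yet active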
@_log
def correlation_plots(self, x_analyte, y_analyte, window=15, filt=True, recalc=False, samples=None, subset=None, outdir=None):
"""
Plot the local correlation between two analytes.
Parameters
----------
x_analyte, y_analyte : str
The names of the x and y analytes to correlate.
window : int, None
The rolling window used when calculating the correlation.
filt : bool
Whether or not to apply existing filters to the data before
calculating this filter.
recalc : bool
If True, the correlation is re-calculated, even if it is already present.
Returns
-------
None
"""
if outdir is None:
outdir = self.report_dir + '/correlations/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
x_analyte = self._analyte_checker(x_analyte, single=True)
y_analyte = self._analyte_checker(y_analyte, single=True)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, _ = self.data[s].correlation_plot(x_analyte=x_analyte, y_analyte=y_analyte,
window=window, filt=filt, recalc=recalc)
f.savefig('{}/{}_{}-{}.pdf'.format(outdir, s, x_analyte, y_analyte))
plt.close(f)
prog.update()
return
@_log
def filter_on(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters on for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte)
for s in samples:
try:
self.data[s].filt.on(analyte, filt)
except:
warnings.warn("filt.on failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
@_log
def filter_off(self, filt=None, analyte=None, samples=None, subset=None, show_status=False):
"""
Turns data filters off for particular analytes and samples.
Parameters
----------
filt : optional, str or array_like
Name, partial name or list of names of filters. Supports
partial matching. i.e. if 'cluster' is specified, all
filters with 'cluster' in the name are activated.
Defaults to all filters.
analyte : optional, str or array_like
Name or list of names of analytes. Defaults to all analytes.
samples : optional, array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analyte = self._analyte_checker(analyte)
for s in samples:
try:
self.data[s].filt.off(analyte, filt)
except:
warnings.warn("filt.off failure in sample " + s)
if show_status:
self.filter_status(subset=subset)
return
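    # Illustrative usage sketch (not part of the original source): partial name
    # matching when switching filters. Filter and analyte names are hypothetical.
    #
    #     eg.filter_on(filt='thresh', analyte='Sr88')       # activate for one analyte
    #     eg.filter_off(filt='thresh', samples='Sample_1')  # deactivate for one sample
    #     eg.filter_status()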
def filter_status(self, sample=None, subset=None, stds=False):
"""
Prints the current status of filters for specified samples.
Parameters
----------
sample : str
Which sample to print.
subset : str
Specify a subset
stds : bool
Whether or not to include standards.
"""
if sample is None and subset is None:
if not self._has_subsets:
return self.data[self.subsets['All_Samples'][0]].filt.filter_table
else:
fdfs = {}
for n in sorted(str(sn) for sn in self._subset_names):
if n in self.subsets:
pass
elif int(n) in self.subsets:
n = int(n)
pass
subset_name = str(n)
fdfs[subset_name] = self.data[self.subsets[n][0]].filt.filter_table
if len(self.subsets['not_in_set']) > 0:
fdfs['Not in Subset'] = self.data[self.subsets['not_in_set'][0]].filt.filter_table
return pd.concat(fdfs, names=['subset'])
elif sample is not None:
fdfs = {}
fdfs[sample] = self.data[sample].filt.filter_table
return pd.concat(fdfs, names=['sample'])
elif subset is not None:
if isinstance(subset, (str, int, float)):
subset = [subset]
fdfs = {}
for n in subset:
subset_name = str(n)
fdfs[subset_name] = self.data[self.subsets[n][0]].filt.filter_table
return pd.concat(fdfs, names=['subset'])
@_log
def filter_clear(self, samples=None, subset=None):
"""
Clears (deletes) all data filters.
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filt.clear()
@_log
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
        Remove 'fragments' from the calculated filter.
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
            Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
for s in samples:
f = self.data[s].filt.grab_filt(filt)
self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
filt=filters.defrag(f, threshold, mode),
info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold),
params=(threshold, mode, filt, samples, subset))
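    # Illustrative usage sketch (not part of the original source): apply the
    # defragmenter to the currently active filters with a 5-point threshold.
    #
    #     eg.filter_defragment(threshold=5, mode='include', filt=True)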
@_log
def filter_exclude_downhole(self, threshold, filt=True, samples=None, subset=None):
"""
Exclude all points down-hole (after) the first excluded data.
Parameters
----------
        threshold : int
The minimum number of contiguous excluded data points
that must exist before downhole exclusion occurs.
        filt : valid filter string or bool
Which filter to consider. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_exclude_downhole(threshold, filt)
@_log
def filter_trim(self, start=1, end=1, filt=True, samples=None, subset=None):
"""
Remove points from the start and end of filter regions.
Parameters
----------
start, end : int
The number of points to remove from the start and end of
the specified filter.
filt : valid filter string or bool
Which filter to trim. If True, applies to currently active
filters.
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
for s in samples:
self.data[s].filter_trim(start, end, filt)
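    # Illustrative usage sketch (not part of the original source): tidy up filter
    # edges - exclude everything down-hole of a 3-point excluded region, then
    # trim 2 points from the start and end of each remaining filter region.
    #
    #     eg.filter_exclude_downhole(threshold=3)
    #     eg.filter_trim(start=2, end=2)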
def filter_nremoved(self, filt=True, quiet=False):
"""
Report how many data are removed by the active filters.
"""
rminfo = {}
for n in self.subsets['All_Samples']:
s = self.data[n]
rminfo[n] = s.filt_nremoved(filt)
if not quiet:
maxL = max([len(s) for s in rminfo.keys()])
print('{string:{number}s}'.format(string='Sample ', number=maxL + 3) +
'{total:4s}'.format(total='tot') +
'{removed:4s}'.format(removed='flt') +
'{percent:4s}'.format(percent='%rm'))
for k, (ntot, nfilt, pcrm) in rminfo.items():
print('{string:{number}s}'.format(string=k, number=maxL + 3) +
'{total:4.0f}'.format(total=ntot) +
'{removed:4.0f}'.format(removed=nfilt) +
'{percent:4.0f}'.format(percent=pcrm))
return rminfo
@_log
def optimise_signal(self, analytes, min_points=5,
threshold_mode='kde_first_max',
threshold_mult=1., x_bias=0, filt=True,
weights=None, mode='minimise',
samples=None, subset=None):
"""
Optimise data selection based on specified analytes.
Identifies the longest possible contiguous data region in
the signal where the relative standard deviation (std) and
concentration of all analytes is minimised.
Optimisation is performed via a grid search of all possible
contiguous data regions. For each region, the mean std and
mean scaled analyte concentration ('amplitude') are calculated.
The size and position of the optimal data region are identified
using threshold std and amplitude values. Thresholds are derived
from all calculated stds and amplitudes using the method specified
by `threshold_mode`. For example, using the 'kde_max' method, a
probability density function (PDF) is calculated for std and
amplitude values, and the threshold is set as the maximum of the
PDF. These thresholds are then used to identify the size and position
of the longest contiguous region where the std is below the threshold,
        and the amplitude is either below or above the threshold, depending on `mode`.
All possible regions of the data that have at least
`min_points` are considered.
For a graphical demonstration of the action of signal_optimiser,
use `optimisation_plot`.
Parameters
----------
analytes : str or array_like
Which analytes to consider.
min_points : int
The minimum number of contiguous points to
consider.
threshold_mode : str
The method used to calculate the optimisation
thresholds. Can be 'mean', 'median', 'kde_max'
or 'bayes_mvs', or a custom function. If a
function, must take a 1D array, and return a
single, real number.
weights : array_like of length len(analytes)
An array of numbers specifying the importance of
each analyte considered. Larger number makes the
analyte have a greater effect on the optimisation.
Default is None.
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
analytes = self._analyte_checker(analytes)
self.minimal_analytes.update(analytes)
errs = []
with self.pbar.set(total=len(samples), desc='Optimising Data selection') as prog:
for s in samples:
e = self.data[s].signal_optimiser(analytes=analytes, min_points=min_points,
threshold_mode=threshold_mode, threshold_mult=threshold_mult,
x_bias=x_bias, weights=weights, filt=filt, mode=mode)
if e != '':
errs.append(e)
prog.update()
if len(errs) > 0:
print('\nA Few Problems:\n' + '\n'.join(errs) + '\n\n *** Check Optimisation Plots ***')
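    # Illustrative usage sketch (not part of the original source): optimise data
    # selection on two hypothetical analytes, then inspect the result.
    #
    #     eg.optimise_signal(analytes=['Al27', 'Mn55'], min_points=10,
    #                        threshold_mode='kde_first_max')
    #     eg.optimisation_plots()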
@_log
def optimisation_plots(self, overlay_alpha=0.5, samples=None, subset=None, **kwargs):
"""
Plot the result of signal_optimise.
`signal_optimiser` must be run first, and the output
stored in the `opt` attribute of the latools.D object.
Parameters
----------
overlay_alpha : float
The opacity of the threshold overlays. Between 0 and 1.
**kwargs
Passed to `trace_plot`
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
outdir=self.report_dir + '/optimisation_plots/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
figs = self.data[s].optimisation_plot(overlay_alpha, **kwargs)
n = 1
for f, _ in figs:
if f is not None:
f.savefig(os.path.join(outdir, s + '_optim_{:.0f}.pdf'.format(n)))
plt.close(f)
n += 1
prog.update()
return
# plot calibrations
@_log
def calibration_plot(self, analyte_ratios=None, datarange=True, loglog=False, ncol=3, srm_group=None, percentile_data_cutoff=85, save=True):
return plot.calibration_plot(self=self, analyte_ratios=analyte_ratios, datarange=datarange,
loglog=loglog, ncol=ncol, srm_group=srm_group,
percentile_data_cutoff=percentile_data_cutoff, save=save)
# set the focus attribute for specified samples
@_log
def set_focus(self, focus_stage=None, samples=None, subset=None):
"""
Set the 'focus' attribute of the data file.
The 'focus' attribute of the object points towards data from a
particular stage of analysis. It is used to identify the 'working
stage' of the data. Processing functions operate on the 'focus'
stage, so if steps are done out of sequence, things will break.
Names of analysis stages:
* 'rawdata': raw data, loaded from csv file when object
is initialised.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data,
padded with np.nan. Created by self.separate, after
signal and background regions have been identified by
self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by
self.calibrate.
Parameters
----------
        focus_stage : str
The name of the analysis stage desired.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
if subset is None:
subset = 'All_Analyses'
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
else:
self.focus_stage = focus_stage
for s in samples:
self.data[s].setfocus(focus_stage)
# fetch all the data from the data objects
def get_focus(self, filt=False, samples=None, subset=None, nominal=False):
"""
Collect all data from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
focus = {'uTime': []}
columns = self._analyte_checker()
focus.update({a: [] for a in columns})
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
for a in columns:
tmp = s.focus[a].copy()
if s.filt is not None:
ind = s.filt.grab_filt(filt, a)
tmp[~ind] = np.nan
focus[a].append(tmp)
if nominal:
self.focus.update({k: nominal_values(np.concatenate(v)) for k, v, in focus.items()})
else:
self.focus.update({k: np.concatenate(v) for k, v, in focus.items()})
# remove old columns
for k in list(self.focus.keys()):
if k not in columns:
self.focus.pop(k)
return
# fetch all the gradients from the data objects
def get_gradients(self, analytes=None, win=15, filt=False, samples=None, subset=None, recalc=True):
"""
        Collect gradients of all analytes from all samples into a single array.
Data from standards is not collected.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
samples : str or list
which samples to get
subset : str or int
which subset to get
Returns
-------
None
"""
analytes = self._analyte_checker(analytes)
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
# check if gradients already calculated
if all([self.data[s].grads_calced for s in samples]) and hasattr(self, 'gradients'):
if not recalc:
print("Using existing gradients. Set recalc=True to re-calculate.")
return
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
# t = 0
focus = {'uTime': []}
focus.update({a: [] for a in analytes})
with self.pbar.set(total=len(samples), desc='Calculating Gradients') as prog:
for sa in samples:
s = self.data[sa]
focus['uTime'].append(s.uTime)
ind = s.filt.grab_filt(filt)
grads = calc_grads(s.uTime, s.focus, keys=analytes, win=win)
for a in analytes:
tmp = grads[a]
tmp[~ind] = np.nan
focus[a].append(tmp)
s.grads = tmp
s.grads_calced = True
prog.update()
self.gradients.update({k: np.concatenate(v) for k, v, in focus.items()})
return
def gradient_histogram(self, analytes=None, win=15, filt=False, bins=None, samples=None, subset=None, recalc=True, ncol=4):
"""
Plot a histogram of the gradients in all samples.
Parameters
----------
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
bins : None or array_like
The bins to use in the histogram
samples : str or list
which samples to get
subset : str or int
which subset to get
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
fig, ax
"""
analytes = self._analyte_checker(analytes)
if not hasattr(self, 'gradients'):
self.gradients = Bunch()
ncol = int(ncol)
n = len(analytes)
nrow = plot.calc_nrow(n, ncol)
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
fig, axs = plt.subplots(nrow, ncol, figsize=[3. * ncol, 2.5 * nrow])
if not isinstance(axs, np.ndarray):
axs = [axs]
i = 0
for a, ax in zip(analytes, axs.flatten()):
d = nominal_values(self.gradients[a])
d = d[~np.isnan(d)]
m, u = unitpicker(d, focus_stage=self.focus_stage, denominator=self.internal_standard)
if bins is None:
ibins = np.linspace(*np.percentile(d * m, [1, 99]), 50)
else:
ibins = bins
ax.hist(d * m, bins=ibins, color=self.cmaps[a])
ax.axvline(0, ls='dashed', lw=1, c=(0,0,0,0.7))
ax.set_title(a, loc='left')
if ax.is_first_col():
ax.set_ylabel('N')
ax.set_xlabel(u + '/s')
i += 1
if i < ncol * nrow:
for ax in axs.flatten()[i:]:
ax.set_visible(False)
fig.tight_layout()
return fig, axs
# crossplot of all data
@_log
def crossplot(self, analytes=None, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', **kwargs):
"""
Plot analytes against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
        save : bool or str
            If True, the plot is saved as 'crossplot.png' in the report
            directory; if a str is given, it is used as the filename.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
Returns
-------
(fig, axes)
"""
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
# sort analytes
try:
analytes = sorted(analytes, key=lambda x: float(re.findall('[0-9.-]+', x)[0]))
except IndexError:
analytes = sorted(analytes)
self.get_focus(filt=filt, samples=samples, subset=subset)
fig, axes = plot.crossplot(dat=self.focus, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps, mode=mode)
if save or isinstance(save, str):
if isinstance(save, str):
fig.savefig(os.path.join(self.report_dir, save), dpi=200)
else:
fig.savefig(os.path.join(self.report_dir, 'crossplot.png'), dpi=200)
return fig, axes
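    # Illustrative usage sketch (not part of the original source): a filtered
    # crossplot of all analytes, saved under a custom filename in the report directory.
    #
    #     fig, axes = eg.crossplot(filt=True, save='crossplot_filtered.png')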
@_log
def gradient_crossplot(self, analytes=None, win=15, lognorm=True,
bins=25, filt=False, samples=None,
subset=None, figsize=(12, 12), save=False,
colourful=True, mode='hist2d', recalc=True, **kwargs):
"""
Plot analyte gradients against each other.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
lognorm : bool
Whether or not to log normalise the colour scale
of the 2D histogram.
bins : int
The number of bins in the 2D histogram.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
figsize : tuple
Figure size (width, height) in inches.
        save : bool or str
            If True, the plot is saved as 'g_crossplot.png' in the report
            directory.
colourful : bool
Whether or not the plot should be colourful :).
mode : str
'hist2d' (default) or 'scatter'
recalc : bool
Whether to re-calculate the gradients, or use existing gradients.
Returns
-------
(fig, axes)
"""
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
samples = self._get_samples(subset)
# calculate gradients
self.get_gradients(analytes=analytes, win=win, filt=filt, subset=subset, recalc=recalc)
# self.get_focus(filt=filt, samples=samples, subset=subset)
# grads = calc_grads(self.focus.uTime, self.focus, analytes, win)
fig, axes = plot.crossplot(dat=self.gradients, keys=analytes, lognorm=lognorm,
bins=bins, figsize=figsize, colourful=colourful,
focus_stage=self.focus_stage, cmap=self.cmaps,
denominator=self.internal_standard, mode=mode)
if save:
fig.savefig(self.report_dir + '/g_crossplot.png', dpi=200)
return fig, axes
def histograms(self, analytes=None, bins=25, logy=False,
samples=None, subset=None, filt=False, colourful=True):
"""
Plot histograms of analytes.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
bins : int
The number of bins in each histogram (default = 25)
logy : bool
If true, y axis is a log scale.
samples : array_like or None
Which samples to plot. If None, all samples are plotted.
subset : str or number
The subset of samples (defined by make_subset) you want to plot.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
colourful : bool
If True, histograms are colourful :)
Returns
-------
(fig, axes)
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
if colourful:
cmap = self.cmaps
else:
cmap = None
self.get_focus(filt=filt, subset=subset)
fig, axes = plot.histograms(self.focus, keys=analytes,
bins=bins, logy=logy, cmap=cmap)
return fig, axes
def filter_effect(self, analytes=None, stats=['mean', 'std'], filt=True):
"""
Quantify the effects of the active filters.
Parameters
----------
analytes : str or list
Which analytes to consider.
stats : list
Which statistics to calculate.
        filt : valid filter string or bool
Which filter to consider. If True, applies all
active filters.
Returns
-------
pandas.DataFrame
Contains statistics calculated for filtered and
unfiltered data, and the filtered/unfiltered ratio.
"""
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
# calculate filtered and unfiltered stats
self.sample_stats(analytes, stats=stats, filt=False)
suf = self.stats.copy()
self.sample_stats(analytes, stats=stats, filt=filt)
sf = self.stats.copy()
# create dataframe for results
cols = []
for s in self.stats_calced:
cols += ['unfiltered_{:}'.format(s), 'filtered_{:}'.format(s)]
comp = pd.DataFrame(index=self.samples,
columns=pd.MultiIndex.from_arrays([cols, [None] * len(cols)]))
# collate stats
for k, v in suf.items():
vf = sf[k]
for i, a in enumerate(v['analytes']):
for s in self.stats_calced:
comp.loc[k, ('unfiltered_{:}'.format(s), a)] = v[s][i,0]
comp.loc[k, ('filtered_{:}'.format(s), a)] = vf[s][i,0]
comp.dropna(0, 'all', inplace=True)
comp.dropna(1, 'all', inplace=True)
comp.sort_index(1, inplace=True)
# calculate filtered/unfiltered ratios
rats = []
for s in self.stats_calced:
rat = comp.loc[:, 'filtered_{:}'.format(s)] / comp.loc[:, 'unfiltered_{:}'.format(s)]
rat.columns = pd.MultiIndex.from_product([['{:}_ratio'.format(s)], rat.columns])
rats.append(rat)
# join it all up
comp = comp.join(pd.concat(rats, 1))
comp.sort_index(1, inplace=True)
return comp.loc[:, (pd.IndexSlice[:], pd.IndexSlice[analytes])]
def crossplot_filters(self, filter_string, analytes=None,
samples=None, subset=None, filt=None):
"""
Plot the results of a group of filters in a crossplot.
Parameters
----------
filter_string : str
A string that identifies a group of filters.
e.g. 'test' would plot all filters with 'test' in the
name.
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
Returns
-------
fig, axes objects
"""
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
if samples is None:
samples = self._get_samples(subset)
# isolate relevant filters
filts = self.data[samples[0]].filt.components.keys()
cfilts = [f for f in filts if filter_string in f]
flab = re.compile('.*_(.*)$') # regex to get filter name
# aggregate data
self.get_focus(subset=subset, filt=filt)
# set up axes
numvars = len(analytes)
fig, axes = plt.subplots(nrows=numvars, ncols=numvars,
figsize=(12, 12))
fig.subplots_adjust(hspace=0.05, wspace=0.05)
for ax in axes.flat:
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
if ax.is_first_col():
ax.yaxis.set_ticks_position('left')
if ax.is_last_col():
ax.yaxis.set_ticks_position('right')
if ax.is_first_row():
ax.xaxis.set_ticks_position('top')
if ax.is_last_row():
ax.xaxis.set_ticks_position('bottom')
cmlist = ['Blues', 'BuGn', 'BuPu', 'GnBu',
'Greens', 'Greys', 'Oranges', 'OrRd',
'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd']
# isolate nominal_values for all analytes
focus = {k: nominal_values(v) for k, v in self.focus.items()}
# determine units for all analytes
udict = {a: unitpicker(np.nanmean(focus[a]),
focus_stage=self.focus_stage,
denominator=self.internal_standard) for a in analytes}
# determine ranges for all analytes
rdict = {a: (np.nanmin(focus[a] * udict[a][0]),
np.nanmax(focus[a] * udict[a][0])) for a in analytes}
for f in cfilts:
self.get_focus(f, subset=subset)
focus = {k: nominal_values(v) for k, v in self.focus.items()}
lab = flab.match(f).groups()[0]
axes[0, 0].scatter([], [], s=10, label=lab)
for i, j in zip(*np.triu_indices_from(axes, k=1)):
# get analytes
ai = analytes[i]
aj = analytes[j]
# remove nan, apply multipliers
pi = focus[ai][~np.isnan(focus[ai])] * udict[ai][0]
pj = focus[aj][~np.isnan(focus[aj])] * udict[aj][0]
# make plot
axes[i, j].scatter(pj, pi, alpha=0.4, s=10, lw=0.5, edgecolor='k')
axes[j, i].scatter(pi, pj, alpha=0.4, s=10, lw=0.5, edgecolor='k')
axes[i, j].set_ylim(*rdict[ai])
axes[i, j].set_xlim(*rdict[aj])
axes[j, i].set_ylim(*rdict[aj])
axes[j, i].set_xlim(*rdict[ai])
# diagonal labels
for a, n in zip(analytes, np.arange(len(analytes))):
axes[n, n].annotate(a + '\n' + udict[a][1], (0.5, 0.5),
xycoords='axes fraction',
ha='center', va='center')
axes[n, n].set_xlim(*rdict[a])
axes[n, n].set_ylim(*rdict[a])
axes[0, 0].legend(loc='upper left', title=filter_string)
# switch on alternating axes
for i, j in zip(range(numvars), itertools.cycle((-1, 0))):
axes[j, i].xaxis.set_visible(True)
for label in axes[j, i].get_xticklabels():
label.set_rotation(90)
axes[i, j].yaxis.set_visible(True)
return fig, axes
# Plot traces
@_log
def trace_plots(self, analytes=None, samples=None, ranges=False,
focus_stage=None, outdir=None, filt=None, scale='log',
figsize=[10, 4], stats=False, stat='nanmean',
err='nanstd', subset=None):
"""
Plot analytes as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
            Whether or not to show the signal/background regions
identified by 'autorange'.
focus_stage : str
The focus 'stage' of the analysis to plot. Can be
            'rawdata', 'despiked', 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
            Defaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus_stage is None:
focus_stage = self.focus_stage
if outdir is None:
outdir = os.path.join(self.report_dir, focus_stage)
if not os.path.isdir(outdir):
os.mkdir(outdir)
analytes = self.analytes_sorted(analytes, focus_stage=focus_stage)
# if samples is not None:
# subset = self.make_subset(samples, silent=True)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].trace_plot(analytes=analytes, figsize=figsize,
scale=scale, filt=filt,
ranges=ranges, stats=stats,
stat=stat, err=err, focus_stage=focus_stage)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(os.path.join(outdir, s + '_traces.pdf'))
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
# Plot gradients
@_log
def gradient_plots(self, analytes=None, win=None, samples=None, ranges=False,
focus_stage=None, filt=False, recalc=False, outdir=None,
figsize=[10, 4], subset='All_Analyses'):
"""
Plot analyte gradients as a function of time.
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to plot. Defaults to all analytes.
samples: optional, array_like or str
The sample(s) to plot. Defaults to all samples.
ranges : bool
            Whether or not to show the signal/background regions
identified by 'autorange'.
        focus_stage : str
The focus 'stage' of the analysis to plot. Can be
            'rawdata', 'despiked', 'signal', 'background',
'bkgsub', 'ratios' or 'calibrated'.
outdir : str
Path to a directory where you'd like the plots to be
saved. Defaults to 'reports/[focus]' in your data directory.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
scale : str
If 'log', plots the data on a log scale.
figsize : array_like
Array of length 2 specifying figure [width, height] in
inches.
stats : bool
Whether or not to overlay the mean and standard deviations
for each trace.
stat, err: str
The names of the statistic and error components to plot.
            Defaults to 'nanmean' and 'nanstd'.
Returns
-------
None
"""
if focus_stage is None:
focus_stage = self.focus_stage
if outdir is None:
outdir = os.path.join(self.report_dir, focus_stage + '_gradient')
if not os.path.isdir(outdir):
os.mkdir(outdir)
analytes = self.analytes_sorted(analytes, focus_stage=focus_stage)
# if samples is not None:
# subset = self.make_subset(samples, silent=True)
if subset is not None:
samples = self._get_samples(subset)
elif samples is None:
samples = self.subsets['All_Analyses']
elif isinstance(samples, str):
samples = [samples]
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
f, a = self.data[s].gplot(analytes=analytes, win=win, figsize=figsize,
ranges=ranges, focus_stage=focus_stage, filt=filt, recalc=recalc)
# ax = fig.axes[0]
# for l, u in s.sigrng:
# ax.axvspan(l, u, color='r', alpha=0.1)
# for l, u in s.bkgrng:
# ax.axvspan(l, u, color='k', alpha=0.1)
f.savefig(os.path.join(outdir, s + '_gradients.pdf'))
# TODO: on older(?) computers raises
# 'OSError: [Errno 24] Too many open files'
plt.close(f)
prog.update()
return
def plot_stackhist(self, subset='All_Samples', samples=None, analytes=None, axs=None, filt=True, **kwargs):
"""
        Plot stacked histograms of analytes for all given samples (or a pre-defined subset).
Parameters
----------
subset : str
The subset of samples to plot. Overruled by 'samples', if provided.
samples : array-like
The samples to plot. If blank, reverts to 'All_Samples' subset.
analytes : str or array-like
The analytes to plot
axs : array-like
An array of matplotlib.Axes objects the same length as analytes.
**kwargs
passed to matplotlib.pyplot.bar() plotting function
"""
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
if axs is None:
fig, axs = plt.subplots(1, len(analytes), figsize=[2 * len(analytes), 2],
constrained_layout=True, sharey=True)
elif len(axs) != len(analytes):
raise ValueError(f'Must provide the same number of axes ({len(axs)}) and analytes ({len(analytes)})')
if samples is None:
samples = self.subsets[subset]
elif isinstance(samples, str):
samples = [samples]
self.get_focus(filt=filt, samples=samples)
for i, a in enumerate(analytes):
m, unit = unitpicker(self.focus[a], focus_stage=self.focus_stage)
arrays = []
for s in samples:
sub = self.data[s].get_individual_ablations(analytes, filt=filt)
arrays += [nominal_values(d[a]) * m for d in sub]
plot.stackhist(arrays, ax=axs[i], **kwargs)
axs[i].set_xlabel(pretty_element(a) + '\n' + unit)
# filter reports
@_log
def filter_reports(self, analytes, filt_str='all', nbin=5, samples=None,
outdir=None, subset='All_Samples'):
"""
Plot filter reports for all filters that contain ``filt_str``
in the name.
"""
if outdir is None:
outdir = self.report_dir + '/filters/' + filt_str
if not os.path.isdir(self.report_dir + '/filters'):
os.mkdir(self.report_dir + '/filters')
if not os.path.isdir(outdir):
os.mkdir(outdir)
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
with self.pbar.set(total=len(samples), desc='Drawing Plots') as prog:
for s in samples:
_ = self.data[s].filter_report(filt=filt_str,
analytes=analytes,
savedir=outdir,
nbin=nbin)
prog.update()
# plt.close(fig)
return
# def _stat_boostrap(self, analytes=None, filt=True,
# stat_fn=np.nanmean, ci=95):
# """
# Calculate sample statistics with bootstrapped confidence intervals.
# Parameters
# ----------
# analytes : optional, array_like or str
# The analyte(s) to calculate statistics for. Defaults to
# all analytes.
# filt : str, dict or bool
# Either logical filter expression contained in a str,
# a dict of expressions specifying the filter string to
# use for each analyte or a boolean. Passed to `grab_filt`.
# stat_fns : array_like
# list of functions that take a single array_like input,
# and return a single statistic. Function should be able
# to cope with numpy NaN values.
# ci : float
# Confidence interval to calculate.
# Returns
# -------
# None
# """
# return
@_log
def sample_stats(self, analytes=None, filt=True,
stats=['mean', 'std'], include_srms=False,
eachtrace=True, focus_stage=None, csf_dict={}):
"""
Calculate sample statistics.
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Included stat functions:
* :func:`~latools.stat_fns.mean`: arithmetic mean
* :func:`~latools.stat_fns.std`: arithmetic standard deviation
* :func:`~latools.stat_fns.se`: arithmetic standard error
* :func:`~latools.stat_fns.H15_mean`: Huber mean (outlier removal)
* :func:`~latools.stat_fns.H15_std`: Huber standard deviation (outlier removal)
* :func:`~latools.stat_fns.H15_se`: Huber standard error (outlier removal)
Parameters
----------
analytes : optional, array_like or str
The analyte(s) to calculate statistics for. Defaults to
all analytes.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
        stats : array_like or str
            list of functions, or names (see above) of functions, that
            take a single array_like input and return a single statistic.
            Functions should be able to cope with NaN values.
eachtrace : bool
Whether to calculate the statistics for each analysis
            spot individually, or to produce per-sample means.
Default is True.
focus_stage : str
Which stage of analysis to calculate stats for.
Defaults to current stage.
Can be one of:
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
* 'massfrac': mass fraction of each element.
Returns
-------
None
            Adds a dict to the analyse object containing samples, analytes,
            functions and data.
"""
if 'autorange' not in self.stages_complete:
raise RuntimeError('Cannot calculate statistics until autorange has been run.')
analytes = self.analytes_sorted(analytes, focus_stage=focus_stage)
if focus_stage is None:
focus_stage = self.focus_stage
self.stats = Bunch()
self.stats_calced = []
stat_fns = Bunch()
stat_dict = {'mean': np.nanmean,
'std': np.nanstd,
'nanmean': np.nanmean,
'nanstd': np.nanstd,
'se': stderr,
'H15_mean': H15_mean,
'H15_std': H15_std,
'H15_se': H15_se}
if isinstance(stats, str):
stats = [stats]
for s in stats:
if isinstance(s, str):
if s in stat_dict.keys():
self.stats_calced.append(s)
stat_fns[s] = stat_dict[s]
if s in csf_dict.keys():
self.stats_calced.append(s)
exec(csf_dict[s])
stat_fns[s] = eval(s)
elif callable(s):
self.stats_calced.append(s.__name__)
stat_fns[s.__name__] = s
if not hasattr(self, 'custom_stat_functions'):
self.custom_stat_functions = ''
self.custom_stat_functions += inspect.getsource(s) + '\n\n\n\n'
# calculate stats for each sample
if include_srms:
samples = self.samples
else:
samples = [s for s in self.samples if self.srm_identifier not in s]
with self.pbar.set(total=len(samples), desc='Calculating Stats') as prog:
for s in samples:
self.data[s].sample_stats(analytes, filt=filt,
stat_fns=stat_fns,
eachtrace=eachtrace,
focus_stage=focus_stage)
self.stats[s] = self.data[s].stats
prog.update()
self.stat_focus_stage = focus_stage
return
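    # Illustrative usage sketch (not part of the original source): per-ablation
    # means and standard deviations, plus a custom (hypothetical) statistic passed
    # as a callable. Custom functions must tolerate NaN values.
    #
    #     def trimmed_mean(a):
    #         import numpy as np
    #         a = a[~np.isnan(a)]
    #         return np.mean(np.sort(a)[1:-1]) if a.size > 2 else np.nan
    #
    #     eg.sample_stats(filt=True, stats=['mean', 'std', trimmed_mean])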
@_log
def ablation_times(self, samples=None, subset=None):
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
ats = Bunch()
for s in samples:
ats[s] = self.data[s].ablation_times()
frames = []
for s in samples:
d = ats[s]
td = pd.DataFrame.from_dict(d, orient='index')
td.columns = ['Time']
frames.append(td)
out = pd.concat(frames, keys=samples)
out.index.names = ['sample', 'rep']
return out
# function for visualising sample statistics
@_log
def statrace_plot(self, analytes=None, samples=None, figsize=None,
stat='mean', err='std', subset=None):
"""
Function for visualising per-ablation and per-sample means.
Parameters
----------
analytes : str or iterable
Which analyte(s) to plot
samples : str or iterable
Which sample(s) to plot
figsize : tuple
Figure (width, height) in inches
stat : str
Which statistic to plot. Must match
the name of the functions used in
'sample_stats'.
err : str
Which uncertainty to plot.
subset : str
Which subset of samples to plot.
"""
if not hasattr(self, 'stats'):
self.sample_stats()
analytes = self.analytes_sorted(analytes, focus_stage=self.focus_stage)
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
if figsize is None:
figsize = (1.5 * len(self.stats), 3 * len(analytes))
fig, axs = plt.subplots(len(analytes), 1, figsize=figsize)
for ax, an in zip(axs, analytes):
i = 0
stab = self.getstats()
m, u = unitpicker(np.percentile(stab.loc[:, an].dropna(), 25), 0.1,
focus_stage='calibrated',
denominator=self.internal_standard)
for s in samples:
if self.srm_identifier not in s:
d = self.stats[s]
if d[stat].ndim == 2:
n = d[stat].shape[-1]
x = np.linspace(i - .1 * n / 2, i + .1 * n / 2, n)
else:
x = [i]
a_ind = d['analytes'] == an
# plot individual ablations with error bars
ax.errorbar(x, d[stat][a_ind][0] * m,
yerr=d[err][a_ind][0] * m,
marker='o', color=self.cmaps[an],
lw=0, elinewidth=1)
ax.set_ylabel('%s / %s (%s )' % (pretty_element(an),
pretty_element(self.internal_standard),
u))
                    # plot whole-sample mean
if len(x) > 1:
# mean calculation with error propagation?
# umean = un.uarray(d[stat][a_ind][0] * m, d[err][a_ind][0] * m).mean()
# std = un.std_devs(umean)
# mean = un.nominal_values(umean)
mean = np.nanmean(d[stat][a_ind][0] * m)
std = np.nanstd(d[stat][a_ind][0] * m)
ax.plot(x, [mean] * len(x), c=self.cmaps[an], lw=2)
ax.fill_between(x, [mean + std] * len(x),
[mean - std] * len(x),
lw=0, alpha=0.2, color=self.cmaps[an])
# highlight each sample
if i % 2 == 1:
ax.axvspan(i - .5, i + .5, color=(0, 0, 0, 0.05), lw=0)
i += 1
ax.set_xticks(np.arange(0, len(self.stats)))
ax.set_xlim(-0.5, len(self.stats) - .5)
ax.set_xticklabels(samples)
return fig, ax
@_log
def getstats(self, save=True, filename=None, samples=None, subset=None, ablation_time=False):
"""
Return pandas dataframe of all sample statistics.
"""
slst = []
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
for s in self.stats_calced:
for nm in samples:
if self.stats[nm][s].ndim == 2:
                # make multi-index
reps = np.arange(self.stats[nm][s].shape[-1])
ss = np.array([s] * reps.size)
nms = np.array([nm] * reps.size)
                # make sub-dataframe
stdf = pd.DataFrame(self.stats[nm][s].T,
columns=self.stats[nm]['analytes'],
index=[ss, nms, reps])
stdf.index.set_names(['statistic', 'sample', 'rep'],
inplace=True)
else:
stdf = pd.DataFrame(self.stats[nm][s],
index=self.stats[nm]['analytes'],
columns=[[s], [nm]]).T
stdf.index.set_names(['statistic', 'sample'],
inplace=True)
slst.append(stdf)
out = pd.concat(slst)
if ablation_time:
ats = self.ablation_times(samples=samples, subset=subset)
ats['statistic'] = 'nanmean'
ats.set_index('statistic', append=True, inplace=True)
ats = ats.reorder_levels(['statistic', 'sample', 'rep'])
out = out.join(ats)
if save:
if filename is None:
filename = 'stat_export.csv'
out.to_csv(os.path.join(self.export_dir, filename))
self.stats_df = out
return out.reindex(self.analytes_sorted(out.columns.values, focus_stage=self.stat_focus_stage), axis=1)
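    # Illustrative usage sketch (not part of the original source): collect the
    # calculated statistics into a DataFrame (also written to the export directory).
    #
    #     stats_df = eg.getstats(filename='my_stats.csv', ablation_time=True)
    #     stats_df.loc['mean']  # assuming 'mean' was among the requested statistics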
# raw data export function
def _minimal_export_traces(self, outdir=None, analytes=None,
samples=None, subset='All_Analyses'):
"""
Used for exporting minimal dataset. DON'T USE.
"""
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
focus_stage = 'rawdata'
# ud = 'counts'
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
out = Bunch()
for a in analytes:
out[a] = d[a]
out = pd.DataFrame(out, index=self.data[s].Time)
out.index.name = 'Time'
d = dateutil.parser.parse(self.data[s].meta['date'])
header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
"# Analysis described in '../analysis.lalog'",
'# Run latools.reproduce to import analysis.',
'#',
'# Sample: %s' % (s),
'# Analysis Time: ' + d.strftime('%Y-%m-%d %H:%M:%S')]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s.csv' % (outdir, s), 'w') as f:
f.write(header)
f.write(csv)
return
@_log
def export_traces(self, outdir=None, focus_stage=None, analytes=None,
samples=None, subset='All_Analyses', filt=False, zip_archive=False):
"""
Function to export raw data.
Parameters
----------
outdir : str
            directory to save the traces. Defaults to 'trace_export' inside the export directory.
focus_stage : str
The name of the analysis stage to export.
* 'rawdata': raw data, loaded from csv file.
* 'despiked': despiked data.
* 'signal'/'background': isolated signal and background data.
Created by self.separate, after signal and background
regions have been identified by self.autorange.
* 'bkgsub': background subtracted data, created by
self.bkg_correct
* 'ratios': element ratio data, created by self.ratio.
* 'calibrated': ratio data calibrated to standards, created by self.calibrate.
Defaults to the most recent stage of analysis.
analytes : str or array_like
Either a single analyte, or list of analytes to export.
Defaults to all analytes.
samples : str or array_like
Either a single sample name, or list of samples to export.
Defaults to all samples.
filt : str, dict or bool
Either logical filter expression contained in a str,
a dict of expressions specifying the filter string to
use for each analyte or a boolean. Passed to `grab_filt`.
"""
analytes = self.analytes_sorted(analytes, focus_stage=focus_stage)
if samples is not None:
subset = self.make_subset(samples, silent=True)
samples = self._get_samples(subset)
if focus_stage is None:
focus_stage = self.focus_stage
if outdir is None:
outdir = os.path.join(self.export_dir, 'trace_export')
ud = {'rawdata': 'counts',
'despiked': 'counts',
'bkgsub': 'background corrected counts',
'ratios': 'counts/count',
'calibrated': 'mol/mol',
'mass_fraction': 'mass fraction'}
if not os.path.isdir(outdir):
os.mkdir(outdir)
for s in samples:
d = self.data[s].data[focus_stage]
out = Bunch()
for a in analytes:
if a not in d:
continue
ind = self.data[s].filt.grab_filt(filt, a)
out[a] = nominal_values(d[a][ind])
if focus_stage not in ['rawdata', 'despiked']:
out[a + '_std'] = std_devs(d[a][ind])
out[a + '_std'][out[a + '_std'] == 0] = np.nan
out = pd.DataFrame(out, index=self.data[s].Time[ind])
out.index.name = 'Time'
header = ['# Sample: %s' % (s),
'# Data Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'# Processed using %s configuration' % (self.config['config']),
'# Analysis Stage: %s' % (focus_stage),
'# Unit: %s' % ud[focus_stage]]
header = '\n'.join(header) + '\n'
csv = out.to_csv()
with open('%s/%s_%s.csv' % (outdir, s, focus_stage), 'w') as f:
f.write(header)
f.write(csv)
if zip_archive:
utils.zipdir(outdir, delete=True)
return
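    # Illustrative usage sketch (not part of the original source): export filtered,
    # calibrated traces for all samples as a single zip archive.
    #
    #     eg.export_traces(focus_stage='calibrated', filt=True, zip_archive=True)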
def save_log(self, directory=None, logname=None, header=None):
"""
Save analysis.lalog in specified location
"""
if directory is None:
directory = self.export_dir
if not os.path.isdir(directory):
directory = os.path.dirname(directory)
if logname is None:
logname = 'analysis.lalog'
if header is None:
header = self._log_header()
loc = logging.write_logfile(self.log, header,
os.path.join(directory, logname))
return loc
def minimal_export(self, target_analytes=None, path=None):
"""
        Exports analysis parameters, standard info and a minimal dataset,
which can be imported by another user.
Parameters
----------
target_analytes : str or iterable
Which analytes to include in the export. If specified, the export
will contain these analytes, and all other analytes used during
data processing (e.g. during filtering). If not specified,
all analytes are exported.
path : str
Where to save the minimal export.
If it ends with .zip, a zip file is created.
If it's a folder, all data are exported to a folder.
"""
target_analytes = self._analyte_checker(target_analytes, check_ratios=False)
zip_archive = False
# set up data path
if path is None:
path = self.export_dir + '/minimal_export.zip'
if path.endswith('.zip'):
path = path.replace('.zip', '')
zip_archive = True
if not os.path.isdir(path):
os.mkdir(path)
# parse minimal analytes (exclude ratios, include target_analytes)
export_analytes = target_analytes.union(split_analyte_ratios(self.minimal_analytes))
export_analytes = self.analytes_sorted(export_analytes, check_ratios=False)
# export data
self._minimal_export_traces(path + '/data', analytes=export_analytes)
# define analysis_log header
log_header = ['# Minimal Reproduction Dataset Exported from LATOOLS on %s' %
(time.strftime('%Y:%m:%d %H:%M:%S')),
'data_path :: ./data/']
if hasattr(self, 'srmdat'):
log_header.append('srm_table :: ./srm.table')
# export srm table
items = set()
for a in export_analytes:
for srm, ad in self._analyte_srmdat_link.items():
items.update([ad[a]])
srmdat = self.srmdat.loc[idx[:, list(items)], :]
with open(path + '/srm.table', 'w') as f:
f.write(srmdat.to_csv())
# save internal_standard_concs
if self.internal_standard_concs is not None:
log_header.append('internal_standard_concs :: ./internal_standard_concs.csv')
self.internal_standard_concs.to_csv(os.path.join(path, './internal_standard_concs.csv'))
        # save custom functions (if defined)
if hasattr(self, 'custom_stat_functions'):
with open(path + '/custom_stat_fns.py', 'w') as f:
f.write(self.custom_stat_functions)
log_header.append('custom_stat_functions :: ./custom_stat_fns.py')
log_header.append('# Analysis Log Start: \n')
# format sample_stats correctly
lss = [(i, l) for i, l in enumerate(self.log) if 'sample_stats' in l]
rep = re.compile("(.*'stats': )(\[.*?\])(.*)")
for i, l in lss:
self.log[i] = rep.sub(r'\1' + str(self.stats_calced) + r'\3', l)
# save log
self.save_log(path, 'analysis.lalog', header=log_header)
if zip_archive:
utils.zipdir(directory=path, delete=True)
return
def reproduce(past_analysis, plotting=False, data_path=None,
srm_table=None, internal_standard_concs=None, custom_stat_functions=None):
"""
Reproduce a previous analysis exported with :func:`latools.analyse.minimal_export`
    For normal use, supplying `past_analysis` and specifying a plotting option should be
enough to reproduce an analysis. All requisites (raw data, SRM table and any
custom stat functions) will then be imported from the minimal_export folder.
You may also specify your own raw_data, srm_table and custom_stat_functions,
if you wish.
Parameters
----------
    past_analysis : str
        The path to the .zip archive, directory or analysis.lalog file produced
        by :func:`~latools.analyse.minimal_export`.
plotting : bool
Whether or not to output plots.
data_path : str
Optional. Specify a different data folder. Data folder
should normally be in the same folder as the log file.
srm_table : str
Optional. Specify a different SRM table. SRM table
should normally be in the same folder as the log file.
internal_standard_concs : pandas.DataFrame
Optional. Specify internal standard concentrations used
to calculate mass fractions.
custom_stat_functions : str
Optional. Specify a python file containing custom
stat functions for use by reproduce. Any custom
stat functions should normally be included in the
same folder as the log file.
"""
if '.zip' in past_analysis:
dirpath = utils.extract_zipdir(past_analysis)
logpath = os.path.join(dirpath, 'analysis.lalog')
elif os.path.isdir(past_analysis):
if os.path.exists(os.path.join(past_analysis, 'analysis.lalog')):
logpath = os.path.join(past_analysis, 'analysis.lalog')
elif 'analysis.lalog' in past_analysis:
logpath = past_analysis
else:
raise ValueError(('\n\n{} is not a valid input.\n\n' +
'Must be one of:\n' +
' - A .zip file exported by latools\n' +
' - An analysis.lalog file\n' +
                          ' - A directory containing an analysis.lalog file\n'))
runargs, paths = logging.read_logfile(logpath)
# parse custom stat functions
csfs = Bunch()
if custom_stat_functions is None and 'custom_stat_functions' in paths.keys():
# load custom functions as a dict
with open(paths['custom_stat_functions'], 'r') as f:
csf = f.read()
fname = re.compile('def (.*)\(.*')
for c in csf.split('\n\n\n\n'):
if fname.match(c):
csfs[fname.match(c).groups()[0]] = c
# create analysis object
rep = analyse(*runargs[0][-1]['args'], **runargs[0][-1]['kwargs'])
# deal with internal standard concentrations
if internal_standard_concs is None and 'internal_standard_concs' in paths:
rep.read_internal_standard_concs(paths['internal_standard_concs'])
# rest of commands
for fname, arg in runargs:
if fname != '__init__':
if 'plot' in fname.lower() and plotting:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
elif 'sample_stats' in fname.lower():
rep.sample_stats(*arg['args'], csf_dict=csfs, **arg['kwargs'])
else:
getattr(rep, fname)(*arg['args'], **arg['kwargs'])
return rep
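# Illustrative usage sketch (not part of the original source): the round trip
# between minimal_export and reproduce, assuming `latools` is imported and
# exposes `reproduce` at the package level; paths are hypothetical.
#
#     eg.minimal_export(path='my_analysis_export.zip')
#     rep = latools.reproduce('my_analysis_export.zip', plotting=False)
#     rep.getstats()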
|
oscarbranson/latools
|
latools/latools.py
|
Python
|
mit
| 178,184
|
[
"Gaussian"
] |
db6e65c9de3a40fa34f14d67146204359ada04eb5ca52bdb4888b007ff79bf5b
|
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from unittest import TestCase
import zmq
from zmq.tests import BaseZMQTestCase
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
class TestPubSub(BaseZMQTestCase):
pass
# We are disabling this test while an issue is being resolved.
# def test_basic(self):
# s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
# s2.setsockopt(zmq.SUBSCRIBE,'')
# import time; time.sleep(0.5)
# msg1 = 'message'
# s1.send(msg1)
# msg2 = s2.recv() # This is blocking!
# self.assertEquals(msg1, msg2)
def test_topic(self):
s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
s2.setsockopt(zmq.SUBSCRIBE, 'x'.encode())
import time; time.sleep(0.1)
msg1 = 'message'.encode()
s1.send(msg1)
self.assertRaisesErrno(zmq.EAGAIN, s2.recv, zmq.NOBLOCK)
msg1 = 'xmessage'.encode()
s1.send(msg1)
msg2 = s2.recv()
self.assertEqual(msg1, msg2)
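# Commented sketch (plain pyzmq behaviour, not part of the original suite):
# SUB sockets filter on a message *prefix*, so subscribing to the empty topic
# receives every message.
#
#   s1, s2 = self.create_bound_pair(zmq.PUB, zmq.SUB)
#   s2.setsockopt(zmq.SUBSCRIBE, b'')      # empty prefix matches everything
#   time.sleep(0.1)                        # allow the subscription to propagate
#   s1.send(b'any topic at all')
#   assert s2.recv() == b'any topic at all'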
|
svpcom/pyzmq-ctypes
|
zmq/tests/test_pubsub.py
|
Python
|
lgpl-3.0
| 2,007
|
[
"Brian"
] |
3432f494fcf295044a7140b5eb0d19dfd4e03b72f02852d7e98691bd3aad882f
|
# sql/compiler.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base SQL and DDL compiler implementations.
Classes provided include:
:class:`.compiler.SQLCompiler` - renders SQL
strings
:class:`.compiler.DDLCompiler` - renders DDL
(data definition language) strings
:class:`.compiler.GenericTypeCompiler` - renders
type specification strings.
To generate user-defined SQL strings, see
:doc:`/ext/compiler`.
"""
import contextlib
import re
from . import schema, sqltypes, operators, functions, visitors, \
elements, selectable, crud
from .. import util, exc
import itertools
RESERVED_WORDS = set([
'all', 'analyse', 'analyze', 'and', 'any', 'array',
'as', 'asc', 'asymmetric', 'authorization', 'between',
'binary', 'both', 'case', 'cast', 'check', 'collate',
'column', 'constraint', 'create', 'cross', 'current_date',
'current_role', 'current_time', 'current_timestamp',
'current_user', 'default', 'deferrable', 'desc',
'distinct', 'do', 'else', 'end', 'except', 'false',
'for', 'foreign', 'freeze', 'from', 'full', 'grant',
'group', 'having', 'ilike', 'in', 'initially', 'inner',
'intersect', 'into', 'is', 'isnull', 'join', 'leading',
'left', 'like', 'limit', 'localtime', 'localtimestamp',
'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset',
'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps',
'placing', 'primary', 'references', 'right', 'select',
'session_user', 'set', 'similar', 'some', 'symmetric', 'table',
'then', 'to', 'trailing', 'true', 'union', 'unique', 'user',
'using', 'verbose', 'when', 'where'])
LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I)
ILLEGAL_INITIAL_CHARACTERS = {str(x) for x in range(0, 10)}.union(['$'])
BIND_PARAMS = re.compile(r'(?<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]*)(?![:\w\$])', re.UNICODE)
BIND_TEMPLATES = {
'pyformat': "%%(%(name)s)s",
'qmark': "?",
'format': "%%s",
'numeric': ":[_POSITION]",
'named': ":%(name)s"
}
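# Illustration (comments only): how a bind parameter named "x" is rendered
# under each paramstyle above.
#
#   'named'    ->  WHERE a = :x
#   'qmark'    ->  WHERE a = ?
#   'format'   ->  WHERE a = %s
#   'pyformat' ->  WHERE a = %(x)s
#   'numeric'  ->  WHERE a = :1    (the [_POSITION] token is numbered later by
#                                   SQLCompiler._apply_numbered_params)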
OPERATORS = {
# binary
operators.and_: ' AND ',
operators.or_: ' OR ',
operators.add: ' + ',
operators.mul: ' * ',
operators.sub: ' - ',
operators.div: ' / ',
operators.mod: ' % ',
operators.truediv: ' / ',
operators.neg: '-',
operators.lt: ' < ',
operators.le: ' <= ',
operators.ne: ' != ',
operators.gt: ' > ',
operators.ge: ' >= ',
operators.eq: ' = ',
operators.is_distinct_from: ' IS DISTINCT FROM ',
operators.isnot_distinct_from: ' IS NOT DISTINCT FROM ',
operators.concat_op: ' || ',
operators.match_op: ' MATCH ',
operators.notmatch_op: ' NOT MATCH ',
operators.in_op: ' IN ',
operators.notin_op: ' NOT IN ',
operators.comma_op: ', ',
operators.from_: ' FROM ',
operators.as_: ' AS ',
operators.is_: ' IS ',
operators.isnot: ' IS NOT ',
operators.collate: ' COLLATE ',
# unary
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
operators.any_op: 'ANY ',
operators.all_op: 'ALL ',
# modifiers
operators.desc_op: ' DESC',
operators.asc_op: ' ASC',
operators.nullsfirst_op: ' NULLS FIRST',
operators.nullslast_op: ' NULLS LAST',
}
FUNCTIONS = {
functions.coalesce: 'coalesce%(expr)s',
functions.current_date: 'CURRENT_DATE',
functions.current_time: 'CURRENT_TIME',
functions.current_timestamp: 'CURRENT_TIMESTAMP',
functions.current_user: 'CURRENT_USER',
functions.localtime: 'LOCALTIME',
functions.localtimestamp: 'LOCALTIMESTAMP',
functions.random: 'random%(expr)s',
functions.sysdate: 'sysdate',
functions.session_user: 'SESSION_USER',
functions.user: 'USER',
functions.cube: 'CUBE%(expr)s',
functions.rollup: 'ROLLUP%(expr)s',
functions.grouping_sets: 'GROUPING SETS%(expr)s',
}
EXTRACT_MAP = {
'month': 'month',
'day': 'day',
'year': 'year',
'second': 'second',
'hour': 'hour',
'doy': 'doy',
'minute': 'minute',
'quarter': 'quarter',
'dow': 'dow',
'week': 'week',
'epoch': 'epoch',
'milliseconds': 'milliseconds',
'microseconds': 'microseconds',
'timezone_hour': 'timezone_hour',
'timezone_minute': 'timezone_minute'
}
COMPOUND_KEYWORDS = {
selectable.CompoundSelect.UNION: 'UNION',
selectable.CompoundSelect.UNION_ALL: 'UNION ALL',
selectable.CompoundSelect.EXCEPT: 'EXCEPT',
selectable.CompoundSelect.EXCEPT_ALL: 'EXCEPT ALL',
selectable.CompoundSelect.INTERSECT: 'INTERSECT',
selectable.CompoundSelect.INTERSECT_ALL: 'INTERSECT ALL'
}
class Compiled(object):
"""Represent a compiled SQL or DDL expression.
The ``__str__`` method of the ``Compiled`` object should produce
the actual text of the statement. ``Compiled`` objects are
specific to their underlying database dialect, and also may
or may not be specific to the columns referenced within a
particular set of bind parameters. In no case should the
``Compiled`` object be dependent on the actual values of those
bind parameters, even though it may reference those values as
defaults.
"""
_cached_metadata = None
execution_options = util.immutabledict()
"""
Execution options propagated from the statement. In some cases,
sub-elements of the statement can modify these.
"""
def __init__(self, dialect, statement, bind=None,
schema_translate_map=None,
compile_kwargs=util.immutabledict()):
"""Construct a new :class:`.Compiled` object.
:param dialect: :class:`.Dialect` to compile against.
:param statement: :class:`.ClauseElement` to be compiled.
:param bind: Optional Engine or Connection to compile this
statement against.
:param schema_translate_map: dictionary of schema names to be
translated when forming the resultant SQL
.. versionadded:: 1.1
.. seealso::
:ref:`schema_translating`
:param compile_kwargs: additional kwargs that will be
passed to the initial call to :meth:`.Compiled.process`.
"""
self.dialect = dialect
self.bind = bind
self.preparer = self.dialect.identifier_preparer
if schema_translate_map:
self.preparer = self.preparer._with_schema_translate(
schema_translate_map)
if statement is not None:
self.statement = statement
self.can_execute = statement.supports_execution
if self.can_execute:
self.execution_options = statement._execution_options
self.string = self.process(self.statement, **compile_kwargs)
@util.deprecated("0.7", ":class:`.Compiled` objects now compile "
"within the constructor.")
def compile(self):
"""Produce the internal string representation of this element.
"""
pass
def _execute_on_connection(self, connection, multiparams, params):
if self.can_execute:
return connection._execute_compiled(self, multiparams, params)
else:
raise exc.ObjectNotExecutableError(self.statement)
@property
def sql_compiler(self):
"""Return a Compiled that is capable of processing SQL expressions.
If this compiler is one, it would likely just return 'self'.
"""
raise NotImplementedError()
def process(self, obj, **kwargs):
return obj._compiler_dispatch(self, **kwargs)
def __str__(self):
"""Return the string text of the generated SQL or DDL."""
return self.string or ''
def construct_params(self, params=None):
"""Return the bind params for this compiled object.
:param params: a dict of string/object pairs whose values will
override bind values compiled in to the
statement.
"""
raise NotImplementedError()
@property
def params(self):
"""Return the bind params for this compiled object."""
return self.construct_params()
def execute(self, *multiparams, **params):
"""Execute this compiled object."""
e = self.bind
if e is None:
raise exc.UnboundExecutionError(
"This Compiled object is not bound to any Engine "
"or Connection.", code="2afi")
return e._execute_compiled(self, multiparams, params)
def scalar(self, *multiparams, **params):
"""Execute this compiled object and return the result's
scalar value."""
return self.execute(*multiparams, **params).scalar()
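# Commented usage sketch (public API; table/column names are illustrative):
# compiling a statement yields a Compiled object whose SQL string and bound
# parameters can be inspected directly.
#
#   from sqlalchemy import table, column, select
#   t = table('users', column('id'), column('name'))
#   compiled = select([t.c.name]).where(t.c.id == 7).compile()
#   print(compiled.string)   # roughly: SELECT users.name FROM users WHERE users.id = :id_1
#   print(compiled.params)   # {'id_1': 7}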
class TypeCompiler(util.with_metaclass(util.EnsureKWArgType, object)):
"""Produces DDL specification for TypeEngine objects."""
ensure_kwarg = r'visit_\w+'
def __init__(self, dialect):
self.dialect = dialect
def process(self, type_, **kw):
return type_._compiler_dispatch(self, **kw)
class _CompileLabel(visitors.Visitable):
"""lightweight label object which acts as an expression.Label."""
__visit_name__ = 'label'
__slots__ = 'element', 'name'
def __init__(self, col, name, alt_names=()):
self.element = col
self.name = name
self._alt_names = (col,) + alt_names
@property
def proxy_set(self):
return self.element.proxy_set
@property
def type(self):
return self.element.type
def self_group(self, **kw):
return self
class SQLCompiler(Compiled):
"""Default implementation of :class:`.Compiled`.
Compiles :class:`.ClauseElement` objects into SQL strings.
"""
extract_map = EXTRACT_MAP
compound_keywords = COMPOUND_KEYWORDS
isdelete = isinsert = isupdate = False
"""class-level defaults which can be set at the instance
level to define if this Compiled instance represents
INSERT/UPDATE/DELETE
"""
isplaintext = False
returning = None
"""holds the "returning" collection of columns if
the statement is CRUD and defines returning columns
either implicitly or explicitly
"""
returning_precedes_values = False
"""set to True classwide to generate RETURNING
clauses before the VALUES or WHERE clause (i.e. MSSQL)
"""
render_table_with_column_in_update_from = False
"""set to True classwide to indicate the SET clause
in a multi-table UPDATE statement should qualify
columns with the table name (i.e. MySQL only)
"""
contains_expanding_parameters = False
"""True if we've encountered bindparam(..., expanding=True).
These need to be converted before execution time against the
string statement.
"""
ansi_bind_rules = False
"""SQL 92 doesn't allow bind parameters to be used
in the columns clause of a SELECT, nor does it allow
ambiguous expressions like "? = ?". A compiler
subclass can set this flag to False if the target
driver/DB enforces this
"""
_textual_ordered_columns = False
"""tell the result object that the column names as rendered are important,
but they are also "ordered" vs. what is in the compiled object here.
"""
_ordered_columns = True
"""
if False, means we can't be sure the list of entries
in _result_columns is actually the rendered order. Usually
True unless using an unordered TextAsFrom.
"""
_numeric_binds = False
"""
True if paramstyle is "numeric". This paramstyle is trickier than
all the others.
"""
insert_prefetch = update_prefetch = ()
def __init__(self, dialect, statement, column_keys=None,
inline=False, **kwargs):
"""Construct a new :class:`.SQLCompiler` object.
:param dialect: :class:`.Dialect` to be used
:param statement: :class:`.ClauseElement` to be compiled
:param column_keys: a list of column names to be compiled into an
INSERT or UPDATE statement.
:param inline: whether to generate INSERT statements as "inline", e.g.
not formatted to return any generated defaults
:param kwargs: additional keyword arguments to be consumed by the
superclass.
"""
self.column_keys = column_keys
# compile INSERT/UPDATE defaults/sequences inlined (no pre-
# execute)
self.inline = inline or getattr(statement, 'inline', False)
# a dictionary of bind parameter keys to BindParameter
# instances.
self.binds = {}
# a dictionary of BindParameter instances to "compiled" names
# that are actually present in the generated SQL
self.bind_names = util.column_dict()
# stack which keeps track of nested SELECT statements
self.stack = []
# relates label names in the final SQL to a tuple of local
# column/label name, ColumnElement object (if any) and
# TypeEngine. ResultProxy uses this for type processing and
# column targeting
self._result_columns = []
# true if the paramstyle is positional
self.positional = dialect.positional
if self.positional:
self.positiontup = []
self._numeric_binds = dialect.paramstyle == "numeric"
self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle]
self.ctes = None
self.label_length = dialect.label_length \
or dialect.max_identifier_length
# a map which tracks "anonymous" identifiers that are created on
# the fly here
self.anon_map = util.PopulateDict(self._process_anon)
# a map which tracks "truncated" names based on
# dialect.label_length or dialect.max_identifier_length
self.truncated_names = {}
Compiled.__init__(self, dialect, statement, **kwargs)
if (
self.isinsert or self.isupdate or self.isdelete
) and statement._returning:
self.returning = statement._returning
if self.positional and self._numeric_binds:
self._apply_numbered_params()
@property
def prefetch(self):
return list(self.insert_prefetch + self.update_prefetch)
@util.memoized_instancemethod
def _init_cte_state(self):
"""Initialize collections related to CTEs only if
a CTE is located, to save on the overhead of
these collections otherwise.
"""
# collect CTEs to tack on top of a SELECT
self.ctes = util.OrderedDict()
self.ctes_by_name = {}
self.ctes_recursive = False
if self.positional:
self.cte_positional = {}
@contextlib.contextmanager
def _nested_result(self):
"""special API to support the use case of 'nested result sets'"""
result_columns, ordered_columns = (
self._result_columns, self._ordered_columns)
self._result_columns, self._ordered_columns = [], False
try:
if self.stack:
entry = self.stack[-1]
entry['need_result_map_for_nested'] = True
else:
entry = None
yield self._result_columns, self._ordered_columns
finally:
if entry:
entry.pop('need_result_map_for_nested')
self._result_columns, self._ordered_columns = (
result_columns, ordered_columns)
def _apply_numbered_params(self):
poscount = itertools.count(1)
self.string = re.sub(
r'\[_POSITION\]',
lambda m: str(util.next(poscount)),
self.string)
@util.memoized_property
def _bind_processors(self):
return dict(
(key, value) for key, value in
((self.bind_names[bindparam],
bindparam.type._cached_bind_processor(self.dialect)
)
for bindparam in self.bind_names)
if value is not None
)
def is_subquery(self):
return len(self.stack) > 1
@property
def sql_compiler(self):
return self
def construct_params(self, params=None, _group_number=None, _check=True):
"""return a dictionary of bind parameter keys and values"""
if params:
pd = {}
for bindparam in self.bind_names:
name = self.bind_names[bindparam]
if bindparam.key in params:
pd[name] = params[bindparam.key]
elif name in params:
pd[name] = params[name]
elif _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number), code="cd3x")
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key, code="cd3x")
elif bindparam.callable:
pd[name] = bindparam.effective_value
else:
pd[name] = bindparam.value
return pd
else:
pd = {}
for bindparam in self.bind_names:
if _check and bindparam.required:
if _group_number:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r, "
"in parameter group %d" %
(bindparam.key, _group_number), code="cd3x")
else:
raise exc.InvalidRequestError(
"A value is required for bind parameter %r"
% bindparam.key, code="cd3x")
if bindparam.callable:
pd[self.bind_names[bindparam]] = bindparam.effective_value
else:
pd[self.bind_names[bindparam]] = bindparam.value
return pd
@property
def params(self):
"""Return the bind param dictionary embedded into this
compiled object, for those values that are present."""
return self.construct_params(_check=False)
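# Commented sketch: construct_params() layers caller-supplied values over the
# values embedded in the statement, and rejects required parameters that were
# never given a value.
#
#   from sqlalchemy import column, select, bindparam
#   compiled = select([column('a')]).where(column('a') == bindparam('a')).compile()
#   compiled.construct_params({'a': 10})   # {'a': 10}
#   compiled.construct_params()            # raises InvalidRequestError: a value
#                                          # is required for bind parameter 'a'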
@util.dependencies("sqlalchemy.engine.result")
def _create_result_map(self, result):
"""utility method used for unit tests only."""
return result.ResultMetaData._create_result_map(self._result_columns)
def default_from(self):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
"""
return ""
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
def visit_label_reference(
self, element, within_columns_clause=False, **kwargs):
if self.stack and self.dialect.supports_simple_order_by_label:
selectable = self.stack[-1]['selectable']
with_cols, only_froms, only_cols = selectable._label_resolve_dict
if within_columns_clause:
resolve_dict = only_froms
else:
resolve_dict = only_cols
# this can be None in the case that a _label_reference()
# were subject to a replacement operation, in which case
# the replacement of the Label element may have changed
# to something else like a ColumnClause expression.
order_by_elem = element.element._order_by_label_element
if order_by_elem is not None and order_by_elem.name in \
resolve_dict and \
order_by_elem.shares_lineage(
resolve_dict[order_by_elem.name]):
kwargs['render_label_as_label'] = \
element.element._order_by_label_element
return self.process(
element.element, within_columns_clause=within_columns_clause,
**kwargs)
def visit_textual_label_reference(
self, element, within_columns_clause=False, **kwargs):
if not self.stack:
# compiling the element outside of the context of a SELECT
return self.process(
element._text_clause
)
selectable = self.stack[-1]['selectable']
with_cols, only_froms, only_cols = selectable._label_resolve_dict
try:
if within_columns_clause:
col = only_froms[element.element]
else:
col = with_cols[element.element]
except KeyError:
# treat it like text()
util.warn_limited(
"Can't resolve label reference %r; converting to text()",
util.ellipses_string(element.element))
return self.process(
element._text_clause
)
else:
kwargs['render_label_as_label'] = col
return self.process(
col, within_columns_clause=within_columns_clause, **kwargs)
def visit_label(self, label,
add_to_result_map=None,
within_label_clause=False,
within_columns_clause=False,
render_label_as_label=None,
**kw):
# only render labels within the columns clause
# or ORDER BY clause of a select. dialect-specific compilers
# can modify this behavior.
render_label_with_as = (within_columns_clause and not
within_label_clause)
render_label_only = render_label_as_label is label
if render_label_only or render_label_with_as:
if isinstance(label.name, elements._truncated_label):
labelname = self._truncated_identifier("colident", label.name)
else:
labelname = label.name
if render_label_with_as:
if add_to_result_map is not None:
add_to_result_map(
labelname,
label.name,
(label, labelname, ) + label._alt_names,
label.type
)
return label.element._compiler_dispatch(
self, within_columns_clause=True,
within_label_clause=True, **kw) + \
OPERATORS[operators.as_] + \
self.preparer.format_label(label, labelname)
elif render_label_only:
return self.preparer.format_label(label, labelname)
else:
return label.element._compiler_dispatch(
self, within_columns_clause=False, **kw)
def _fallback_column_name(self, column):
raise exc.CompileError("Cannot compile Column object until "
"its 'name' is assigned.")
def visit_column(self, column, add_to_result_map=None,
include_table=True, **kwargs):
name = orig_name = column.name
if name is None:
name = self._fallback_column_name(column)
is_literal = column.is_literal
if not is_literal and isinstance(name, elements._truncated_label):
name = self._truncated_identifier("colident", name)
if add_to_result_map is not None:
add_to_result_map(
name,
orig_name,
(column, name, column.key),
column.type
)
if is_literal:
name = self.escape_literal_column(name)
else:
name = self.preparer.quote(name)
table = column.table
if table is None or not include_table or not table.named_with_column:
return name
else:
effective_schema = self.preparer.schema_for_object(table)
if effective_schema:
schema_prefix = self.preparer.quote_schema(
effective_schema) + '.'
else:
schema_prefix = ''
tablename = table.name
if isinstance(tablename, elements._truncated_label):
tablename = self._truncated_identifier("alias", tablename)
return schema_prefix + \
self.preparer.quote(tablename) + \
"." + name
def visit_fromclause(self, fromclause, **kwargs):
return fromclause.name
def visit_index(self, index, **kwargs):
return index.name
def visit_typeclause(self, typeclause, **kw):
kw['type_expression'] = typeclause
return self.dialect.type_compiler.process(typeclause.type, **kw)
def post_process_text(self, text):
if self.preparer._double_percents:
text = text.replace('%', '%%')
return text
def escape_literal_column(self, text):
if self.preparer._double_percents:
text = text.replace('%', '%%')
return text
def visit_textclause(self, textclause, **kw):
def do_bindparam(m):
name = m.group(1)
if name in textclause._bindparams:
return self.process(textclause._bindparams[name], **kw)
else:
return self.bindparam_string(name, **kw)
if not self.stack:
self.isplaintext = True
# un-escape any \:params
return BIND_PARAMS_ESC.sub(
lambda m: m.group(1),
BIND_PARAMS.sub(
do_bindparam,
self.post_process_text(textclause.text))
)
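# Commented sketch of the text() handling above: ":name" tokens become bind
# parameters, while a backslash-escaped "\:" is rendered as a literal colon.
#
#   from sqlalchemy import text
#   t = text(r"SELECT * FROM log WHERE level = :level AND tag = 'sys\:boot'")
#   # compiles to: SELECT * FROM log WHERE level = :level AND tag = 'sys:boot'
#   # with 'level' exposed as a bind parameter.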
def visit_text_as_from(self, taf,
compound_index=None,
asfrom=False,
parens=True, **kw):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
if populate_result_map:
self._ordered_columns = \
self._textual_ordered_columns = taf.positional
for c in taf.column_args:
self.process(c, within_columns_clause=True,
add_to_result_map=self._add_to_result_map)
text = self.process(taf.element, **kw)
if asfrom and parens:
text = "(%s)" % text
return text
def visit_null(self, expr, **kw):
return 'NULL'
def visit_true(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'true'
else:
return "1"
def visit_false(self, expr, **kw):
if self.dialect.supports_native_boolean:
return 'false'
else:
return "0"
def visit_clauselist(self, clauselist, **kw):
sep = clauselist.operator
if sep is None:
sep = " "
else:
sep = OPERATORS[clauselist.operator]
return sep.join(
s for s in
(
c._compiler_dispatch(self, **kw)
for c in clauselist.clauses)
if s)
def visit_case(self, clause, **kwargs):
x = "CASE "
if clause.value is not None:
x += clause.value._compiler_dispatch(self, **kwargs) + " "
for cond, result in clause.whens:
x += "WHEN " + cond._compiler_dispatch(
self, **kwargs
) + " THEN " + result._compiler_dispatch(
self, **kwargs) + " "
if clause.else_ is not None:
x += "ELSE " + clause.else_._compiler_dispatch(
self, **kwargs
) + " "
x += "END"
return x
def visit_type_coerce(self, type_coerce, **kw):
return type_coerce.typed_expression._compiler_dispatch(self, **kw)
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
cast.typeclause._compiler_dispatch(self, **kwargs))
def _format_frame_clause(self, range_, **kw):
return '%s AND %s' % (
"UNBOUNDED PRECEDING"
if range_[0] is elements.RANGE_UNBOUNDED
else "CURRENT ROW" if range_[0] is elements.RANGE_CURRENT
else "%s PRECEDING" % (
self.process(elements.literal(abs(range_[0])), **kw), )
if range_[0] < 0
else "%s FOLLOWING" % (
self.process(elements.literal(range_[0]), **kw), ),
"UNBOUNDED FOLLOWING"
if range_[1] is elements.RANGE_UNBOUNDED
else "CURRENT ROW" if range_[1] is elements.RANGE_CURRENT
else "%s PRECEDING" % (
self.process(elements.literal(abs(range_[1])), **kw), )
if range_[1] < 0
else "%s FOLLOWING" % (
self.process(elements.literal(range_[1]), **kw), ),
)
def visit_over(self, over, **kwargs):
if over.range_:
range_ = "RANGE BETWEEN %s" % self._format_frame_clause(
over.range_, **kwargs)
elif over.rows:
range_ = "ROWS BETWEEN %s" % self._format_frame_clause(
over.rows, **kwargs)
else:
range_ = None
return "%s OVER (%s)" % (
over.element._compiler_dispatch(self, **kwargs),
' '.join([
'%s BY %s' % (
word, clause._compiler_dispatch(self, **kwargs)
)
for word, clause in (
('PARTITION', over.partition_by),
('ORDER', over.order_by)
)
if clause is not None and len(clause)
] + ([range_] if range_ else [])
)
)
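# Commented sketch of the window expressions rendered above (public API):
#
#   from sqlalchemy import func, column
#   func.row_number().over(partition_by=column('dept'), order_by=column('salary'))
#   # -> row_number() OVER (PARTITION BY dept ORDER BY salary)
#   func.sum(column('x')).over(order_by=column('y'), rows=(None, 0))
#   # -> sum(x) OVER (ORDER BY y ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)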
def visit_withingroup(self, withingroup, **kwargs):
return "%s WITHIN GROUP (ORDER BY %s)" % (
withingroup.element._compiler_dispatch(self, **kwargs),
withingroup.order_by._compiler_dispatch(self, **kwargs)
)
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
funcfilter.criterion._compiler_dispatch(self, **kwargs)
)
def visit_extract(self, extract, **kwargs):
field = self.extract_map.get(extract.field, extract.field)
return "EXTRACT(%s FROM %s)" % (
field, extract.expr._compiler_dispatch(self, **kwargs))
def visit_function(self, func, add_to_result_map=None, **kwargs):
if add_to_result_map is not None:
add_to_result_map(
func.name, func.name, (), func.type
)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
if disp:
return disp(func, **kwargs)
else:
name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s")
return ".".join(list(func.packagenames) + [name]) % \
{'expr': self.function_argspec(func, **kwargs)}
def visit_next_value_func(self, next_value, **kw):
return self.visit_sequence(next_value.sequence)
def visit_sequence(self, sequence):
raise NotImplementedError(
"Dialect '%s' does not support sequence increments." %
self.dialect.name
)
def function_argspec(self, func, **kwargs):
return func.clause_expr._compiler_dispatch(self, **kwargs)
def visit_compound_select(self, cs, asfrom=False,
parens=True, compound_index=0, **kwargs):
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
need_result_map = toplevel or \
(compound_index == 0
and entry.get('need_result_map_for_compound', False))
self.stack.append(
{
'correlate_froms': entry['correlate_froms'],
'asfrom_froms': entry['asfrom_froms'],
'selectable': cs,
'need_result_map_for_compound': need_result_map
})
keyword = self.compound_keywords.get(cs.keyword)
text = (" " + keyword + " ").join(
(c._compiler_dispatch(self,
asfrom=asfrom, parens=False,
compound_index=i, **kwargs)
for i, c in enumerate(cs.selects))
)
group_by = cs._group_by_clause._compiler_dispatch(
self, asfrom=asfrom, **kwargs)
if group_by:
text += " GROUP BY " + group_by
text += self.order_by_clause(cs, **kwargs)
text += (cs._limit_clause is not None
or cs._offset_clause is not None) and \
self.limit_clause(cs, **kwargs) or ""
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom and parens:
return "(" + text + ")"
else:
return text
def _get_operator_dispatch(self, operator_, qualifier1, qualifier2):
attrname = "visit_%s_%s%s" % (
operator_.__name__, qualifier1,
"_" + qualifier2 if qualifier2 else "")
return getattr(self, attrname, None)
def visit_unary(self, unary, **kw):
if unary.operator:
if unary.modifier:
raise exc.CompileError(
"Unary expression does not support operator "
"and modifier simultaneously")
disp = self._get_operator_dispatch(
unary.operator, "unary", "operator")
if disp:
return disp(unary, unary.operator, **kw)
else:
return self._generate_generic_unary_operator(
unary, OPERATORS[unary.operator], **kw)
elif unary.modifier:
disp = self._get_operator_dispatch(
unary.modifier, "unary", "modifier")
if disp:
return disp(unary, unary.modifier, **kw)
else:
return self._generate_generic_unary_modifier(
unary, OPERATORS[unary.modifier], **kw)
else:
raise exc.CompileError(
"Unary expression has no operator or modifier")
def visit_istrue_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return self.process(element.element, **kw)
else:
return "%s = 1" % self.process(element.element, **kw)
def visit_isfalse_unary_operator(self, element, operator, **kw):
if self.dialect.supports_native_boolean:
return "NOT %s" % self.process(element.element, **kw)
else:
return "%s = 0" % self.process(element.element, **kw)
def visit_notmatch_op_binary(self, binary, operator, **kw):
return "NOT %s" % self.visit_binary(
binary, override_operator=operators.match_op)
def _emit_empty_in_warning(self):
util.warn(
'The IN-predicate was invoked with an '
'empty sequence. This results in a '
'contradiction, which nonetheless can be '
'expensive to evaluate. Consider alternative '
'strategies for improved performance.')
def visit_empty_in_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 != 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left != binary.left)
def visit_empty_notin_op_binary(self, binary, operator, **kw):
if self.dialect._use_static_in:
return "1 = 1"
else:
if self.dialect._warn_on_empty_in:
self._emit_empty_in_warning()
return self.process(binary.left == binary.left)
def visit_binary(self, binary, override_operator=None,
eager_grouping=False, **kw):
# don't allow "? = ?" to render
if self.ansi_bind_rules and \
isinstance(binary.left, elements.BindParameter) and \
isinstance(binary.right, elements.BindParameter):
kw['literal_binds'] = True
operator_ = override_operator or binary.operator
disp = self._get_operator_dispatch(operator_, "binary", None)
if disp:
return disp(binary, operator_, **kw)
else:
try:
opstring = OPERATORS[operator_]
except KeyError:
raise exc.UnsupportedCompilationError(self, operator_)
else:
return self._generate_generic_binary(binary, opstring, **kw)
def visit_mod_binary(self, binary, operator, **kw):
if self.preparer._double_percents:
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
else:
return self.process(binary.left, **kw) + " % " + \
self.process(binary.right, **kw)
def visit_custom_op_binary(self, element, operator, **kw):
kw['eager_grouping'] = operator.eager_grouping
return self._generate_generic_binary(
element, " " + operator.opstring + " ", **kw)
def visit_custom_op_unary_operator(self, element, operator, **kw):
return self._generate_generic_unary_operator(
element, operator.opstring + " ", **kw)
def visit_custom_op_unary_modifier(self, element, operator, **kw):
return self._generate_generic_unary_modifier(
element, " " + operator.opstring, **kw)
def _generate_generic_binary(
self, binary, opstring, eager_grouping=False, **kw):
_in_binary = kw.get('_in_binary', False)
kw['_in_binary'] = True
text = binary.left._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw) + \
opstring + \
binary.right._compiler_dispatch(
self, eager_grouping=eager_grouping, **kw)
if _in_binary and eager_grouping:
text = "(%s)" % text
return text
def _generate_generic_unary_operator(self, unary, opstring, **kw):
return opstring + unary.element._compiler_dispatch(self, **kw)
def _generate_generic_unary_modifier(self, unary, opstring, **kw):
return unary.element._compiler_dispatch(self, **kw) + opstring
@util.memoized_property
def _like_percent_literal(self):
return elements.literal_column("'%'", type_=sqltypes.STRINGTYPE)
def visit_contains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notcontains_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right).__add__(percent)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_startswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notstartswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__radd__(
binary.right
)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_endswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_like_op_binary(binary, operator, **kw)
def visit_notendswith_op_binary(self, binary, operator, **kw):
binary = binary._clone()
percent = self._like_percent_literal
binary.right = percent.__add__(binary.right)
return self.visit_notlike_op_binary(binary, operator, **kw)
def visit_like_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
# TODO: use ternary here, not "and"/ "or"
return '%s LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notlike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return '%s NOT LIKE %s' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
return 'lower(%s) NOT LIKE lower(%s)' % (
binary.left._compiler_dispatch(self, **kw),
binary.right._compiler_dispatch(self, **kw)) \
+ (
' ESCAPE ' +
self.render_literal_value(escape, sqltypes.STRINGTYPE)
if escape else ''
)
def visit_between_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " BETWEEN SYMMETRIC "
if symmetric else " BETWEEN ", **kw)
def visit_notbetween_op_binary(self, binary, operator, **kw):
symmetric = binary.modifiers.get("symmetric", False)
return self._generate_generic_binary(
binary, " NOT BETWEEN SYMMETRIC "
if symmetric else " NOT BETWEEN ", **kw)
def visit_bindparam(self, bindparam, within_columns_clause=False,
literal_binds=False,
skip_bind_expression=False,
**kwargs):
if not skip_bind_expression and bindparam.type._has_bind_expression:
bind_expression = bindparam.type.bind_expression(bindparam)
return self.process(bind_expression,
skip_bind_expression=True)
if literal_binds or \
(within_columns_clause and
self.ansi_bind_rules):
if bindparam.value is None and bindparam.callable is None:
raise exc.CompileError("Bind parameter '%s' without a "
"renderable value not allowed here."
% bindparam.key)
return self.render_literal_bindparam(
bindparam, within_columns_clause=True, **kwargs)
name = self._truncate_bindparam(bindparam)
if name in self.binds:
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
not existing.proxy_set.intersection(
bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
"clause of this "
"insert/update statement. Please use a "
"name other than column name when using bindparam() "
"with insert() or update() (for example, 'b_%s')." %
(bindparam.key, bindparam.key)
)
self.binds[bindparam.key] = self.binds[name] = bindparam
return self.bindparam_string(
name, expanding=bindparam.expanding, **kwargs)
def render_literal_bindparam(self, bindparam, **kw):
value = bindparam.effective_value
return self.render_literal_value(value, bindparam.type)
def render_literal_value(self, value, type_):
"""Render the value of a bind parameter as a quoted literal.
This is used for statement sections that do not accept bind parameters
on the target driver/database.
This should be implemented by subclasses using the quoting services
of the DBAPI.
"""
processor = type_._cached_literal_processor(self.dialect)
if processor:
return processor(value)
else:
raise NotImplementedError(
"Don't know how to literal-quote value %r" % value)
def _truncate_bindparam(self, bindparam):
if bindparam in self.bind_names:
return self.bind_names[bindparam]
bind_name = bindparam.key
if isinstance(bind_name, elements._truncated_label):
bind_name = self._truncated_identifier("bindparam", bind_name)
# add to bind_names for translation
self.bind_names[bindparam] = bind_name
return bind_name
def _truncated_identifier(self, ident_class, name):
if (ident_class, name) in self.truncated_names:
return self.truncated_names[(ident_class, name)]
anonname = name.apply_map(self.anon_map)
if len(anonname) > self.label_length - 6:
counter = self.truncated_names.get(ident_class, 1)
truncname = anonname[0:max(self.label_length - 6, 0)] + \
"_" + hex(counter)[2:]
self.truncated_names[ident_class] = counter + 1
else:
truncname = anonname
self.truncated_names[(ident_class, name)] = truncname
return truncname
def _anonymize(self, name):
return name % self.anon_map
def _process_anon(self, key):
(ident, derived) = key.split(' ', 1)
anonymous_counter = self.anon_map.get(derived, 1)
self.anon_map[derived] = anonymous_counter + 1
return derived + "_" + str(anonymous_counter)
def bindparam_string(
self, name, positional_names=None, expanding=False, **kw):
if self.positional:
if positional_names is not None:
positional_names.append(name)
else:
self.positiontup.append(name)
if expanding:
self.contains_expanding_parameters = True
return "([EXPANDING_%s])" % name
else:
return self.bindtemplate % {'name': name}
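# Commented sketch of the "expanding" placeholder emitted above: an expanding
# bindparam defers rendering of an IN list until execution time.
#
#   from sqlalchemy import table, column, select, bindparam
#   t = table('users', column('id'))
#   stmt = select([t.c.id]).where(t.c.id.in_(bindparam('ids', expanding=True)))
#   # the compiled string contains ([EXPANDING_ids]); at execute time each
#   # element of the supplied list becomes its own driver-level parameter:
#   # conn.execute(stmt, {'ids': [1, 2, 3]})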
def visit_cte(self, cte, asfrom=False, ashint=False,
fromhints=None,
**kwargs):
self._init_cte_state()
if isinstance(cte.name, elements._truncated_label):
cte_name = self._truncated_identifier("alias", cte.name)
else:
cte_name = cte.name
if cte_name in self.ctes_by_name:
existing_cte = self.ctes_by_name[cte_name]
# we've generated a same-named CTE that we are enclosed in,
# or this is the same CTE. just return the name.
if cte in existing_cte._restates or cte is existing_cte:
return self.preparer.format_alias(cte, cte_name)
elif existing_cte in cte._restates:
# we've generated a same-named CTE that is
# enclosed in us - we take precedence, so
# discard the text for the "inner".
del self.ctes[existing_cte]
else:
raise exc.CompileError(
"Multiple, unrelated CTEs found with "
"the same name: %r" %
cte_name)
self.ctes_by_name[cte_name] = cte
# look for embedded DML ctes and propagate autocommit
if 'autocommit' in cte.element._execution_options and \
'autocommit' not in self.execution_options:
self.execution_options = self.execution_options.union(
{"autocommit": cte.element._execution_options['autocommit']})
if cte._cte_alias is not None:
orig_cte = cte._cte_alias
if orig_cte not in self.ctes:
self.visit_cte(orig_cte, **kwargs)
cte_alias_name = cte._cte_alias.name
if isinstance(cte_alias_name, elements._truncated_label):
cte_alias_name = self._truncated_identifier(
"alias", cte_alias_name)
else:
orig_cte = cte
cte_alias_name = None
if not cte_alias_name and cte not in self.ctes:
if cte.recursive:
self.ctes_recursive = True
text = self.preparer.format_alias(cte, cte_name)
if cte.recursive:
if isinstance(cte.original, selectable.Select):
col_source = cte.original
elif isinstance(cte.original, selectable.CompoundSelect):
col_source = cte.original.selects[0]
else:
assert False
recur_cols = [c for c in
util.unique_list(col_source.inner_columns)
if c is not None]
text += "(%s)" % (", ".join(
self.preparer.format_column(ident)
for ident in recur_cols))
if self.positional:
kwargs['positional_names'] = self.cte_positional[cte] = []
text += " AS \n" + \
cte.original._compiler_dispatch(
self, asfrom=True, **kwargs
)
if cte._suffixes:
text += " " + self._generate_prefixes(
cte, cte._suffixes, **kwargs)
self.ctes[cte] = text
if asfrom:
if cte_alias_name:
text = self.preparer.format_alias(cte, cte_alias_name)
text += self.get_render_as_alias_suffix(cte_name)
else:
return self.preparer.format_alias(cte, cte_name)
return text
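# Commented sketch of the construct rendered above (public API; names are
# illustrative):
#
#   from sqlalchemy import table, column, select, func
#   orders = table('orders', column('region'), column('amount'))
#   regional = (select([orders.c.region,
#                       func.sum(orders.c.amount).label('total')])
#               .group_by(orders.c.region)
#               .cte('regional_sales'))
#   stmt = select([regional.c.region]).where(regional.c.total > 500)
#   # renders: WITH regional_sales AS (SELECT ... GROUP BY orders.region)
#   #          SELECT regional_sales.region FROM regional_sales WHERE ...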
def visit_alias(self, alias, asfrom=False, ashint=False,
iscrud=False,
fromhints=None, **kwargs):
if asfrom or ashint:
if isinstance(alias.name, elements._truncated_label):
alias_name = self._truncated_identifier("alias", alias.name)
else:
alias_name = alias.name
if ashint:
return self.preparer.format_alias(alias, alias_name)
elif asfrom:
ret = alias.original._compiler_dispatch(self,
asfrom=True, **kwargs) + \
self.get_render_as_alias_suffix(
self.preparer.format_alias(alias, alias_name))
if fromhints and alias in fromhints:
ret = self.format_from_hint_text(ret, alias,
fromhints[alias], iscrud)
return ret
else:
return alias.original._compiler_dispatch(self, **kwargs)
def visit_lateral(self, lateral, **kw):
kw['lateral'] = True
return "LATERAL %s" % self.visit_alias(lateral, **kw)
def visit_tablesample(self, tablesample, asfrom=False, **kw):
text = "%s TABLESAMPLE %s" % (
self.visit_alias(tablesample, asfrom=True, **kw),
tablesample._get_method()._compiler_dispatch(self, **kw))
if tablesample.seed is not None:
text += " REPEATABLE (%s)" % (
tablesample.seed._compiler_dispatch(self, **kw))
return text
def get_render_as_alias_suffix(self, alias_name_text):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
populate_result_map,
asfrom, column_clause_args,
name=None,
within_columns_clause=True):
"""produce labeled columns present in a select()."""
if column.type._has_column_expression and \
populate_result_map:
col_expr = column.type.column_expression(column)
add_to_result_map = lambda keyname, name, objects, type_: \
self._add_to_result_map(
keyname, name,
(column,) + objects, type_)
else:
col_expr = column
if populate_result_map:
add_to_result_map = self._add_to_result_map
else:
add_to_result_map = None
if not within_columns_clause:
result_expr = col_expr
elif isinstance(column, elements.Label):
if col_expr is not column:
result_expr = _CompileLabel(
col_expr,
column.name,
alt_names=(column.element,)
)
else:
result_expr = col_expr
elif select is not None and name:
result_expr = _CompileLabel(
col_expr,
name,
alt_names=(column._key_label,)
)
elif \
asfrom and \
isinstance(column, elements.ColumnClause) and \
not column.is_literal and \
column.table is not None and \
not isinstance(column.table, selectable.Select):
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
elif (
not isinstance(column, elements.TextClause) and
(
not isinstance(column, elements.UnaryExpression) or
column.wraps_column_expression
) and
(
not hasattr(column, 'name') or
isinstance(column, functions.Function)
)
):
result_expr = _CompileLabel(col_expr, column.anon_label)
elif col_expr is not column:
# TODO: are we sure "column" has a .name and .key here ?
# assert isinstance(column, elements.ColumnClause)
result_expr = _CompileLabel(col_expr,
elements._as_truncated(column.name),
alt_names=(column.key,))
else:
result_expr = col_expr
column_clause_args.update(
within_columns_clause=within_columns_clause,
add_to_result_map=add_to_result_map
)
return result_expr._compiler_dispatch(
self,
**column_clause_args
)
def format_from_hint_text(self, sqltext, table, hint, iscrud):
hinttext = self.get_from_hint_text(table, hint)
if hinttext:
sqltext += " " + hinttext
return sqltext
def get_select_hint_text(self, byfroms):
return None
def get_from_hint_text(self, table, text):
return None
def get_crud_hint_text(self, table, text):
return None
def get_statement_hint_text(self, hint_texts):
return " ".join(hint_texts)
def _transform_select_for_nested_joins(self, select):
"""Rewrite any "a JOIN (b JOIN c)" expression as
"a JOIN (select * from b JOIN c) AS anon", to support
databases that can't parse a parenthesized join correctly
(i.e. sqlite < 3.7.16).
"""
cloned = {}
column_translate = [{}]
def visit(element, **kw):
if element in column_translate[-1]:
return column_translate[-1][element]
elif element in cloned:
return cloned[element]
newelem = cloned[element] = element._clone()
if newelem.is_selectable and newelem._is_join and \
isinstance(newelem.right, selectable.FromGrouping):
newelem._reset_exported()
newelem.left = visit(newelem.left, **kw)
right = visit(newelem.right, **kw)
selectable_ = selectable.Select(
[right.element],
use_labels=True).alias()
for c in selectable_.c:
c._key_label = c.key
c._label = c.name
translate_dict = dict(
zip(newelem.right.element.c, selectable_.c)
)
# translating from both the old and the new
# because different select() structures will lead us
# to traverse differently
translate_dict[right.element.left] = selectable_
translate_dict[right.element.right] = selectable_
translate_dict[newelem.right.element.left] = selectable_
translate_dict[newelem.right.element.right] = selectable_
# propagate translations that we've gained
# from nested visit(newelem.right) outwards
# to the enclosing select here. this happens
# only when we have more than one level of right
# join nesting, i.e. "a JOIN (b JOIN (c JOIN d))"
for k, v in list(column_translate[-1].items()):
if v in translate_dict:
# remarkably, no current ORM tests (May 2013)
# hit this condition, only test_join_rewriting
# does.
column_translate[-1][k] = translate_dict[v]
column_translate[-1].update(translate_dict)
newelem.right = selectable_
newelem.onclause = visit(newelem.onclause, **kw)
elif newelem._is_from_container:
# if we hit an Alias, CompoundSelect or ScalarSelect, put a
# marker in the stack.
kw['transform_clue'] = 'select_container'
newelem._copy_internals(clone=visit, **kw)
elif newelem.is_selectable and newelem._is_select:
barrier_select = kw.get('transform_clue', None) == \
'select_container'
# if we're still descended from an
# Alias/CompoundSelect/ScalarSelect, we're
# in a FROM clause, so start with a new translate collection
if barrier_select:
column_translate.append({})
kw['transform_clue'] = 'inside_select'
newelem._copy_internals(clone=visit, **kw)
if barrier_select:
del column_translate[-1]
else:
newelem._copy_internals(clone=visit, **kw)
return newelem
return visit(select)
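# Commented illustration of the rewrite performed above for backends that
# cannot parse a right-nested, parenthesized JOIN (e.g. sqlite < 3.7.16):
#
#   SELECT ... FROM a JOIN (b JOIN c ON b.id = c.b_id) ON a.id = b.a_id
#
# becomes, roughly,
#
#   SELECT ... FROM a JOIN (SELECT ... FROM b JOIN c ON b.id = c.b_id) AS anon_1
#              ON a.id = anon_1.<translated column>
#
# with column references in the enclosing SELECT translated to the anonymous
# alias (labels shown are illustrative, not the exact generated names).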
def _transform_result_map_for_nested_joins(
self, select, transformed_select):
inner_col = dict((c._key_label, c) for
c in transformed_select.inner_columns)
d = dict(
(inner_col[c._key_label], c)
for c in select.inner_columns
)
self._result_columns = [
(key, name, tuple([d.get(col, col) for col in objs]), typ)
for key, name, objs, typ in self._result_columns
]
_default_stack_entry = util.immutabledict([
('correlate_froms', frozenset()),
('asfrom_froms', frozenset())
])
def _display_froms_for_select(self, select, asfrom, lateral=False):
# utility method to help external dialects
# get the correct from list for a select.
# specifically the oracle dialect needs this feature
# right now.
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
return froms
def visit_select(self, select, asfrom=False, parens=True,
fromhints=None,
compound_index=0,
nested_join_translation=False,
select_wraps_for=None,
lateral=False,
**kwargs):
needs_nested_translation = \
select.use_labels and \
not nested_join_translation and \
not self.stack and \
not self.dialect.supports_right_nested_joins
if needs_nested_translation:
transformed_select = self._transform_select_for_nested_joins(
select)
text = self.visit_select(
transformed_select, asfrom=asfrom, parens=parens,
fromhints=fromhints,
compound_index=compound_index,
nested_join_translation=True, **kwargs
)
toplevel = not self.stack
entry = self._default_stack_entry if toplevel else self.stack[-1]
populate_result_map = toplevel or \
(
compound_index == 0 and entry.get(
'need_result_map_for_compound', False)
) or entry.get('need_result_map_for_nested', False)
# this was first proposed as part of #3372; however, it is not
# reached in current tests and could possibly be an assertion
# instead.
if not populate_result_map and 'add_to_result_map' in kwargs:
del kwargs['add_to_result_map']
if needs_nested_translation:
if populate_result_map:
self._transform_result_map_for_nested_joins(
select, transformed_select)
return text
froms = self._setup_select_stack(select, entry, asfrom, lateral)
column_clause_args = kwargs.copy()
column_clause_args.update({
'within_label_clause': False,
'within_columns_clause': False
})
text = "SELECT " # we're off to a good start !
if select._hints:
hint_text, byfrom = self._setup_select_hints(select)
if hint_text:
text += hint_text + " "
else:
byfrom = None
if select._prefixes:
text += self._generate_prefixes(
select, select._prefixes, **kwargs)
text += self.get_select_precolumns(select, **kwargs)
# the actual list of columns to print in the SELECT column list.
inner_columns = [
c for c in [
self._label_select_column(
select,
column,
populate_result_map, asfrom,
column_clause_args,
name=name)
for name, column in select._columns_plus_names
]
if c is not None
]
if populate_result_map and select_wraps_for is not None:
# if this select is a compiler-generated wrapper,
# rewrite the targeted columns in the result map
translate = dict(
zip(
[name for (key, name) in select._columns_plus_names],
[name for (key, name) in
select_wraps_for._columns_plus_names])
)
self._result_columns = [
(key, name, tuple(translate.get(o, o) for o in obj), type_)
for key, name, obj, type_ in self._result_columns
]
text = self._compose_select_body(
text, select, inner_columns, froms, byfrom, kwargs)
if select._statement_hints:
per_dialect = [
ht for (dialect_name, ht)
in select._statement_hints
if dialect_name in ('*', self.dialect.name)
]
if per_dialect:
text += " " + self.get_statement_hint_text(per_dialect)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
if select._suffixes:
text += " " + self._generate_prefixes(
select, select._suffixes, **kwargs)
self.stack.pop(-1)
if (asfrom or lateral) and parens:
return "(" + text + ")"
else:
return text
def _setup_select_hints(self, select):
byfrom = dict([
(from_, hinttext % {
'name': from_._compiler_dispatch(
self, ashint=True)
})
for (from_, dialect), hinttext in
select._hints.items()
if dialect in ('*', self.dialect.name)
])
hint_text = self.get_select_hint_text(byfrom)
return hint_text, byfrom
def _setup_select_stack(self, select, entry, asfrom, lateral):
correlate_froms = entry['correlate_froms']
asfrom_froms = entry['asfrom_froms']
if asfrom and not lateral:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms.difference(
asfrom_froms),
implicit_correlate_froms=())
else:
froms = select._get_display_froms(
explicit_correlate_froms=correlate_froms,
implicit_correlate_froms=asfrom_froms)
new_correlate_froms = set(selectable._from_objects(*froms))
all_correlate_froms = new_correlate_froms.union(correlate_froms)
new_entry = {
'asfrom_froms': new_correlate_froms,
'correlate_froms': all_correlate_froms,
'selectable': select,
}
self.stack.append(new_entry)
return froms
def _compose_select_body(
self, text, select, inner_columns, froms, byfrom, kwargs):
text += ', '.join(inner_columns)
if froms:
text += " \nFROM "
if select._hints:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True,
fromhints=byfrom, **kwargs)
for f in froms])
else:
text += ', '.join(
[f._compiler_dispatch(self, asfrom=True, **kwargs)
for f in froms])
else:
text += self.default_from()
if select._whereclause is not None:
t = select._whereclause._compiler_dispatch(self, **kwargs)
if t:
text += " \nWHERE " + t
if select._group_by_clause.clauses:
group_by = select._group_by_clause._compiler_dispatch(
self, **kwargs)
if group_by:
text += " GROUP BY " + group_by
if select._having is not None:
t = select._having._compiler_dispatch(self, **kwargs)
if t:
text += " \nHAVING " + t
if select._order_by_clause.clauses:
text += self.order_by_clause(select, **kwargs)
if (select._limit_clause is not None or
select._offset_clause is not None):
text += self.limit_clause(select, **kwargs)
if select._for_update_arg is not None:
text += self.for_update_clause(select, **kwargs)
return text
def _generate_prefixes(self, stmt, prefixes, **kw):
clause = " ".join(
prefix._compiler_dispatch(self, **kw)
for prefix, dialect_name in prefixes
if dialect_name is None or
dialect_name == self.dialect.name
)
if clause:
clause += " "
return clause
def _render_cte_clause(self):
if self.positional:
self.positiontup = sum([
self.cte_positional[cte]
for cte in self.ctes], []) + \
self.positiontup
cte_text = self.get_cte_preamble(self.ctes_recursive) + " "
cte_text += ", \n".join(
[txt for txt in self.ctes.values()]
)
cte_text += "\n "
return cte_text
def get_cte_preamble(self, recursive):
if recursive:
return "WITH RECURSIVE"
else:
return "WITH"
def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list.
"""
return select._distinct and "DISTINCT " or ""
def order_by_clause(self, select, **kw):
order_by = select._order_by_clause._compiler_dispatch(self, **kw)
if order_by:
return " ORDER BY " + order_by
else:
return ""
def for_update_clause(self, select, **kw):
return " FOR UPDATE"
def returning_clause(self, stmt, returning_cols):
raise exc.CompileError(
"RETURNING is not supported by this "
"dialect's statement compiler.")
def limit_clause(self, select, **kw):
text = ""
if select._limit_clause is not None:
text += "\n LIMIT " + self.process(select._limit_clause, **kw)
if select._offset_clause is not None:
if select._limit_clause is None:
text += "\n LIMIT -1"
text += " OFFSET " + self.process(select._offset_clause, **kw)
return text
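# Commented sketch of the generic LIMIT/OFFSET rendering above, including the
# "LIMIT -1" placeholder emitted when only an OFFSET is present:
#
#   from sqlalchemy import table, column, select
#   t = table('users', column('id'))
#   select([t.c.id]).limit(5).offset(10)   # ... LIMIT :param_1 OFFSET :param_2
#   select([t.c.id]).offset(10)            # ... LIMIT -1 OFFSET :param_1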
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
effective_schema = self.preparer.schema_for_object(table)
if use_schema and effective_schema:
ret = self.preparer.quote_schema(effective_schema) + \
"." + self.preparer.quote(table.name)
else:
ret = self.preparer.quote(table.name)
if fromhints and table in fromhints:
ret = self.format_from_hint_text(ret, table,
fromhints[table], iscrud)
return ret
else:
return ""
def visit_join(self, join, asfrom=False, **kwargs):
if join.full:
join_type = " FULL OUTER JOIN "
elif join.isouter:
join_type = " LEFT OUTER JOIN "
else:
join_type = " JOIN "
return (
join.left._compiler_dispatch(self, asfrom=True, **kwargs) +
join_type +
join.right._compiler_dispatch(self, asfrom=True, **kwargs) +
" ON " +
join.onclause._compiler_dispatch(self, **kwargs)
)
def _setup_crud_hints(self, stmt, table_text):
dialect_hints = dict([
(table, hint_text)
for (table, dialect), hint_text in
stmt._hints.items()
if dialect in ('*', self.dialect.name)
])
if stmt.table in dialect_hints:
table_text = self.format_from_hint_text(
table_text,
stmt.table,
dialect_hints[stmt.table],
True
)
return dialect_hints, table_text
def visit_insert(self, insert_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{'correlate_froms': set(),
"asfrom_froms": set(),
"selectable": insert_stmt})
crud_params = crud._setup_crud_params(
self, insert_stmt, crud.ISINSERT, **kw)
if not crud_params and \
not self.dialect.supports_default_values and \
not self.dialect.supports_empty_insert:
raise exc.CompileError("The '%s' dialect with current database "
"version settings does not support empty "
"inserts." %
self.dialect.name)
if insert_stmt._has_multi_parameters:
if not self.dialect.supports_multivalues_insert:
raise exc.CompileError(
"The '%s' dialect with current database "
"version settings does not support "
"in-place multirow inserts." %
self.dialect.name)
crud_params_single = crud_params[0]
else:
crud_params_single = crud_params
preparer = self.preparer
supports_default_values = self.dialect.supports_default_values
text = "INSERT "
if insert_stmt._prefixes:
text += self._generate_prefixes(insert_stmt,
insert_stmt._prefixes, **kw)
text += "INTO "
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
insert_stmt, table_text)
else:
dialect_hints = None
text += table_text
if crud_params_single or not supports_default_values:
text += " (%s)" % ', '.join([preparer.format_column(c[0])
for c in crud_params_single])
if self.returning or insert_stmt._returning:
returning_clause = self.returning_clause(
insert_stmt, self.returning or insert_stmt._returning)
if self.returning_precedes_values:
text += " " + returning_clause
else:
returning_clause = None
if insert_stmt.select is not None:
text += " %s" % self.process(self._insert_from_select, **kw)
elif not crud_params and supports_default_values:
text += " DEFAULT VALUES"
elif insert_stmt._has_multi_parameters:
text += " VALUES %s" % (
", ".join(
"(%s)" % (
', '.join(c[1] for c in crud_param_set)
)
for crud_param_set in crud_params
)
)
else:
text += " VALUES (%s)" % \
', '.join([c[1] for c in crud_params])
if insert_stmt._post_values_clause is not None:
post_values_clause = self.process(
insert_stmt._post_values_clause, **kw)
if post_values_clause:
text += " " + post_values_clause
if returning_clause and not self.returning_precedes_values:
text += " " + returning_clause
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def update_limit_clause(self, update_stmt):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def update_tables_clause(self, update_stmt, from_table,
extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
MySQL overrides this.
"""
kw['asfrom'] = True
return from_table._compiler_dispatch(self, iscrud=True, **kw)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Provide a hook to override the generation of an
UPDATE..FROM clause.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within UPDATE")
def visit_update(self, update_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append(
{'correlate_froms': {update_stmt.table},
"asfrom_froms": {update_stmt.table},
"selectable": update_stmt})
extra_froms = update_stmt._extra_froms
text = "UPDATE "
if update_stmt._prefixes:
text += self._generate_prefixes(update_stmt,
update_stmt._prefixes, **kw)
table_text = self.update_tables_clause(update_stmt, update_stmt.table,
extra_froms, **kw)
crud_params = crud._setup_crud_params(
self, update_stmt, crud.ISUPDATE, **kw)
if update_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
update_stmt, table_text)
else:
dialect_hints = None
text += table_text
text += ' SET '
include_table = extra_froms and \
self.render_table_with_column_in_update_from
text += ', '.join(
c[0]._compiler_dispatch(self,
include_table=include_table) +
'=' + c[1] for c in crud_params
)
if self.returning or update_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning)
if extra_froms:
extra_from_text = self.update_from_clause(
update_stmt,
update_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if update_stmt._whereclause is not None:
t = self.process(update_stmt._whereclause, **kw)
if t:
text += " WHERE " + t
limit_clause = self.update_limit_clause(update_stmt)
if limit_clause:
text += " " + limit_clause
if (self.returning or update_stmt._returning) and \
not self.returning_precedes_values:
text += " " + self.returning_clause(
update_stmt, self.returning or update_stmt._returning)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
@util.memoized_property
def _key_getters_for_crud_column(self):
return crud._key_getters_for_crud_column(self, self.statement)
def delete_extra_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints, **kw):
"""Provide a hook to override the generation of an
DELETE..FROM clause.
This can be used to implement DELETE..USING for example.
MySQL and MSSQL override this.
"""
raise NotImplementedError(
"This backend does not support multiple-table "
"criteria within DELETE")
def delete_table_clause(self, delete_stmt, from_table,
extra_froms):
return from_table._compiler_dispatch(self, asfrom=True, iscrud=True)
def visit_delete(self, delete_stmt, asfrom=False, **kw):
toplevel = not self.stack
self.stack.append({'correlate_froms': {delete_stmt.table},
"asfrom_froms": {delete_stmt.table},
"selectable": delete_stmt})
crud._setup_crud_params(self, delete_stmt, crud.ISDELETE, **kw)
extra_froms = delete_stmt._extra_froms
text = "DELETE "
if delete_stmt._prefixes:
text += self._generate_prefixes(delete_stmt,
delete_stmt._prefixes, **kw)
text += "FROM "
table_text = self.delete_table_clause(delete_stmt, delete_stmt.table,
extra_froms)
if delete_stmt._hints:
dialect_hints, table_text = self._setup_crud_hints(
delete_stmt, table_text)
else:
dialect_hints = None
text += table_text
if delete_stmt._returning:
if self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if extra_froms:
extra_from_text = self.delete_extra_from_clause(
delete_stmt,
delete_stmt.table,
extra_froms,
dialect_hints, **kw)
if extra_from_text:
text += " " + extra_from_text
if delete_stmt._whereclause is not None:
t = delete_stmt._whereclause._compiler_dispatch(self, **kw)
if t:
text += " WHERE " + t
if delete_stmt._returning and not self.returning_precedes_values:
text += " " + self.returning_clause(
delete_stmt, delete_stmt._returning)
if self.ctes and toplevel:
text = self._render_cte_clause() + text
self.stack.pop(-1)
if asfrom:
return "(" + text + ")"
else:
return text
def visit_savepoint(self, savepoint_stmt):
return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return "ROLLBACK TO SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
def visit_release_savepoint(self, savepoint_stmt):
return "RELEASE SAVEPOINT %s" % \
self.preparer.format_savepoint(savepoint_stmt)
class StrSQLCompiler(SQLCompiler):
""""a compiler subclass with a few non-standard SQL features allowed.
Used for stringification of SQL statements when a real dialect is not
available.
"""
def _fallback_column_name(self, column):
return "<name unknown>"
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def returning_clause(self, stmt, returning_cols):
columns = [
self._label_select_column(None, c, True, False, {})
for c in elements._select_iterables(returning_cols)
]
return 'RETURNING ' + ', '.join(columns)
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
def delete_extra_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
return ', ' + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in extra_froms)
class DDLCompiler(Compiled):
@util.memoized_property
def sql_compiler(self):
return self.dialect.statement_compiler(self.dialect, None)
@util.memoized_property
def type_compiler(self):
return self.dialect.type_compiler
def construct_params(self, params=None):
return None
def visit_ddl(self, ddl, **kwargs):
# table events can substitute table and schema name
context = ddl.context
if isinstance(ddl.target, schema.Table):
context = context.copy()
preparer = self.preparer
path = preparer.format_table_seq(ddl.target)
if len(path) == 1:
table, sch = path[0], ''
else:
table, sch = path[-1], path[0]
context.setdefault('table', table)
context.setdefault('schema', sch)
context.setdefault('fullname', preparer.format_table(ddl.target))
return self.sql_compiler.post_process_text(ddl.statement % context)
def visit_create_schema(self, create):
schema = self.preparer.format_schema(create.element)
return "CREATE SCHEMA " + schema
def visit_drop_schema(self, drop):
schema = self.preparer.format_schema(drop.element)
text = "DROP SCHEMA " + schema
if drop.cascade:
text += " CASCADE"
return text
def visit_create_table(self, create):
table = create.element
preparer = self.preparer
text = "\nCREATE "
if table._prefixes:
text += " ".join(table._prefixes) + " "
text += "TABLE " + preparer.format_table(table) + " "
create_table_suffix = self.create_table_suffix(table)
if create_table_suffix:
text += create_table_suffix + " "
text += "("
separator = "\n"
# if only one primary key, specify it along with the column
first_pk = False
for create_column in create.columns:
column = create_column.element
try:
processed = self.process(create_column,
first_pk=column.primary_key
and not first_pk)
if processed is not None:
text += separator
separator = ", \n"
text += "\t" + processed
if column.primary_key:
first_pk = True
except exc.CompileError as ce:
util.raise_from_cause(
exc.CompileError(
util.u("(in table '%s', column '%s'): %s") %
(table.description, column.name, ce.args[0])
))
const = self.create_table_constraints(
table, _include_foreign_key_constraints= # noqa
create.include_foreign_key_constraints)
if const:
text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
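    # Illustrative note (not part of the original source): for a table declared
    # roughly as Table('user', metadata, Column('id', Integer, primary_key=True),
    # Column('name', String(50))), this method emits something like
    #   CREATE TABLE user (
    #       id INTEGER NOT NULL,
    #       name VARCHAR(50),
    #       PRIMARY KEY (id)
    #   )
    # with dialect-specific prefixes and suffixes supplied by the hooks above.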
def visit_create_column(self, create, first_pk=False):
column = create.element
if column.system:
return None
text = self.get_column_specification(
column,
first_pk=first_pk
)
const = " ".join(self.process(constraint)
for constraint in column.constraints)
if const:
text += " " + const
return text
def create_table_constraints(
self, table,
_include_foreign_key_constraints=None):
# On some DB order is significant: visit PK first, then the
# other constraints (engine.ReflectionTest.testbasic failed on FB2)
constraints = []
if table.primary_key:
constraints.append(table.primary_key)
all_fkcs = table.foreign_key_constraints
if _include_foreign_key_constraints is not None:
omit_fkcs = all_fkcs.difference(_include_foreign_key_constraints)
else:
omit_fkcs = set()
constraints.extend([c for c in table._sorted_constraints
if c is not table.primary_key and
c not in omit_fkcs])
return ", \n\t".join(
p for p in
(self.process(constraint)
for constraint in constraints
if (
constraint._create_rule is None or
constraint._create_rule(self))
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
)) if p is not None
)
def visit_drop_table(self, drop):
return "\nDROP TABLE " + self.preparer.format_table(drop.element)
def visit_drop_view(self, drop):
return "\nDROP VIEW " + self.preparer.format_table(drop.element)
def _verify_index_table(self, index):
if index.table is None:
raise exc.CompileError("Index '%s' is not associated "
"with any table." % index.name)
def visit_create_index(self, create, include_schema=False,
include_table_schema=True):
index = create.element
self._verify_index_table(index)
preparer = self.preparer
text = "CREATE "
if index.unique:
text += "UNIQUE "
text += "INDEX %s ON %s (%s)" \
% (
self._prepared_index_name(index,
include_schema=include_schema),
preparer.format_table(index.table,
use_schema=include_table_schema),
', '.join(
self.sql_compiler.process(
expr, include_table=False, literal_binds=True) for
expr in index.expressions)
)
return text
def visit_drop_index(self, drop):
index = drop.element
return "\nDROP INDEX " + self._prepared_index_name(
index, include_schema=True)
def _prepared_index_name(self, index, include_schema=False):
if index.table is not None:
effective_schema = self.preparer.schema_for_object(index.table)
else:
effective_schema = None
if include_schema and effective_schema:
schema_name = self.preparer.quote_schema(effective_schema)
else:
schema_name = None
ident = index.name
if isinstance(ident, elements._truncated_label):
max_ = self.dialect.max_index_name_length or \
self.dialect.max_identifier_length
if len(ident) > max_:
ident = ident[0:max_ - 8] + \
"_" + util.md5_hex(ident)[-4:]
else:
self.dialect.validate_identifier(ident)
index_name = self.preparer.quote(ident)
if schema_name:
index_name = schema_name + "." + index_name
return index_name
def visit_add_constraint(self, create):
return "ALTER TABLE %s ADD %s" % (
self.preparer.format_table(create.element.table),
self.process(create.element)
)
def visit_set_table_comment(self, create):
return "COMMENT ON TABLE %s IS %s" % (
self.preparer.format_table(create.element),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String())
)
def visit_drop_table_comment(self, drop):
return "COMMENT ON TABLE %s IS NULL" % \
self.preparer.format_table(drop.element)
def visit_set_column_comment(self, create):
return "COMMENT ON COLUMN %s IS %s" % (
self.preparer.format_column(
create.element, use_table=True, use_schema=True),
self.sql_compiler.render_literal_value(
create.element.comment, sqltypes.String())
)
def visit_drop_column_comment(self, drop):
return "COMMENT ON COLUMN %s IS NULL" % \
self.preparer.format_column(drop.element, use_table=True)
def visit_create_sequence(self, create):
text = "CREATE SEQUENCE %s" % \
self.preparer.format_sequence(create.element)
if create.element.increment is not None:
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
if create.element.minvalue is not None:
text += " MINVALUE %d" % create.element.minvalue
if create.element.maxvalue is not None:
text += " MAXVALUE %d" % create.element.maxvalue
if create.element.nominvalue is not None:
text += " NO MINVALUE"
if create.element.nomaxvalue is not None:
text += " NO MAXVALUE"
if create.element.cache is not None:
text += " CACHE %d" % create.element.cache
if create.element.order is True:
text += " ORDER"
if create.element.cycle is not None:
text += " CYCLE"
return text
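    # Illustrative note (not part of the original source): a Sequence such as
    # Sequence('order_id_seq', start=1, increment=1) would be rendered here
    # roughly as "CREATE SEQUENCE order_id_seq INCREMENT BY 1 START WITH 1";
    # the remaining options (MINVALUE, CACHE, CYCLE, ...) are appended only
    # when they are set on the Sequence object.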
def visit_drop_sequence(self, drop):
return "DROP SEQUENCE %s" % \
self.preparer.format_sequence(drop.element)
def visit_drop_constraint(self, drop):
constraint = drop.element
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
else:
formatted_name = None
if formatted_name is None:
raise exc.CompileError(
"Can't emit DROP CONSTRAINT for constraint %r; "
"it has no name" % drop.element)
return "ALTER TABLE %s DROP CONSTRAINT %s%s" % (
self.preparer.format_table(drop.element.table),
formatted_name,
drop.cascade and " CASCADE" or ""
)
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
self.dialect.type_compiler.process(
column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
if not column.nullable:
colspec += " NOT NULL"
return colspec
def create_table_suffix(self, table):
return ''
def post_create_table(self, table):
return ''
def get_column_default_string(self, column):
if isinstance(column.server_default, schema.DefaultClause):
if isinstance(column.server_default.arg, util.string_types):
return self.sql_compiler.render_literal_value(
column.server_default.arg, sqltypes.STRINGTYPE)
else:
return self.sql_compiler.process(
column.server_default.arg, literal_binds=True)
else:
return None
def visit_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_column_check_constraint(self, constraint):
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "CHECK (%s)" % self.sql_compiler.process(constraint.sqltext,
include_table=False,
literal_binds=True)
text += self.define_constraint_deferrability(constraint)
return text
def visit_primary_key_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
for c in (constraint.columns_autoinc_first
if constraint._implicit_generated
else constraint.columns))
text += self.define_constraint_deferrability(constraint)
return text
def visit_foreign_key_constraint(self, constraint):
preparer = self.preparer
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
if formatted_name is not None:
text += "CONSTRAINT %s " % formatted_name
remote_table = list(constraint.elements)[0].column.table
text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % (
', '.join(preparer.quote(f.parent.name)
for f in constraint.elements),
self.define_constraint_remote_table(
constraint, remote_table, preparer),
', '.join(preparer.quote(f.column.name)
for f in constraint.elements)
)
text += self.define_constraint_match(constraint)
text += self.define_constraint_cascades(constraint)
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_remote_table(self, constraint, table, preparer):
"""Format the remote table clause of a CREATE CONSTRAINT clause."""
return preparer.format_table(table)
def visit_unique_constraint(self, constraint):
if len(constraint) == 0:
return ''
text = ""
if constraint.name is not None:
formatted_name = self.preparer.format_constraint(constraint)
text += "CONSTRAINT %s " % formatted_name
text += "UNIQUE (%s)" % (
', '.join(self.preparer.quote(c.name)
for c in constraint))
text += self.define_constraint_deferrability(constraint)
return text
def define_constraint_cascades(self, constraint):
text = ""
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
if constraint.onupdate is not None:
text += " ON UPDATE %s" % constraint.onupdate
return text
def define_constraint_deferrability(self, constraint):
text = ""
if constraint.deferrable is not None:
if constraint.deferrable:
text += " DEFERRABLE"
else:
text += " NOT DEFERRABLE"
if constraint.initially is not None:
text += " INITIALLY %s" % constraint.initially
return text
def define_constraint_match(self, constraint):
text = ""
if constraint.match is not None:
text += " MATCH %s" % constraint.match
return text
class GenericTypeCompiler(TypeCompiler):
def visit_FLOAT(self, type_, **kw):
return "FLOAT"
def visit_REAL(self, type_, **kw):
return "REAL"
def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return "NUMERIC"
elif type_.scale is None:
return "NUMERIC(%(precision)s)" % \
{'precision': type_.precision}
else:
return "NUMERIC(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return "DECIMAL"
elif type_.scale is None:
return "DECIMAL(%(precision)s)" % \
{'precision': type_.precision}
else:
return "DECIMAL(%(precision)s, %(scale)s)" % \
{'precision': type_.precision,
'scale': type_.scale}
def visit_INTEGER(self, type_, **kw):
return "INTEGER"
def visit_SMALLINT(self, type_, **kw):
return "SMALLINT"
def visit_BIGINT(self, type_, **kw):
return "BIGINT"
def visit_TIMESTAMP(self, type_, **kw):
return 'TIMESTAMP'
def visit_DATETIME(self, type_, **kw):
return "DATETIME"
def visit_DATE(self, type_, **kw):
return "DATE"
def visit_TIME(self, type_, **kw):
return "TIME"
def visit_CLOB(self, type_, **kw):
return "CLOB"
def visit_NCLOB(self, type_, **kw):
return "NCLOB"
def _render_string_type(self, type_, name):
text = name
if type_.length:
text += "(%d)" % type_.length
if type_.collation:
text += ' COLLATE "%s"' % type_.collation
return text
def visit_CHAR(self, type_, **kw):
return self._render_string_type(type_, "CHAR")
def visit_NCHAR(self, type_, **kw):
return self._render_string_type(type_, "NCHAR")
def visit_VARCHAR(self, type_, **kw):
return self._render_string_type(type_, "VARCHAR")
def visit_NVARCHAR(self, type_, **kw):
return self._render_string_type(type_, "NVARCHAR")
def visit_TEXT(self, type_, **kw):
return self._render_string_type(type_, "TEXT")
def visit_BLOB(self, type_, **kw):
return "BLOB"
def visit_BINARY(self, type_, **kw):
return "BINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY" + (type_.length and "(%d)" % type_.length or "")
def visit_BOOLEAN(self, type_, **kw):
return "BOOLEAN"
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_, **kw)
def visit_boolean(self, type_, **kw):
return self.visit_BOOLEAN(type_, **kw)
def visit_time(self, type_, **kw):
return self.visit_TIME(type_, **kw)
def visit_datetime(self, type_, **kw):
return self.visit_DATETIME(type_, **kw)
def visit_date(self, type_, **kw):
return self.visit_DATE(type_, **kw)
def visit_big_integer(self, type_, **kw):
return self.visit_BIGINT(type_, **kw)
def visit_small_integer(self, type_, **kw):
return self.visit_SMALLINT(type_, **kw)
def visit_integer(self, type_, **kw):
return self.visit_INTEGER(type_, **kw)
def visit_real(self, type_, **kw):
return self.visit_REAL(type_, **kw)
def visit_float(self, type_, **kw):
return self.visit_FLOAT(type_, **kw)
def visit_numeric(self, type_, **kw):
return self.visit_NUMERIC(type_, **kw)
def visit_string(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_unicode(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_unicode_text(self, type_, **kw):
return self.visit_TEXT(type_, **kw)
def visit_enum(self, type_, **kw):
return self.visit_VARCHAR(type_, **kw)
def visit_null(self, type_, **kw):
raise exc.CompileError("Can't generate DDL for %r; "
"did you forget to specify a "
"type on this Column?" % type_)
def visit_type_decorator(self, type_, **kw):
return self.process(type_.type_engine(self.dialect), **kw)
def visit_user_defined(self, type_, **kw):
return type_.get_col_spec(**kw)
class StrSQLTypeCompiler(GenericTypeCompiler):
def __getattr__(self, key):
if key.startswith("visit_"):
return self._visit_unknown
else:
raise AttributeError(key)
def _visit_unknown(self, type_, **kw):
return "%s" % type_.__class__.__name__
class IdentifierPreparer(object):
"""Handle quoting and case-folding of identifiers based on options."""
reserved_words = RESERVED_WORDS
legal_characters = LEGAL_CHARACTERS
illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS
schema_for_object = schema._schema_getter(None)
def __init__(self, dialect, initial_quote='"',
final_quote=None, escape_quote='"', omit_schema=False):
"""Construct a new ``IdentifierPreparer`` object.
initial_quote
Character that begins a delimited identifier.
final_quote
Character that ends a delimited identifier. Defaults to
`initial_quote`.
omit_schema
Prevent prepending schema name. Useful for databases that do
          not support schemas.
"""
self.dialect = dialect
self.initial_quote = initial_quote
self.final_quote = final_quote or self.initial_quote
self.escape_quote = escape_quote
self.escape_to_quote = self.escape_quote * 2
self.omit_schema = omit_schema
self._strings = {}
self._double_percents = self.dialect.paramstyle in ('format', 'pyformat')
def _with_schema_translate(self, schema_translate_map):
prep = self.__class__.__new__(self.__class__)
prep.__dict__.update(self.__dict__)
prep.schema_for_object = schema._schema_getter(schema_translate_map)
return prep
def _escape_identifier(self, value):
"""Escape an identifier.
Subclasses should override this to provide database-dependent
escaping behavior.
"""
value = value.replace(self.escape_quote, self.escape_to_quote)
if self._double_percents:
value = value.replace('%', '%%')
return value
def _unescape_identifier(self, value):
"""Canonicalize an escaped identifier.
Subclasses should override this to provide database-dependent
unescaping behavior that reverses _escape_identifier.
"""
return value.replace(self.escape_to_quote, self.escape_quote)
def quote_identifier(self, value):
"""Quote an identifier.
Subclasses should override this to provide database-dependent
quoting behavior.
"""
return self.initial_quote + \
self._escape_identifier(value) + \
self.final_quote
def _requires_quotes(self, value):
"""Return True if the given identifier requires quoting."""
lc_value = value.lower()
return (lc_value in self.reserved_words
or value[0] in self.illegal_initial_characters
or not self.legal_characters.match(util.text_type(value))
or (lc_value != value))
def quote_schema(self, schema, force=None):
"""Conditionally quote a schema.
Subclasses can override this to provide database-dependent
quoting behavior for schema names.
the 'force' flag should be considered deprecated.
"""
return self.quote(schema, force)
def quote(self, ident, force=None):
"""Conditionally quote an identifier.
the 'force' flag should be considered deprecated.
"""
force = getattr(ident, "quote", None)
if force is None:
if ident in self._strings:
return self._strings[ident]
else:
if self._requires_quotes(ident):
self._strings[ident] = self.quote_identifier(ident)
else:
self._strings[ident] = ident
return self._strings[ident]
elif force:
return self.quote_identifier(ident)
else:
return ident
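    # Illustrative note (not part of the original source): with the default rules
    # above, quote("select") returns '"select"' (reserved word), quote("MyTable")
    # returns '"MyTable"' (case folding would lose information), while
    # quote("mytable") comes back unquoted; results are memoized in self._strings.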
def format_sequence(self, sequence, use_schema=True):
name = self.quote(sequence.name)
effective_schema = self.schema_for_object(sequence)
if (not self.omit_schema and use_schema and
effective_schema is not None):
name = self.quote_schema(effective_schema) + "." + name
return name
def format_label(self, label, name=None):
return self.quote(name or label.name)
def format_alias(self, alias, name=None):
return self.quote(name or alias.name)
def format_savepoint(self, savepoint, name=None):
# Running the savepoint name through quoting is unnecessary
# for all known dialects. This is here to support potential
# third party use cases
ident = name or savepoint.ident
if self._requires_quotes(ident):
ident = self.quote_identifier(ident)
return ident
@util.dependencies("sqlalchemy.sql.naming")
def format_constraint(self, naming, constraint):
if isinstance(constraint.name, elements._defer_name):
name = naming._constraint_name_for_table(
constraint, constraint.table)
if name:
return self.quote(name)
elif isinstance(constraint.name, elements._defer_none_name):
return None
return self.quote(constraint.name)
def format_table(self, table, use_schema=True, name=None):
"""Prepare a quoted table and schema name."""
if name is None:
name = table.name
result = self.quote(name)
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema \
and effective_schema:
result = self.quote_schema(effective_schema) + "." + result
return result
def format_schema(self, name, quote=None):
"""Prepare a quoted schema name."""
return self.quote(name, quote)
def format_column(self, column, use_table=False,
name=None, table_name=None, use_schema=False):
"""Prepare a quoted column name."""
if name is None:
name = column.name
if not getattr(column, 'is_literal', False):
if use_table:
return self.format_table(
column.table, use_schema=use_schema,
name=table_name) + "." + self.quote(name)
else:
return self.quote(name)
else:
# literal textual elements get stuck into ColumnClause a lot,
# which shouldn't get quoted
if use_table:
return self.format_table(
column.table, use_schema=use_schema,
name=table_name) + '.' + name
else:
return name
def format_table_seq(self, table, use_schema=True):
"""Format table name and schema as a tuple."""
# Dialects with more levels in their fully qualified references
# ('database', 'owner', etc.) could override this and return
# a longer sequence.
effective_schema = self.schema_for_object(table)
if not self.omit_schema and use_schema and \
effective_schema:
return (self.quote_schema(effective_schema),
self.format_table(table, use_schema=False))
else:
return (self.format_table(table, use_schema=False), )
@util.memoized_property
def _r_identifiers(self):
initial, final, escaped_final = \
[re.escape(s) for s in
(self.initial_quote, self.final_quote,
self._escape_identifier(self.final_quote))]
r = re.compile(
r'(?:'
r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s'
r'|([^\.]+))(?=\.|$))+' %
{'initial': initial,
'final': final,
'escaped': escaped_final})
return r
def unformat_identifiers(self, identifiers):
"""Unpack 'schema.table.column'-like strings into components."""
r = self._r_identifiers
return [self._unescape_identifier(i)
for i in [a or b for a, b in r.findall(identifiers)]]
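    # Illustrative note (not part of the original source):
    # preparer.unformat_identifiers('myschema."My Table"') would yield
    # ['myschema', 'My Table'], reversing the quoting applied by
    # quote_identifier() one dotted component at a time.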
|
Haynie-Research-and-Development/jarvis
|
deps/lib/python3.4/site-packages/sqlalchemy/sql/compiler.py
|
Python
|
gpl-2.0
| 113,402
|
[
"VisIt"
] |
e4e803bab48211590ad062ea20655cf6b5616dee9224dfd0333731199f2c956b
|
#
# LAMMPS.py
#
# Interface to LAMMPS (http://lammps.sandia.gov)
#
# Copyright (c) 2017 Terumasa Tadano
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np
def read_lammps_structure(file_in):
f = open(file_in, 'r')
header_comment = f.readline()
common_settings = []
for line in f:
if "Atoms" in line:
break
common_settings.append(line.rstrip())
atoms = []
for line in f:
if line.strip():
atoms.append(line.rstrip().split())
atoms = np.array(atoms)
nat = len(atoms)
    kd = np.array(atoms[:, 1], dtype=int)  # np.int was removed in recent NumPy releases
x = np.array(atoms[:, 2:5], dtype=np.float64)
return common_settings, nat, x, kd
def write_lammps_structure(prefix, counter, header, nzerofills,
common_settings, nat, kd, x_cart, disp):
filename = prefix + str(counter).zfill(nzerofills) + ".lammps"
f = open(filename, 'w')
f.write("%s\n" % header)
for line in common_settings:
f.write("%s\n" % line)
f.write("%s\n\n" % "Atoms")
for i in range(nat):
f.write("%5d %3d" % (i + 1, kd[i]))
for j in range(3):
f.write("%20.15f" % (x_cart[i][j] + disp[i][j]))
f.write("\n")
f.write("\n")
f.close()
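# Illustrative usage sketch (not part of the original file); the file names and
# displacement pattern below are hypothetical:
#
#   common, nat, x, kd = read_lammps_structure("supercell.lammps")
#   disp = 0.01 * np.random.randn(nat, 3)
#   write_lammps_structure("disp", 1, "displaced structure", 3,
#                          common, nat, kd, x, disp)
#
# which writes "disp001.lammps" containing the displaced Cartesian coordinates.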
def get_coordinate_LAMMPS(lammps_dump_file):
add_flag = False
coord = []
with open(lammps_dump_file) as f:
for line in f:
if "ITEM:" in line and "ITEM: ATOMS id xu yu zu" not in line:
add_flag = False
continue
elif "ITEM: ATOMS id xu yu zu" in line:
add_flag = True
continue
if add_flag:
if line.strip():
entries = line.strip().split()
coord_atom = [int(entries[0]), [float(t) for t in entries[1:]]]
coord.append(coord_atom)
    # This sort is necessary since the order of atoms in LAMMPS dump files
    # may differ from the order in the input structure file.
coord_sorted = sorted(coord)
coord = []
for coord_atom in coord_sorted:
coord.extend(coord_atom[1])
return np.array(coord)
def get_atomicforces_LAMMPS(lammps_dump_file):
add_flag = False
force = []
with open(lammps_dump_file) as f:
for line in f:
if "ITEM:" in line and "ITEM: ATOMS id fx fy fz " not in line:
add_flag = False
continue
elif "ITEM: ATOMS id fx fy fz " in line:
add_flag = True
continue
if add_flag:
if line.strip():
entries = line.strip().split()
force_atom = [int(entries[0]), [float(t) for t in entries[1:]]]
force.append(force_atom)
force_sorted = sorted(force)
force = []
for force_atom in force_sorted:
force.extend(force_atom[1])
return np.array(force)
def get_coordinate_and_force_LAMMPS(lammps_dump_file):
add_flag = False
ret = []
with open(lammps_dump_file) as f:
for line in f:
if "ITEM:" in line and "ITEM: ATOMS id xu yu zu fx fy fz" not in line:
add_flag = False
continue
elif "ITEM: ATOMS id xu yu zu fx fy fz" in line:
add_flag = True
continue
if add_flag:
if line.strip():
entries = line.strip().split()
data_atom = [int(entries[0]),
[float(t) for t in entries[1:4]],
[float(t) for t in entries[4:]]]
ret.append(data_atom)
    # This sort is necessary since the order of atoms in LAMMPS dump files
    # may differ from the order in the input structure file.
ret_sorted = sorted(ret)
ret_x = []
ret_f = []
for ret_atom in ret_sorted:
ret_x.extend(ret_atom[1])
ret_f.extend(ret_atom[2])
return np.array(ret_x), np.array(ret_f)
def print_displacements_LAMMPS(lammps_files, nat, x_cart0,
conversion_factor, file_offset):
if file_offset is None:
disp_offset = np.zeros((nat, 3))
else:
        _, nat_tmp, x0_offset, _ = read_lammps_structure(file_offset)
if nat_tmp != nat:
print(
"File %s contains too many/few position entries" % file_offset)
disp_offset = x0_offset - x_cart0
# Automatic detection of the input format
is_dumped_file = False
f = open(lammps_files[0], 'r')
for line in f:
if "ITEM: TIMESTEP" in line:
is_dumped_file = True
break
f.close()
if is_dumped_file:
## This version supports reading the data from MD trajectory
for search_target in lammps_files:
x = get_coordinate_LAMMPS(search_target)
ndata = len(x) // (3 * nat)
x = np.reshape(x, (ndata, nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - x_cart0 - disp_offset
disp *= conversion_factor
for i in range(nat):
print("%20.14f %20.14f %20.14f" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
else:
for search_target in lammps_files:
_, nat_tmp, x_cart, _ = read_lammps_structure(search_target)
if nat_tmp != nat:
print("File %s contains too many/few position entries" %
search_target)
disp = x_cart - x_cart0 - disp_offset
disp *= conversion_factor
for i in range(nat):
print("%20.14f %20.14f %20.14f" % (disp[i, 0],
disp[i, 1],
disp[i, 2]))
def print_atomicforces_LAMMPS(lammps_files, nat,
conversion_factor, file_offset):
if file_offset is None:
force_offset = np.zeros((nat, 3))
else:
data = get_atomicforces_LAMMPS(file_offset)
try:
force_offset = np.reshape(data, (nat, 3))
except:
print("File %s contains too many position entries" % file_offset)
# Automatic detection of the input format
is_dumped_file = False
f = open(lammps_files[0], 'r')
for line in f:
if "ITEM: TIMESTEP" in line:
is_dumped_file = True
break
f.close()
for search_target in lammps_files:
force = get_atomicforces_LAMMPS(search_target)
ndata = len(force) // (3 * nat)
force = np.reshape(force, (ndata, nat, 3))
for idata in range(ndata):
f = force[idata, :, :] - force_offset
f *= conversion_factor
for i in range(nat):
print("%19.11E %19.11E %19.11E" % (f[i][0], f[i][1], f[i][2]))
def print_displacements_and_forces_LAMMPS(lammps_files, nat,
x_cart0,
conversion_factor_disp,
conversion_factor_force,
file_offset):
if file_offset is None:
disp_offset = np.zeros((nat, 3))
force_offset = np.zeros((nat, 3))
else:
x0_offset, force_offset = get_coordinate_and_force_LAMMPS(file_offset)
try:
x0_offset = np.reshape(x0_offset, (nat, 3))
force_offset = np.reshape(force_offset, (nat, 3))
except:
print("File %s contains too many/few entries" % file_offset)
disp_offset = x0_offset - x_cart0
# Automatic detection of the input format
is_dumped_file = False
f = open(lammps_files[0], 'r')
for line in f:
if "ITEM: TIMESTEP" in line:
is_dumped_file = True
break
f.close()
if is_dumped_file:
## This version supports reading the data from MD trajectory
for search_target in lammps_files:
x, force = get_coordinate_and_force_LAMMPS(search_target)
ndata = len(x) // (3 * nat)
x = np.reshape(x, (ndata, nat, 3))
force = np.reshape(force, (ndata, nat, 3))
for idata in range(ndata):
disp = x[idata, :, :] - x_cart0 - disp_offset
disp *= conversion_factor_disp
f = force[idata, :, :] - force_offset
f *= conversion_factor_force
for i in range(nat):
print("%20.14f %20.14f %20.14f %20.8E %15.8E %15.8E" % (disp[i, 0],
disp[i, 1],
disp[i, 2],
f[i, 0],
f[i, 1],
f[i, 2]))
def get_unit_conversion_factor(str_unit):
Bohr_radius = 0.52917721067
Rydberg_to_eV = 13.60569253
disp_conv_factor = 1.0
energy_conv_factor = 1.0
force_conv_factor = 1.0
if str_unit== "ev":
disp_conv_factor = 1.0
energy_conv_factor = 1.0
elif str_unit == "rydberg":
disp_conv_factor = 1.0 / Bohr_radius
energy_conv_factor = 1.0 / Rydberg_to_eV
elif str_unit == "hartree":
disp_conv_factor = 1.0 / Bohr_radius
energy_conv_factor = 0.5 / Rydberg_to_eV
else:
print("This cannot happen")
exit(1)
force_conv_factor = energy_conv_factor / disp_conv_factor
return disp_conv_factor, force_conv_factor, energy_conv_factor
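# Illustrative note (not part of the original file): for str_unit == "rydberg"
# the factors above convert displacements from Angstrom to Bohr (1 / 0.5292)
# and forces from eV/Angstrom to Ry/Bohr (0.5292 / 13.6057, about 0.0389);
# for "ev" both displacements and forces are left in Angstrom and eV.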
def parse(lammps_init, dump_files, dump_file_offset, str_unit,
print_disp, print_force, print_energy):
_, nat, x_cart0, _ = read_lammps_structure(lammps_init)
scale_disp, scale_force, _ = get_unit_conversion_factor(str_unit)
if print_disp == True and print_force == True:
print_displacements_and_forces_LAMMPS(dump_files, nat,
x_cart0,
scale_disp,
scale_force,
dump_file_offset)
elif print_disp == True:
print_displacements_LAMMPS(dump_files, nat, x_cart0,
scale_disp,
dump_file_offset)
elif print_force == True:
print_atomicforces_LAMMPS(dump_files, nat,
scale_force,
dump_file_offset)
elif print_energy == True:
print("Error: --get energy is not supported for LAMMPS")
exit(1)
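# Illustrative usage sketch (not part of the original file); the file names are
# hypothetical:
#
#   parse("supercell.lammps", ["dump.1", "dump.2"], None, "rydberg",
#         print_disp=True, print_force=True, print_energy=False)
#
# prints displacement/force pairs in Rydberg atomic units for every snapshot
# found in the listed dump files.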
|
ttadano/ALM
|
tools/interface/LAMMPS.py
|
Python
|
mit
| 11,171
|
[
"LAMMPS"
] |
7a180d028e54c9b9d90dba6a9b9e10ca687e152676ec24501791d446fcd98526
|
from flask import Flask, render_template, session, request, redirect
import random
app = Flask(__name__)
app.secret_key = 'my_secret_key'
@app.route('/')
def index():
    if 'gold' not in session:
        session['gold'] = 0
    if 'activities' not in session:
        session['activities'] = []
return render_template('index.html')
@app.route('/process', methods = ['POST'])
def process():
buildings = {
'farm':random.randint(10,20),
'casino':random.randint(-50,50),
'cave':random.randint(5,10),
'house':random.randint(2,5)
}
if request.form['building'] in buildings:
result = buildings[request.form['building']]
session['gold'] = session['gold']+result
result_dictionary = {
'class': ('red','green')[result > 0],
'activity': "You went to the {} and {} {} gold!".format(request.form['building'], ('lost','gained')[result > 0], result)
}
session['activities'].append(result_dictionary)
return redirect('/')
if __name__ == '__main__':
app.run(debug = True)
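# Illustrative note (not part of the original file): the index view renders
# index.html with the session's gold total and activity log, while a form POST
# to /process with building set to farm, casino, cave or house adjusts
# session['gold'] by the random amount defined above and redirects back to '/'.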
|
jiobert/python
|
Nelson_Alvarez/Assignments/flask_fund/ninja_gold/server.py
|
Python
|
mit
| 1,138
|
[
"CASINO"
] |
84ae50afbf21248879a4c3545998622f015dba6315f83b93443394665943b237
|
""" NotificationDB class is a front-end to the Notifications database
"""
import time
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities.Mail import Mail
from DIRAC.Core.Base.DB import DB
from DIRAC.Core.Utilities import DEncode
from DIRAC.ConfigurationSystem.Client.Helpers import Registry
class NotificationDB(DB):
def __init__(self):
DB.__init__(self, "NotificationDB", "Framework/NotificationDB")
result = self.__initializeDB()
if not result["OK"]:
self.log.fatal("Cannot initialize DB!", result["Message"])
self.__alarmQueryFields = [
"alarmid",
"author",
"creationtime",
"modtime",
"subject",
"status",
"priority",
"notifications",
"body",
"assignee",
"alarmkey",
]
self.__alarmLogFields = ["timestamp", "author", "comment", "modifications"]
self.__notificationQueryFields = ("id", "user", "seen", "message", "timestamp")
self.__newAlarmMandatoryFields = [
"author",
"subject",
"status",
"notifications",
"body",
"assignee",
"priority",
]
self.__updateAlarmIdentificationFields = ["id", "alarmKey"]
self.__updateAlarmMandatoryFields = ["author"]
self.__updateAlarmAtLeastOneField = ["comment", "modifications"]
self.__updateAlarmModificableFields = ["status", "assignee", "priority"]
self.__validAlarmStatus = ["Open", "OnGoing", "Closed", "Testing"]
self.__validAlarmNotifications = ["Web", "Mail", "SMS"]
self.__validAlarmPriorities = ["Low", "Medium", "High", "Extreme"]
def __initializeDB(self):
retVal = self._query("show tables")
if not retVal["OK"]:
return retVal
tablesInDB = [t[0] for t in retVal["Value"]]
tablesToCreate = {}
if "ntf_Alarms" not in tablesInDB:
tablesToCreate["ntf_Alarms"] = {
"Fields": {
"AlarmId": "INTEGER UNSIGNED AUTO_INCREMENT NOT NULL",
"AlarmKey": "VARCHAR(32) NOT NULL",
"Author": "VARCHAR(64) NOT NULL",
"CreationTime": "DATETIME NOT NULL",
"ModTime": "DATETIME NOT NULL",
"Subject": "VARCHAR(255) NOT NULL",
"Status": "VARCHAR(64) NOT NULL",
"Priority": "VARCHAR(32) NOT NULL",
"Body": "BLOB",
"Assignee": "VARCHAR(64) NOT NULL",
"Notifications": "VARCHAR(128) NOT NULL",
},
"PrimaryKey": "AlarmId",
"Indexes": {"Status": ["Status"], "Assignee": ["Assignee"]},
}
if "ntf_AssigneeGroups" not in tablesInDB:
tablesToCreate["ntf_AssigneeGroups"] = {
"Fields": {
"AssigneeGroup": "VARCHAR(64) NOT NULL",
"User": "VARCHAR(64) NOT NULL",
},
"Indexes": {"ag": ["AssigneeGroup"]},
}
if "ntf_AlarmLog" not in tablesInDB:
tablesToCreate["ntf_AlarmLog"] = {
"Fields": {
"AlarmId": "INTEGER UNSIGNED NOT NULL",
"Timestamp": "DATETIME NOT NULL",
"Author": "VARCHAR(64) NOT NULL",
"Comment": "BLOB",
"Modifications": "VARCHAR(255)",
},
"Indexes": {"AlarmID": ["AlarmId"]},
}
if "ntf_AlarmFollowers" not in tablesInDB:
tablesToCreate["ntf_AlarmFollowers"] = {
"Fields": {
"AlarmId": "INTEGER UNSIGNED NOT NULL",
"User": "VARCHAR(64) NOT NULL",
"Mail": "TINYINT(1) DEFAULT 0",
"Notification": "TINYINT(1) DEFAULT 1",
"SMS": "TINYINT(1) DEFAULT 0",
},
"Indexes": {"AlarmID": ["AlarmId"]},
}
if "ntf_Notifications" not in tablesInDB:
tablesToCreate["ntf_Notifications"] = {
"Fields": {
"Id": "INTEGER UNSIGNED AUTO_INCREMENT NOT NULL",
"User": "VARCHAR(64) NOT NULL",
"Message": "BLOB NOT NULL",
"Seen": "TINYINT(1) NOT NULL DEFAULT 0",
"Expiration": "DATETIME",
"Timestamp": "DATETIME",
"DeferToMail": "TINYINT(1) NOT NULL DEFAULT 1",
},
"PrimaryKey": "Id",
}
if tablesToCreate:
return self._createTables(tablesToCreate)
return S_OK()
def __checkAlarmField(self, name, value):
name = name.lower()
if name == "status":
if value not in self.__validAlarmStatus:
return S_ERROR("Status %s is invalid. Valid ones are: %s" % (value, self.__validAlarmStatus))
elif name == "priority":
if value not in self.__validAlarmPriorities:
return S_ERROR("Type %s is invalid. Valid ones are: %s" % (value, self.__validAlarmPriorities))
elif name == "assignee":
result = self.getUserAsignees(value)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("%s is not a known assignee" % value)
return result
return S_OK()
def newAlarm(self, alarmDef):
"""Create a new alarm record"""
followers = []
for field in self.__newAlarmMandatoryFields:
if field not in alarmDef:
return S_ERROR("Oops. Missing %s" % field)
result = self.__checkAlarmField(field, alarmDef[field])
if not result["OK"]:
return result
if field == "assignee":
followers = result["Value"]
author = alarmDef["author"]
if author not in followers:
followers.append(author)
sqlFieldsName = []
sqlFieldsValue = []
for field in self.__newAlarmMandatoryFields:
if field == "notifications":
notifications = {}
for nType in self.__validAlarmNotifications:
if nType in alarmDef[field]:
notifications[nType] = 1
else:
notifications[nType] = 0
val = DEncode.encode(notifications)
else:
val = alarmDef[field]
# Add to the list of fields to add
sqlFieldsName.append(field)
result = self._escapeString(val)
if result["OK"]:
sqlFieldsValue.append(result["Value"])
else:
return S_ERROR("Failed to escape value %s" % val)
sqlFieldsName.extend(["CreationTime", "ModTime"])
sqlFieldsValue.extend(["UTC_TIMESTAMP()", "UTC_TIMESTAMP()"])
# Get the defined alarmkey and generate a random one if not defined
if "alarmKey" in alarmDef:
result = self._escapeString(alarmDef["alarmKey"])
if result["OK"]:
alarmKey = result["Value"]
else:
return S_ERROR("Failed to escape value %s for key AlarmKey" % val)
gLogger.info("Checking there are no alarms with key %s" % alarmKey)
result = self._query("SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey)
if not result["OK"]:
return result
if result["Value"]:
return S_ERROR("Oops, alarm with id %s has the same alarm key!" % result["Value"][0][0])
else:
alarmKey = str(time.time())[-31:]
sqlFieldsName.append("AlarmKey")
sqlFieldsValue.append(alarmKey)
sqlInsert = "INSERT INTO `ntf_Alarms` (%s) VALUES (%s)" % (",".join(sqlFieldsName), ",".join(sqlFieldsValue))
result = self._update(sqlInsert)
if not result["OK"]:
return result
alarmId = result["lastRowId"]
for follower in followers:
result = self.modifyFollowerForAlarm(alarmId, follower, notifications)
if not result["OK"]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % (follower, alarmId, result["Message"])
self.log.error("Couldn't set follower for alarm", varMsg)
self.__notifyAlarm(alarmId)
return S_OK(alarmId)
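    # Illustrative sketch (not part of the original source): a minimal alarmDef
    # accepted by newAlarm() would look roughly like
    #   {"author": "someuser", "subject": "Disk full", "status": "Open",
    #    "priority": "High", "assignee": "someuser", "body": "details ...",
    #    "notifications": ["Web", "Mail"]}
    # where author/assignee must be users known to the Registry (or an assignee
    # group) and status/priority must be among the valid values set in __init__.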
def deleteAlarmsByAlarmKey(self, alarmKeyList):
alarmsIdList = []
for alarmKey in alarmKeyList:
result = self.__getAlarmIdFromKey(alarmKey)
if not result["OK"]:
return result
alarmId = result["Value"]
alarmsIdList.append(alarmId)
self.log.info("Trying to delete alarms with:\n alamKey %s\n alarmId %s" % (alarmKeyList, alarmsIdList))
return self.deleteAlarmsByAlarmId(alarmsIdList)
def deleteAlarmsByAlarmId(self, alarmIdList):
self.log.info("Trying to delete alarms with ids %s" % alarmIdList)
try:
alarmId = int(alarmIdList)
alarmIdList = [alarmId]
except Exception:
pass
try:
alarmIdList = [int(alarmId) for alarmId in alarmIdList]
except Exception:
self.log.error("At least one alarmId is not a number", str(alarmIdList))
return S_ERROR("At least one alarmId is not a number: %s" % str(alarmIdList))
tablesToCheck = ("ntf_AlarmLog", "ntf_AlarmFollowers", "ntf_Alarms")
alamsSQLList = ",".join(["%d" % alarmId for alarmId in alarmIdList])
for tableName in tablesToCheck:
delSql = "DELETE FROM `%s` WHERE AlarmId in ( %s )" % (tableName, alamsSQLList)
result = self._update(delSql)
if not result["OK"]:
self.log.error("Could not delete alarm", "from table %s: %s" % (tableName, result["Message"]))
return S_OK()
def __processUpdateAlarmModifications(self, modifications):
if not isinstance(modifications, dict):
return S_ERROR("Modifications must be a dictionary")
updateFields = []
followers = []
for field in modifications:
if field not in self.__updateAlarmModificableFields:
return S_ERROR("%s is not a valid modificable field" % field)
value = modifications[field]
result = self.__checkAlarmField(field, value)
if not result["OK"]:
return result
if field == "assignee":
followers = result["Value"]
result = self._escapeString(modifications[field])
if not result["OK"]:
return result
updateFields.append("%s=%s" % (field, result["Value"]))
return S_OK((", ".join(updateFields), DEncode.encode(modifications), followers))
def __getAlarmIdFromKey(self, alarmKey):
result = self._escapeString(alarmKey)
if not result["OK"]:
return S_ERROR("Cannot escape alarmKey %s" % alarmKey)
alarmKey = result["Value"]
sqlQuery = "SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmKey=%s" % alarmKey
result = self._query(sqlQuery)
if result["OK"]:
result["Value"] = result["Value"][0][0]
return result
def updateAlarm(self, updateReq):
# Discover alarm identification
idOK = False
for field in self.__updateAlarmIdentificationFields:
if field in updateReq:
idOK = True
if not idOK:
return S_ERROR(
"Need at least one field to identify which alarm to update! %s" % self.__updateAlarmIdentificationFields
)
if "alarmKey" in updateReq:
alarmKey = updateReq["alarmKey"]
result = self.__getAlarmIdFromKey(alarmKey)
if not result["OK"]:
self.log.error("Could not get alarm id for key", " %s: %s" % (alarmKey, result["Value"]))
return result
updateReq["id"] = result["Value"]
self.log.info("Retrieving alarm key %s maps to id %s" % (alarmKey, updateReq["id"]))
# Check fields
for field in self.__updateAlarmMandatoryFields:
if field not in updateReq:
return S_ERROR("Oops. Missing %s" % field)
validReq = False
for field in self.__updateAlarmAtLeastOneField:
if field in updateReq:
validReq = True
if not validReq:
return S_OK("Requirement needs at least one of %s" % " ".join(self.__updateAlarmAtLeastOneField))
author = updateReq["author"]
followers = [author]
if author not in Registry.getAllUsers():
return S_ERROR("%s is not a known user" % author)
result = self._escapeString(author)
if not result["OK"]:
return result
author = result["Value"]
try:
alarmId = int(updateReq["id"])
except Exception:
return S_ERROR("Oops, Alarm id is not valid!")
result = self._query("SELECT AlarmId FROM `ntf_Alarms` WHERE AlarmId=%d" % alarmId)
if not result["OK"]:
return result
if not result["Value"]:
return S_ERROR("Alarm %s does not exist!" % alarmId)
sqlFields = ["AlarmId", "Author", "Timestamp"]
sqlValues = ["%d" % alarmId, author, "UTC_TIMESTAMP()"]
rawComment = ""
if "comment" in updateReq:
rawComment = updateReq["comment"]
result = self._escapeString(rawComment)
if not result["OK"]:
return result
sqlFields.append("Comment")
sqlValues.append(result["Value"])
modifications = False
if "modifications" in updateReq:
modifications = updateReq["modifications"]
result = self.__processUpdateAlarmModifications(modifications)
if not result["OK"]:
return result
alarmModsSQL, encodedMods, newFollowers = result["Value"]
sqlFields.append("Modifications")
result = self._escapeString(encodedMods)
if not result["OK"]:
return result
sqlValues.append(result["Value"])
if newFollowers:
followers.extend(newFollowers)
logSQL = "INSERT INTO `ntf_AlarmLog` (%s) VALUES (%s)" % (",".join(sqlFields), ",".join(sqlValues))
result = self._update(logSQL)
if not result["OK"]:
return result
modSQL = "ModTime=UTC_TIMESTAMP()"
if modifications:
modSQL = "%s, %s" % (modSQL, alarmModsSQL)
updateSQL = "UPDATE `ntf_Alarms` SET %s WHERE AlarmId=%d" % (modSQL, alarmId)
result = self._update(updateSQL)
if not result["OK"]:
return result
# Get notifications config
sqlQuery = "SELECT Notifications FROM `ntf_Alarms` WHERE AlarmId=%s" % alarmId
result = self._query(sqlQuery)
if not result["OK"] or not result["Value"]:
self.log.error("Could not retrieve default notifications for alarm", "%s" % alarmId)
return S_OK(alarmId)
notificationsDict = DEncode.decode(result["Value"][0][0])[0]
for v in self.__validAlarmNotifications:
if v not in notificationsDict:
notificationsDict[v] = 0
for follower in followers:
result = self.modifyFollowerForAlarm(alarmId, follower, notificationsDict, overwrite=False)
if not result["OK"]:
varMsg = "\nFollower: %s\nAlarm: %s\nError: %s" % (follower, alarmId, result["Message"])
self.log.error("Couldn't set follower for alarm", varMsg)
return self.__notifyAlarm(alarmId)
def __notifyAlarm(self, alarmId):
result = self.getSubscribersForAlarm(alarmId)
if not result["OK"]:
return result
subscribers = result["Value"]
needLongText = False
if subscribers["mail"]:
needLongText = True
result = self.getAlarmInfo(alarmId)
if not result["OK"]:
return result
alarmInfo = result["Value"]
result = self.getAlarmLog(alarmId)
if not result["OK"]:
return result
alarmLog = result["Value"]
if subscribers["notification"]:
msg = self.__generateAlarmInfoMessage(alarmInfo)
logMsg = self.__generateAlarmLogMessage(alarmLog, True)
if logMsg:
msg = "%s\n\n%s\nLast modification:\n%s" % (msg, "*" * 30, logMsg)
for user in subscribers["notification"]:
self.addNotificationForUser(user, msg, 86400, deferToMail=True)
if subscribers["mail"]:
msg = self.__generateAlarmInfoMessage(alarmInfo)
logMsg = self.__generateAlarmLogMessage(alarmLog)
if logMsg:
msg = "%s\n\n%s\nAlarm Log:\n%s" % (msg, "*" * 30, logMsg)
subject = "Update on alarm %s" % alarmId
else:
subject = "New alarm %s" % alarmId
for user in subscribers["mail"]:
self.__sendMailToUser(user, subject, msg)
if subscribers["sms"]:
# TODO
pass
return S_OK()
def __generateAlarmLogMessage(self, alarmLog, showOnlyLast=False):
if len(alarmLog["Records"]) == 0:
return ""
records = alarmLog["Records"]
if showOnlyLast:
logToShow = [-1]
else:
logToShow = list(range(len(records) - 1, -1, -1))
finalMessage = []
for iD in logToShow:
rec = records[iD]
data = {}
for i in range(len(alarmLog["ParameterNames"])):
if rec[i]:
data[alarmLog["ParameterNames"][i]] = rec[i]
# [ 'timestamp', 'author', 'comment', 'modifications' ]
msg = [" Entry by : %s" % data["author"]]
msg.append(" On : %s" % data["timestamp"].strftime("%Y/%m/%d %H:%M:%S"))
if "modifications" in data:
mods = data["modifications"]
keys = sorted(mods)
msg.append(" Modificaitons:")
for key in keys:
msg.append(" %s -> %s" % (key, mods[key]))
if "comment" in data:
msg.append(" Comment:\n\n%s" % data["comment"])
finalMessage.append("\n".join(msg))
return "\n\n===============\n".join(finalMessage)
def __generateAlarmInfoMessage(self, alarmInfo):
# [ 'alarmid', 'author', 'creationtime', 'modtime', 'subject', 'status', 'type', 'body', 'assignee' ]
msg = " Alarm %6d\n" % alarmInfo["alarmid"]
msg += " Author : %s\n" % alarmInfo["author"]
msg += " Subject : %s\n" % alarmInfo["subject"]
msg += " Status : %s\n" % alarmInfo["status"]
msg += " Priority : %s\n" % alarmInfo["priority"]
msg += " Assignee : %s\n" % alarmInfo["assignee"]
msg += " Creation date : %s UTC\n" % alarmInfo["creationtime"].strftime("%Y/%m/%d %H:%M:%S")
msg += " Last modificaiton : %s UTC\n" % alarmInfo["modtime"].strftime("%Y/%m/%d %H:%M:%S")
msg += " Body:\n\n%s" % alarmInfo["body"]
return msg
def __sendMailToUser(self, user, subject, message):
address = gConfig.getValue("/Registry/Users/%s/Email" % user, "")
if not address:
self.log.error("User does not have an email registered", user)
return S_ERROR("User %s does not have an email registered" % user)
self.log.info("Sending mail (%s) to user %s at %s" % (subject, user, address))
m = Mail()
m._subject = "[DIRAC] %s" % subject
m._message = message
m._mailAddress = address
result = m._send()
if not result["OK"]:
gLogger.warn("Could not send mail with the following message:\n%s" % result["Message"])
return result
def getAlarms(self, condDict={}, sortList=False, start=0, limit=0, modifiedAfter=None):
condSQL = []
for field in self.__alarmQueryFields:
if field in condDict:
fieldValues = []
rawValue = condDict[field]
if field == "assignee":
expandedValue = []
for user in rawValue:
result = self.getAssigneeGroupsForUser(user)
if not result["OK"]:
return result
for ag in result["Value"]:
if ag not in expandedValue:
expandedValue.append(ag)
rawValue = expandedValue
for value in rawValue:
result = self._escapeString(value)
if not result["OK"]:
return result
fieldValues.append(result["Value"])
condSQL.append("%s in ( %s )" % (field, ",".join(fieldValues)))
selSQL = "SELECT %s FROM `ntf_Alarms`" % ",".join(self.__alarmQueryFields)
if modifiedAfter:
condSQL.append("ModTime >= %s" % modifiedAfter.strftime("%Y-%m-%d %H:%M:%S"))
if condSQL:
selSQL = "%s WHERE %s" % (selSQL, " AND ".join(condSQL))
if sortList:
selSQL += " ORDER BY %s" % ", ".join(["%s %s" % (sort[0], sort[1]) for sort in sortList])
if limit:
selSQL += " LIMIT %d,%d" % (start, limit)
result = self._query(selSQL)
if not result["OK"]:
return result
resultDict = {}
resultDict["ParameterNames"] = self.__alarmQueryFields
resultDict["Records"] = [list(v) for v in result["Value"]]
return S_OK(resultDict)
def getAlarmInfo(self, alarmId):
result = self.getAlarms({"alarmId": alarmId})
if not result["OK"]:
return result
alarmInfo = {}
data = result["Value"]
if len(data["Records"]) == 0:
return S_OK({})
for i in range(len(data["ParameterNames"])):
alarmInfo[data["ParameterNames"][i]] = data["Records"][0][i]
return S_OK(alarmInfo)
def getAlarmLog(self, alarmId):
try:
alarmId = int(alarmId)
except Exception:
return S_ERROR("Alarm id must be a non decimal number")
sqlSel = "SELECT %s FROM `ntf_AlarmLog` WHERE AlarmId=%d ORDER BY Timestamp ASC" % (
",".join(self.__alarmLogFields),
alarmId,
)
result = self._query(sqlSel)
if not result["OK"]:
return result
decodedRows = []
for row in result["Value"]:
decodedRows.append(list(row))
if not row[3]:
decodedRows.append(list(row))
continue
dec = DEncode.decode(row[3])
decodedRows[-1][3] = dec[0]
resultDict = {}
resultDict["ParameterNames"] = self.__alarmLogFields
resultDict["Records"] = decodedRows
return S_OK(resultDict)
###
# Followers management
###
def modifyFollowerForAlarm(self, alarmId, user, notificationsDict, overwrite=True):
rawUser = user
if rawUser not in Registry.getAllUsers():
return S_OK()
result = self._escapeString(user)
if not result["OK"]:
return result
user = result["Value"]
subscriber = False
for k in notificationsDict:
if notificationsDict[k]:
subscriber = True
break
selSQL = "SELECT Notification, Mail, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d AND User=%s" % (
alarmId,
user,
)
result = self._query(selSQL)
if not result["OK"]:
return result
if not result["Value"]:
if not subscriber:
return S_OK()
sqlValues = ["%d" % alarmId, user]
for k in self.__validAlarmNotifications:
if notificationsDict[k]:
sqlValues.append("1")
else:
sqlValues.append("0")
inSQL = (
"INSERT INTO `ntf_AlarmFollowers` ( AlarmId, User, Notification, Mail, SMS ) VALUES (%s)"
% ",".join(sqlValues)
)
return self._update(inSQL)
sqlCond = "AlarmId=%d AND User=%s" % (alarmId, user)
# Need to delete
if not subscriber:
return self._update("DELETE FROM `ntf_AlarmFollowers` WHERE %s" % sqlCond)
if not overwrite:
return S_OK()
# Need to update
modSQL = []
for k in self.__validAlarmNotifications:
if notificationsDict[k]:
modSQL.append("%s=1" % k)
else:
modSQL.append("%s=0" % k)
return self._update("UPDATE `ntf_AlarmFollowers` SET %s WHERE %s" % (modSQL, sqlCond))
def getSubscribersForAlarm(self, alarmId):
selSQL = "SELECT User, Mail, Notification, SMS FROM `ntf_AlarmFollowers` WHERE AlarmId=%d" % alarmId
result = self._query(selSQL)
if not result["OK"]:
return result
fw = result["Value"]
followWays = {"mail": [], "notification": [], "sms": []}
followers = []
for user, mail, Notification, SMS in fw:
if user in followers:
continue
followers.append(user)
if mail:
followWays["mail"].append(user)
if Notification:
followWays["notification"].append(user)
if SMS:
followWays["sms"].append(user)
return S_OK(followWays)
###
# Assignee groups management
###
def getUserAsignees(self, assignee):
# Check if it is a user
if assignee in Registry.getAllUsers():
return S_OK([assignee])
result = self._escapeString(assignee)
if not result["OK"]:
return result
escAG = result["Value"]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escAG
result = self._query(sqlSel)
if not result["OK"]:
return result
users = [row[0] for row in result["Value"]]
if not users:
return S_OK([])
return S_OK(users)
def setAssigneeGroup(self, groupName, usersList):
validUsers = Registry.getAllUsers()
result = self._escapeString(groupName)
if not result["OK"]:
return result
escGroup = result["Value"]
sqlSel = "SELECT User FROM `ntf_AssigneeGroups` WHERE AssigneeGroup = %s" % escGroup
result = self._query(sqlSel)
if not result["OK"]:
return result
currentUsers = [row[0] for row in result["Value"]]
usersToDelete = []
usersToAdd = []
finalUsersInGroup = len(currentUsers)
for user in currentUsers:
if user not in usersList:
result = self._escapeString(user)
if not result["OK"]:
return result
usersToDelete.append(result["Value"])
finalUsersInGroup -= 1
for user in usersList:
if user not in validUsers:
continue
if user not in currentUsers:
result = self._escapeString(user)
if not result["OK"]:
return result
usersToAdd.append("( %s, %s )" % (escGroup, result["Value"]))
finalUsersInGroup += 1
if not finalUsersInGroup:
return S_ERROR("Group must have at least one user!")
# Delete old users
if usersToDelete:
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE User in ( %s )" % ",".join(usersToDelete)
result = self._update(sqlDel)
if not result["OK"]:
return result
# Add new users
if usersToAdd:
sqlInsert = "INSERT INTO `ntf_AssigneeGroups` ( AssigneeGroup, User ) VALUES %s" % ",".join(usersToAdd)
result = self._update(sqlInsert)
if not result["OK"]:
return result
return S_OK()
def deleteAssigneeGroup(self, groupName):
result = self._escapeString(groupName)
if not result["OK"]:
return result
escGroup = result["Value"]
sqlSel = "SELECT AlarmId FROM `ntf_Alarms` WHERE Assignee=%s" % escGroup
result = self._query(sqlSel)
if not result["OK"]:
return result
if result["Value"]:
alarmIds = [row[0] for row in result["Value"]]
return S_ERROR("There are %s alarms assigned to this group" % len(alarmIds))
sqlDel = "DELETE FROM `ntf_AssigneeGroups` WHERE AssigneeGroup=%s" % escGroup
return self._update(sqlDel)
def getAssigneeGroups(self):
result = self._query("SELECT AssigneeGroup, User from `ntf_AssigneeGroups` ORDER BY User")
if not result["OK"]:
return result
agDict = {}
for row in result["Value"]:
ag = row[0]
user = row[1]
if ag not in agDict:
agDict[ag] = []
agDict[ag].append(user)
return S_OK(agDict)
def getAssigneeGroupsForUser(self, user):
if user not in Registry.getAllUsers():
return S_ERROR("%s is an unknown user" % user)
result = self._escapeString(user)
if not result["OK"]:
return result
user = result["Value"]
result = self._query("SELECT AssigneeGroup from `ntf_AssigneeGroups` WHERE User=%s" % user)
if not result["OK"]:
return result
return S_OK([row[0] for row in result["Value"]])
###
# Notifications
###
def addNotificationForUser(self, user, message, lifetime=0, deferToMail=1):
if user not in Registry.getAllUsers():
return S_ERROR("%s is an unknown user" % user)
self.log.info("Adding a notification for user %s (msg is %s chars)" % (user, len(message)))
result = self._escapeString(user)
if not result["OK"]:
return result
user = result["Value"]
result = self._escapeString(message)
if not result["OK"]:
return result
message = result["Value"]
sqlFields = ["User", "Message", "Timestamp"]
sqlValues = [user, message, "UTC_TIMESTAMP()"]
if not deferToMail:
sqlFields.append("DeferToMail")
sqlValues.append("0")
if lifetime:
sqlFields.append("Expiration")
sqlValues.append("TIMESTAMPADD( SECOND, %d, UTC_TIMESTAMP() )" % int(lifetime))
sqlInsert = "INSERT INTO `ntf_Notifications` (%s) VALUES (%s) " % (",".join(sqlFields), ",".join(sqlValues))
result = self._update(sqlInsert)
if not result["OK"]:
return result
return S_OK(result["lastRowId"])
def removeNotificationsForUser(self, user, msgIds=False):
if user not in Registry.getAllUsers():
return S_ERROR("%s is an unknown user" % user)
result = self._escapeString(user)
if not result["OK"]:
return result
user = result["Value"]
delSQL = "DELETE FROM `ntf_Notifications` WHERE User=%s" % user
escapedIDs = []
if msgIds:
for iD in msgIds:
result = self._escapeString(str(iD))
if not result["OK"]:
return result
escapedIDs.append(result["Value"])
delSQL = "%s AND Id in ( %s ) " % (delSQL, ",".join(escapedIDs))
return self._update(delSQL)
def markNotificationsSeen(self, user, seen=True, msgIds=False):
if user not in Registry.getAllUsers():
return S_ERROR("%s is an unknown user" % user)
result = self._escapeString(user)
if not result["OK"]:
return result
user = result["Value"]
if seen:
seen = 1
else:
seen = 0
updateSQL = "UPDATE `ntf_Notifications` SET Seen=%d WHERE User=%s" % (seen, user)
escapedIDs = []
if msgIds:
for iD in msgIds:
result = self._escapeString(str(iD))
if not result["OK"]:
return result
escapedIDs.append(result["Value"])
updateSQL = "%s AND Id in ( %s ) " % (updateSQL, ",".join(escapedIDs))
return self._update(updateSQL)
def getNotifications(self, condDict={}, sortList=False, start=0, limit=0):
condSQL = []
for field in self.__notificationQueryFields:
if field in condDict:
fieldValues = []
for value in condDict[field]:
result = self._escapeString(value)
if not result["OK"]:
return result
fieldValues.append(result["Value"])
condSQL.append("%s in ( %s )" % (field, ",".join(fieldValues)))
eSortList = []
for field, order in sortList:
if order.lower() in ["asc", "desc"]:
eSortList.append(("`%s`" % field.replace("`", ""), order))
selSQL = "SELECT %s FROM `ntf_Notifications`" % ",".join(self.__notificationQueryFields)
if condSQL:
selSQL = "%s WHERE %s" % (selSQL, " AND ".join(condSQL))
if eSortList:
selSQL += " ORDER BY %s" % ", ".join(["%s %s" % (sort[0], sort[1]) for sort in eSortList])
else:
selSQL += " ORDER BY Id DESC"
if limit:
selSQL += " LIMIT %d,%d" % (start, limit)
result = self._query(selSQL)
if not result["OK"]:
return result
resultDict = {}
resultDict["ParameterNames"] = self.__notificationQueryFields
resultDict["Records"] = [list(v) for v in result["Value"]]
return S_OK(resultDict)
def purgeExpiredNotifications(self):
self.log.info("Purging expired notifications")
delConds = ["(Seen=1 OR DeferToMail=0)", "(TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0 )"]
delSQL = "DELETE FROM `ntf_Notifications` WHERE %s" % " AND ".join(delConds)
result = self._update(delSQL)
if not result["OK"]:
return result
self.log.info("Purged %s notifications" % result["Value"])
deferCond = ["Seen=0", "DeferToMail=1", "TIMESTAMPDIFF( SECOND, UTC_TIMESTAMP(), Expiration ) < 0"]
selSQL = "SELECT Id, User, Message FROM `ntf_Notifications` WHERE %s" % " AND ".join(deferCond)
result = self._query(selSQL)
if not result["OK"]:
return result
messages = result["Value"]
if not messages:
return S_OK()
ids = []
for msg in messages:
            self.__sendMailToUser(msg[1], "Notification deferred to mail", msg[2])
ids.append(str(msg[0]))
self.log.info("Deferred %s notifications" % len(ids))
return self._update("DELETE FROM `ntf_Notifications` WHERE Id in (%s)" % ",".join(ids))
|
DIRACGrid/DIRAC
|
src/DIRAC/FrameworkSystem/DB/NotificationDB.py
|
Python
|
gpl-3.0
| 35,664
|
[
"DIRAC"
] |
6bb3e8d29d98c4b753a739c7912b4d98eb4dcc8125053175b01b8c5bdc1a28e5
|
# -*- coding: utf-8 -*-
# @Author: twankim
# @Date: 2017-02-24 17:46:51
# @Last Modified by: twankim
# @Last Modified time: 2018-03-09 22:15:51
import numpy as np
import time
import sys
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from ssac import weakSSAC
from ssac_org import SSAC
from gen_data import genData
from utils import *
weak = "global"
delta = 0.99
base_dir= os.path.join('./results',weak+'_compare')
def main(args):
plotted = False
rep = args.rep
k = args.k
n = args.n
m = args.m
std = args.std
# qs = [float(q) for q in args.qs.split(',')]
etas = [float(eta) for eta in args.etas.split(',')]
beta = args.beta
i_plot = np.random.randint(0,rep) # Index of experiment to plot the figure
verbose = args.verbose
cs = [float(q) for q in args.cs.split(',')]
res_acc = np.zeros((rep,len(cs),len(etas))) # Accuracy of clustering
res_mean_acc = np.zeros((rep,len(cs),len(etas))) # Mean accuracy of clustering (per cluster)
# res_err = np.zeros((rep,len(qs),len(etas))) # Number of misclustered points
res_fail = np.zeros((rep,len(cs),len(etas))) # Number of Failure
res_acc_org = np.zeros((rep,len(cs),len(etas))) # Accuracy of clustering
res_mean_acc_org = np.zeros((rep,len(cs),len(etas))) # Mean accuracy of clustering (per cluster)
# res_err = np.zeros((rep,len(qs),len(etas))) # Number of misclustered points
res_fail_org = np.zeros((rep,len(cs),len(etas))) # Number of Failure
gammas = np.zeros(rep)
rhos = np.zeros((rep,len(cs)))
# Make directories to save results
if not os.path.exists(base_dir):
os.makedirs(base_dir)
res_dir = base_dir + '/{}_{}'.format(args.min_gamma,args.max_gamma)
if not os.path.exists(res_dir):
os.makedirs(res_dir)
for i_rep in xrange(rep):
# Generate Synthetic data
# m dimensional, n points, k cluster
# min_gamma: minimum gamma margin
if verbose:
print "({}/{})... Generating data".format(i_rep+1,rep)
dataset = genData(n,m,k,args.min_gamma,args.max_gamma,std)
X,y_true,ris = dataset.gen()
gamma = dataset.gamma
gammas[i_rep] = gamma
print "({}/{})... Synthetic data is generated: gamma={}, (n,m,k,std)=({},{},{},{})".format(
i_rep+1,rep,gamma,n,m,k,std)
algo = weakSSAC(X,y_true,k,wtype=weak,ris=ris)
algo_org = SSAC(X,y_true,k,wtype=weak,ris=ris)
# Test SSAC algorithm for different c's and eta's (fix beta in this case)
for i_c,c_dist in enumerate(cs):
assert (c_dist>0.5) & (c_dist<=1.0), "c_dist must be in (0.5,1]"
rhos[i_rep,i_c] = c_dist
# Calculate proper eta and beta based on parameters including delta
if verbose:
print " - Proper eta={}, beta={} (delta={})".format(
dataset.calc_eta(delta,weak=weak,rho=rhos[i_rep,i_c]),
dataset.calc_beta(delta,weak=weak,rho=rhos[i_rep,i_c]),
delta)
for i_eta,eta in enumerate(etas):
if verbose:
print " <Test: c_dist={}, eta={}, beta={}>".format(c_dist,eta,beta)
algo.set_params(eta,beta,rho=rhos[i_rep,i_c])
algo_org.set_params(eta,beta,rho=rhos[i_rep,i_c])
if not algo.fit():
# Algorithm has failed
res_fail[i_rep,i_c,i_eta] = 1
if not plotted:
i_plot = np.random.randint(i_rep+1,rep) # Index of experiment to plot the figure
if not algo_org.fit():
# Algorithm has failed
res_fail_org[i_rep,i_c,i_eta] = 1
# i_plot = np.random.randint(i_rep+1,rep) # Index of experiment to plot the figure
y_pred = algo.y
mpps = algo.mpps # Estimated cluster centers
# print " ... Clustering is done. Number of binary search steps = {}\n".format(algo.bs_num)
y_pred_org = algo_org.y
mpps_org = algo_org.mpps # Estimated cluster centers
# For evaluation & plotting, find best permutation of cluster assignment
y_pred_perm = find_permutation(dataset,algo)
y_pred_perm_org = find_permutation(dataset,algo_org)
# Calculate accuracy and mean accuracy
res_acc[i_rep,i_c,i_eta] = accuracy(y_true,y_pred_perm)
res_mean_acc[i_rep,i_c,i_eta] = mean_accuracy(y_true,y_pred_perm)
res_acc_org[i_rep,i_c,i_eta] = accuracy(y_true,y_pred_perm_org)
res_mean_acc_org[i_rep,i_c,i_eta] = mean_accuracy(y_true,y_pred_perm_org)
# # Calculate number of errors
# res_err[i_rep,i_c,i_eta] = error(y_true,y_pred_perm)
if (i_rep == i_plot) and (m<=2) and (not plotted):
if (i_eta==len(etas)-1) and (i_c==len(cs)-1):
plotted = True
title = r"SSAC with {} weak oracle ($\eta={}, \beta={}, \rho={:.2f}$)".format(
weak,eta,beta,rhos[i_rep,i_c])
f_name = res_dir+'/fig_n{}_m{}_k{}_c{:03d}_e{:d}.png'.format(n,m,k,int(100*c_dist),int(eta))
plot_cluster(X,y_true,y_pred_perm,k,mpps,gamma,
title,f_name,verbose)
# title_org = r"SSAC(original) with {} weak oracle ($\eta={}, \beta={}, \rho={:.2f}$)".format(
# weak,eta,beta,rhos[i_rep,i_c])
# f_name_org = res_dir+'/fig_org_n{}_m{}_k{}_c{:03d}_e{:d}.png'.format(n,m,k,int(100*c_dist),int(eta))
# plot_cluster(X,y_true,y_pred_perm_org,k,mpps_org,gamma,
# title_org,f_name_org,verbose)
# Write result as table
print_eval("Accuracy(%)",res_acc,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("acc",n,m,k),weak=weak,params=cs)
print_eval("Mean Accuracy(%)",res_mean_acc,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("meanacc",n,m,k),weak=weak,params=cs)
# print_eval("# Error(%)",res_err,qs,etas,
# res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("err",n,m,k))
print_eval("# Failures",res_fail,etas,
res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("fail",n,m,k),
is_sum=True,weak=weak,params=cs)
print_eval("Accuracy(%)",res_acc_org,etas,
res_dir+'/res_org_{}_n{}_m{}_k{}.csv'.format("acc",n,m,k),weak=weak,params=cs)
print_eval("Mean Accuracy(%)",res_mean_acc_org,etas,
res_dir+'/res_org_{}_n{}_m{}_k{}.csv'.format("meanacc",n,m,k),weak=weak,params=cs)
# print_eval("# Error(%)",res_err,qs,etas,
# res_dir+'/res_{}_n{}_m{}_k{}.csv'.format("err",n,m,k))
print_eval("# Failures",res_fail_org,etas,
res_dir+'/res_org_{}_n{}_m{}_k{}.csv'.format("fail",n,m,k),
is_sum=True,weak=weak,params=cs)
# if args.isplot:
# Plot Accuracy vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("acc",n,m,k)
plot_eval("Accuracy(%)",res_acc,etas,fig_name,weak=weak,params=cs,res_org=res_acc_org)
# Plot Mean Accuracy vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("meanacc",n,m,k)
plot_eval("Mean Accuracy(%)",res_mean_acc,etas,fig_name,weak=weak,params=cs,res_org=res_mean_acc_org)
# Plot Failure vs. eta
fig_name = res_dir+'/fig_{}_n{}_m{}_k{}.pdf'.format("fail",n,m,k)
plot_eval("# Failures",res_fail,etas,fig_name,is_sum=True,weak=weak,params=cs,res_org=res_fail_org)
# Plot histogram of gammas
fig_name = res_dir+'/fig_gamma_hist.pdf'
plot_hist(gammas,args.min_gamma,args.max_gamma,fig_name)
if args.isplot:
plt.show()
def parse_args():
def str2bool(v):
return v.lower() in ('true', '1')
parser = argparse.ArgumentParser(description=
'Test Semi-Supervised Active Clustering with Weak Oracles: Random-weak model')
parser.add_argument('-rep', dest='rep',
help='Number of experiments to repeat',
default = 10000, type = int)
parser.add_argument('-k', dest='k',
help='Number of clusters in synthetic data',
default = 3, type = int)
parser.add_argument('-n', dest='n',
help='Number of data points in synthetic data',
default = 600, type = int)
parser.add_argument('-m', dest='m',
help='Dimension of data points in synthetic data',
default = 2, type = int)
parser.add_argument('-std', dest='std',
                        help='standard deviation of Gaussian distribution (default:2.0)',
default = 2.0, type = float)
parser.add_argument('-qs', dest='qs',
help='Probabilities q (not-sure with 1-q) ex) 0.7,0.85,1',
default = '0.7,0.85,1', type = str)
parser.add_argument('-etas', dest='etas',
help='etas: parameter for sampling (phase 1) ex) 10,50',
default = '2,5,10,20,30', type = str)
parser.add_argument('-beta', dest='beta',
help='beta: parameter for sampling (phase 2)',
default = 1, type = int)
parser.add_argument('-g_min', dest='min_gamma',
help='minimum gamma margin (default:1)',
default = 1.0, type = float)
parser.add_argument('-g_max', dest='max_gamma',
                        help='maximum gamma margin (default:1.1)',
default = 1.1, type = float)
parser.add_argument('-cs', dest='cs',
help='Fractions to set distance-weak parameters (0.5,1] ex) 0.7,0.85,1',
default = '0.7,0.85,1', type = str)
parser.add_argument('-isplot', dest='isplot',
help='plot the result: True/False',
default = False, type = str2bool)
parser.add_argument('-verbose', dest='verbose',
help='verbose: True/False',
default = False, type = str2bool)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
print "Called with args:"
print args
sys.exit(main(args))
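The evaluation above relies on find_permutation, accuracy and mean_accuracy from utils, which are not shown here. As a self-contained sketch of the underlying idea (match predicted cluster labels to true labels before scoring), one standard approach uses the Hungarian algorithm:
# Standalone sketch; this is an assumption about what utils.find_permutation/accuracy do, not a copy of them.
import numpy as np
from scipy.optimize import linear_sum_assignment
def best_permutation_accuracy(y_true, y_pred, k):
    # Confusion counts between true cluster t and predicted cluster p (labels assumed 0..k-1).
    counts = np.zeros((k, k), dtype=int)
    for t, p in zip(y_true, y_pred):
        counts[int(t), int(p)] += 1
    # The Hungarian algorithm on negated counts picks the label permutation maximizing agreement.
    rows, cols = linear_sum_assignment(-counts)
    return 100.0 * counts[rows, cols].sum() / len(y_true)
print(best_permutation_accuracy([0, 0, 1, 1, 2, 2], [1, 1, 0, 0, 2, 2], 3))  # 100.0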
|
twankim/weaksemi
|
compare_global.py
|
Python
|
mit
| 10,611
|
[
"Gaussian"
] |
a8178fc5276cf3c79481db9938a30f9f602fbe38266c387ba5dceb3aa76e39c1
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a set of audio files to test FAD calculation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import errno
import os
from absl import app
from absl import flags
import numpy as np
import scipy.io.wavfile
_SAMPLE_RATE = 16000
FLAGS = flags.FLAGS
flags.DEFINE_string("test_files", "",
"Directory where the test files should be located")
def create_dir(output_dir):
"""Ignore directory exists error."""
try:
os.makedirs(output_dir)
except OSError as exception:
if exception.errno == errno.EEXIST and os.path.isdir(output_dir):
pass
else:
raise
def add_noise(data, stddev):
"""Adds Gaussian noise to the samples.
Args:
data: 1d Numpy array containing floating point samples. Not necessarily
normalized.
stddev: The standard deviation of the added noise.
Returns:
1d Numpy array containing the provided floating point samples with added
Gaussian noise.
Raises:
ValueError: When data is not a 1d numpy array.
"""
if len(data.shape) != 1:
raise ValueError("expected 1d numpy array.")
max_value = np.amax(np.abs(data))
num_samples = data.shape[0]
gauss = np.random.normal(0, stddev, (num_samples)) * max_value
return data + gauss
def gen_sine_wave(freq=600,
length_seconds=6,
sample_rate=_SAMPLE_RATE,
param=None):
"""Creates sine wave of the specified frequency, sample_rate and length."""
t = np.linspace(0, length_seconds, int(length_seconds * sample_rate))
samples = np.sin(2 * np.pi * t * freq)
if param:
samples = add_noise(samples, param)
return np.asarray(2**15 * samples, dtype=np.int16)
def main(argv):
del argv # Unused.
  for target, count, param in [("background", 10, None), ("test1", 5, 0.0001),
                               ("test2", 5, 0.00001)]:
    output_dir = os.path.join(FLAGS.test_files, target)
create_dir(output_dir)
print("output_dir:", output_dir)
frequencies = np.linspace(100, 1000, count).tolist()
for freq in frequencies:
samples = gen_sine_wave(freq, param=param)
filename = os.path.join(output_dir, "sin_%.0f.wav" % freq)
print("Creating: %s with %i samples." % (filename, samples.shape[0]))
scipy.io.wavfile.write(filename, _SAMPLE_RATE, samples)
if __name__ == "__main__":
app.run(main)
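A quick sanity check is to read one generated file back and confirm its sample rate, dtype and length; the path below is hypothetical and depends on where --test_files pointed.
# Hypothetical path; adjust to the chosen --test_files directory.
import scipy.io.wavfile
rate, samples = scipy.io.wavfile.read("/tmp/fad_test_files/background/sin_100.wav")
print(rate, samples.dtype, samples.shape)  # expected: 16000 int16 (96000,) for a 6 s file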
|
google-research/google-research
|
frechet_audio_distance/gen_test_files.py
|
Python
|
apache-2.0
| 3,034
|
[
"Gaussian"
] |
c174ddb3a91f04ecb959114343a16c96adee87ad110e0f3dfe8f82984413cb46
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from torch.distributions import constraints
from torch.nn import Parameter
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.contrib.gp.models.model import GPModel
from pyro.contrib.gp.util import conditional
from pyro.distributions.util import eye_like
from pyro.nn.module import PyroParam, pyro_method
class VariationalSparseGP(GPModel):
r"""
Variational Sparse Gaussian Process model.
In :class:`.VariationalGP` model, when the number of input data :math:`X` is large,
the covariance matrix :math:`k(X, X)` will require a lot of computational steps to
compute its inverse (for log likelihood and for prediction). This model introduces
an additional inducing-input parameter :math:`X_u` to solve that problem. Given
inputs :math:`X`, their noisy observations :math:`y`, and the inducing-input
parameters :math:`X_u`, the model takes the form:
.. math::
[f, u] &\sim \mathcal{GP}(0, k([X, X_u], [X, X_u])),\\
y & \sim p(y) = p(y \mid f) p(f),
where :math:`p(y \mid f)` is the likelihood.
We will use a variational approach in this model by approximating :math:`q(f,u)`
to the posterior :math:`p(f,u \mid y)`. Precisely, :math:`q(f) = p(f\mid u)q(u)`,
where :math:`q(u)` is a multivariate normal distribution with two parameters
``u_loc`` and ``u_scale_tril``, which will be learned during a variational
inference process.
.. note:: This model can be learned using MCMC method as in reference [2]. See also
:class:`.GPModel`.
.. note:: This model has :math:`\mathcal{O}(NM^2)` complexity for training,
:math:`\mathcal{O}(M^3)` complexity for testing. Here, :math:`N` is the number
of train inputs, :math:`M` is the number of inducing inputs. Size of
variational parameters is :math:`\mathcal{O}(M^2)`.
References:
[1] `Scalable variational Gaussian process classification`,
James Hensman, Alexander G. de G. Matthews, Zoubin Ghahramani
[2] `MCMC for Variationally Sparse Gaussian Processes`,
James Hensman, Alexander G. de G. Matthews, Maurizio Filippone, Zoubin Ghahramani
:param torch.Tensor X: A input data for training. Its first dimension is the number
of data points.
:param torch.Tensor y: An output data for training. Its last dimension is the
number of data points.
:param ~pyro.contrib.gp.kernels.kernel.Kernel kernel: A Pyro kernel object, which
is the covariance function :math:`k`.
:param torch.Tensor Xu: Initial values for inducing points, which are parameters
of our model.
    :param ~pyro.contrib.gp.likelihoods.likelihood.Likelihood likelihood: A likelihood
object.
:param callable mean_function: An optional mean function :math:`m` of this Gaussian
process. By default, we use zero mean.
:param torch.Size latent_shape: Shape for latent processes (`batch_shape` of
:math:`q(u)`). By default, it equals to output batch shape ``y.shape[:-1]``.
For the multi-class classification problems, ``latent_shape[-1]`` should
        correspond to the number of classes.
:param int num_data: The size of full training dataset. It is useful for training
this model with mini-batch.
:param bool whiten: A flag to tell if variational parameters ``u_loc`` and
``u_scale_tril`` are transformed by the inverse of ``Luu``, where ``Luu`` is
the lower triangular decomposition of :math:`kernel(X_u, X_u)`. Enable this
flag will help optimization.
:param float jitter: A small positive term which is added into the diagonal part of
        a covariance matrix to help stabilize its Cholesky decomposition.
"""
def __init__(
self,
X,
y,
kernel,
Xu,
likelihood,
mean_function=None,
latent_shape=None,
num_data=None,
whiten=False,
jitter=1e-6,
):
super().__init__(X, y, kernel, mean_function, jitter)
self.likelihood = likelihood
self.Xu = Parameter(Xu)
y_batch_shape = self.y.shape[:-1] if self.y is not None else torch.Size([])
self.latent_shape = latent_shape if latent_shape is not None else y_batch_shape
M = self.Xu.size(0)
u_loc = self.Xu.new_zeros(self.latent_shape + (M,))
self.u_loc = Parameter(u_loc)
identity = eye_like(self.Xu, M)
u_scale_tril = identity.repeat(self.latent_shape + (1, 1))
self.u_scale_tril = PyroParam(u_scale_tril, constraints.lower_cholesky)
self.num_data = num_data if num_data is not None else self.X.size(0)
self.whiten = whiten
self._sample_latent = True
@pyro_method
def model(self):
self.set_mode("model")
M = self.Xu.size(0)
Kuu = self.kernel(self.Xu).contiguous()
Kuu.view(-1)[:: M + 1] += self.jitter # add jitter to the diagonal
Luu = torch.linalg.cholesky(Kuu)
zero_loc = self.Xu.new_zeros(self.u_loc.shape)
if self.whiten:
identity = eye_like(self.Xu, M)
pyro.sample(
self._pyro_get_fullname("u"),
dist.MultivariateNormal(zero_loc, scale_tril=identity).to_event(
zero_loc.dim() - 1
),
)
else:
pyro.sample(
self._pyro_get_fullname("u"),
dist.MultivariateNormal(zero_loc, scale_tril=Luu).to_event(
zero_loc.dim() - 1
),
)
f_loc, f_var = conditional(
self.X,
self.Xu,
self.kernel,
self.u_loc,
self.u_scale_tril,
Luu,
full_cov=False,
whiten=self.whiten,
jitter=self.jitter,
)
f_loc = f_loc + self.mean_function(self.X)
if self.y is None:
return f_loc, f_var
else:
# we would like to load likelihood's parameters outside poutine.scale context
self.likelihood._load_pyro_samples()
with poutine.scale(scale=self.num_data / self.X.size(0)):
return self.likelihood(f_loc, f_var, self.y)
@pyro_method
def guide(self):
self.set_mode("guide")
self._load_pyro_samples()
pyro.sample(
self._pyro_get_fullname("u"),
dist.MultivariateNormal(self.u_loc, scale_tril=self.u_scale_tril).to_event(
self.u_loc.dim() - 1
),
)
def forward(self, Xnew, full_cov=False):
r"""
Computes the mean and covariance matrix (or variance) of Gaussian Process
posterior on a test input data :math:`X_{new}`:
.. math:: p(f^* \mid X_{new}, X, y, k, X_u, u_{loc}, u_{scale\_tril})
= \mathcal{N}(loc, cov).
.. note:: Variational parameters ``u_loc``, ``u_scale_tril``, the
inducing-point parameter ``Xu``, together with kernel's parameters have
been learned from a training procedure (MCMC or SVI).
:param torch.Tensor Xnew: A input data for testing. Note that
``Xnew.shape[1:]`` must be the same as ``self.X.shape[1:]``.
:param bool full_cov: A flag to decide if we want to predict full covariance
matrix or just variance.
:returns: loc and covariance matrix (or variance) of :math:`p(f^*(X_{new}))`
:rtype: tuple(torch.Tensor, torch.Tensor)
"""
self._check_Xnew_shape(Xnew)
self.set_mode("guide")
loc, cov = conditional(
Xnew,
self.Xu,
self.kernel,
self.u_loc,
self.u_scale_tril,
full_cov=full_cov,
whiten=self.whiten,
jitter=self.jitter,
)
return loc + self.mean_function(Xnew), cov
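A minimal training sketch for this class, assuming a recent Pyro release where pyro.contrib.gp.util.train is available; the data, kernel choice and number of inducing points are made up.
# Illustrative sketch only; data and hyperparameters are arbitrary.
import torch
import pyro.contrib.gp as gp
X = torch.linspace(0.0, 5.0, 100)
y = torch.sin(2.0 * X) + 0.2 * torch.randn(100)
Xu = torch.linspace(0.0, 5.0, 10)  # inducing inputs
kernel = gp.kernels.RBF(input_dim=1)
likelihood = gp.likelihoods.Gaussian()
vsgp = gp.models.VariationalSparseGP(X, y, kernel, Xu, likelihood, whiten=True)
losses = gp.util.train(vsgp, num_steps=500)  # SVI over the model/guide pair defined above
with torch.no_grad():
    mean, var = vsgp(torch.linspace(0.0, 5.0, 25), full_cov=False)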
|
uber/pyro
|
pyro/contrib/gp/models/vsgp.py
|
Python
|
apache-2.0
| 7,988
|
[
"Gaussian"
] |
1d90aebf1f8ca7777d8a390c6f53b4229b56c9fa52e27a76b9b9eb01dc1f12c7
|
# -*- coding: utf-8 -*-
"""Factories for the OSF models, including an abstract ModularOdmFactory.
Example usage: ::
>>> from tests.factories import UserFactory
>>> user1 = UserFactory()
>>> user1.username
fred0@example.com
>>> user2 = UserFactory()
    >>> user2.username
    fred1@example.com
Factory boy docs: http://factoryboy.readthedocs.org/
"""
import datetime
from factory import base, Sequence, SubFactory, post_generation, LazyAttribute
from mock import patch
from framework.mongo import StoredObject
from framework.auth import User, Auth
from framework.auth.utils import impute_names_model
from framework.sessions.model import Session
from website.addons import base as addons_base
from website.oauth.models import ExternalAccount
from website.oauth.models import ExternalProvider
from website.project.model import (
ApiKey, Node, NodeLog, WatchConfig, Tag, Pointer, Comment, PrivateLink,
Retraction, Embargo,
)
from website.notifications.model import NotificationSubscription, NotificationDigest
from website.archiver import utils as archiver_utils
from website.archiver.model import ArchiveTarget, ArchiveJob
from website.addons.wiki.model import NodeWikiPage
from tests.base import fake
# TODO: This is a hack. Check whether FactoryBoy can do this better
def save_kwargs(**kwargs):
for value in kwargs.itervalues():
if isinstance(value, StoredObject) and not value._is_loaded:
value.save()
def FakerAttribute(provider, **kwargs):
"""Attribute that lazily generates a value using the Faker library.
Example: ::
class UserFactory(ModularOdmFactory):
name = FakerAttribute('name')
"""
fake_gen = getattr(fake, provider)
if not fake_gen:
raise ValueError('{0!r} is not a valid faker provider.'.format(provider))
return LazyAttribute(lambda x: fake_gen(**kwargs))
class ModularOdmFactory(base.Factory):
"""Base factory for modular-odm objects.
"""
ABSTRACT_FACTORY = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
"""Build an object without saving it."""
save_kwargs(**kwargs)
return target_class(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
save_kwargs(**kwargs)
instance = target_class(*args, **kwargs)
instance.save()
return instance
class UserFactory(ModularOdmFactory):
FACTORY_FOR = User
username = Sequence(lambda n: "fred{0}@example.com".format(n))
# Don't use post generation call to set_password because
# It slows down the tests dramatically
password = "password"
fullname = Sequence(lambda n: "Freddie Mercury{0}".format(n))
is_registered = True
is_claimed = True
api_keys = []
date_confirmed = datetime.datetime(2014, 2, 21)
merged_by = None
email_verifications = {}
verification_key = None
@post_generation
def set_names(self, create, extracted):
parsed = impute_names_model(self.fullname)
for key, value in parsed.items():
setattr(self, key, value)
if create:
self.save()
@post_generation
def set_emails(self, create, extracted):
if self.username not in self.emails:
self.emails.append(self.username)
self.save()
class AuthUserFactory(UserFactory):
"""A user that automatically has an api key, for quick authentication.
Example: ::
user = AuthUserFactory()
res = self.app.get(url, auth=user.auth) # user is "logged in"
"""
@post_generation
def add_api_key(self, create, extracted):
key = ApiKeyFactory()
self.api_keys.append(key)
self.save()
self.auth = ('test', key._primary_key)
class TagFactory(ModularOdmFactory):
FACTORY_FOR = Tag
_id = Sequence(lambda n: "scientastic-{}".format(n))
class ApiKeyFactory(ModularOdmFactory):
FACTORY_FOR = ApiKey
class PrivateLinkFactory(ModularOdmFactory):
FACTORY_FOR = PrivateLink
name = "link"
key = "foobarblaz"
anonymous = False
creator = SubFactory(AuthUserFactory)
class AbstractNodeFactory(ModularOdmFactory):
FACTORY_FOR = Node
title = 'The meaning of life'
description = 'The meaning of life is 42.'
creator = SubFactory(AuthUserFactory)
class ProjectFactory(AbstractNodeFactory):
category = 'project'
class FolderFactory(ProjectFactory):
is_folder = True
class DashboardFactory(FolderFactory):
is_dashboard = True
class NodeFactory(AbstractNodeFactory):
category = 'hypothesis'
parent = SubFactory(ProjectFactory)
class RegistrationFactory(AbstractNodeFactory):
# Default project is created if not provided
category = 'project'
@classmethod
def _build(cls, target_class, *args, **kwargs):
raise Exception("Cannot build registration without saving.")
@classmethod
def _create(cls, target_class, project=None, schema=None, user=None,
template=None, data=None, archive=False, *args, **kwargs):
save_kwargs(**kwargs)
# Original project to be registered
project = project or target_class(*args, **kwargs)
project.save()
# Default registration parameters
#schema = schema or MetaSchema.find_one(
# Q('name', 'eq', 'Open-Ended_Registration')
#)
schema = None
user = user or project.creator
template = template or "Template1"
data = data or "Some words"
auth = Auth(user=user)
register = lambda: project.register_node(
schema=schema,
auth=auth,
template=template,
data=data,
)
ArchiveJob(
src_node=project,
dst_node=register,
initiator=user,
)
if archive:
return register()
else:
with patch('framework.tasks.handlers.enqueue_task'):
reg = register()
archiver_utils.archive_success(
reg,
reg.registered_user
)
return reg
class PointerFactory(ModularOdmFactory):
FACTORY_FOR = Pointer
node = SubFactory(NodeFactory)
class NodeLogFactory(ModularOdmFactory):
FACTORY_FOR = NodeLog
action = 'file_added'
user = SubFactory(UserFactory)
class WatchConfigFactory(ModularOdmFactory):
FACTORY_FOR = WatchConfig
node = SubFactory(NodeFactory)
class RetractionFactory(ModularOdmFactory):
FACTORY_FOR = Retraction
user = SubFactory(UserFactory)
class EmbargoFactory(ModularOdmFactory):
FACTORY_FOR = Embargo
user = SubFactory(UserFactory)
class NodeWikiFactory(ModularOdmFactory):
FACTORY_FOR = NodeWikiPage
page_name = 'home'
content = 'Some content'
version = 1
user = SubFactory(UserFactory)
node = SubFactory(NodeFactory)
@post_generation
def set_node_keys(self, create, extracted):
self.node.wiki_pages_current[self.page_name] = self._id
self.node.wiki_pages_versions[self.page_name] = [self._id]
self.node.save()
class UnregUserFactory(ModularOdmFactory):
"""Factory for an unregistered user. Uses User.create_unregistered()
to create an instance.
"""
FACTORY_FOR = User
email = Sequence(lambda n: "brian{0}@queen.com".format(n))
fullname = Sequence(lambda n: "Brian May{0}".format(n))
@classmethod
def _build(cls, target_class, *args, **kwargs):
'''Build an object without saving it.'''
return target_class.create_unregistered(*args, **kwargs)
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = target_class.create_unregistered(*args, **kwargs)
instance.save()
return instance
class UnconfirmedUserFactory(ModularOdmFactory):
"""Factory for a user that has not yet confirmed their primary email
address (username).
"""
FACTORY_FOR = User
username = Sequence(lambda n: 'roger{0}@queen.com'.format(n))
fullname = Sequence(lambda n: 'Roger Taylor{0}'.format(n))
password = 'killerqueen'
@classmethod
def _build(cls, target_class, username, password, fullname):
'''Build an object without saving it.'''
return target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
@classmethod
def _create(cls, target_class, username, password, fullname):
instance = target_class.create_unconfirmed(
username=username, password=password, fullname=fullname
)
instance.save()
return instance
class AuthFactory(base.Factory):
FACTORY_FOR = Auth
user = SubFactory(UserFactory)
api_key = SubFactory(ApiKeyFactory)
class ProjectWithAddonFactory(ProjectFactory):
"""Factory for a project that has an addon. The addon will be added to
both the Node and the creator records. ::
p = ProjectWithAddonFactory(addon='github')
p.get_addon('github') # => github node settings object
p.creator.get_addon('github') # => github user settings object
"""
# TODO: Should use mock addon objects
@classmethod
def _build(cls, target_class, addon='s3', *args, **kwargs):
'''Build an object without saving it.'''
instance = ProjectFactory._build(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
return instance
@classmethod
def _create(cls, target_class, addon='s3', *args, **kwargs):
instance = ProjectFactory._create(target_class, *args, **kwargs)
auth = Auth(user=instance.creator)
instance.add_addon(addon, auth)
instance.creator.add_addon(addon)
instance.save()
return instance
# Deprecated unregistered user factory, used mainly for testing migration
class DeprecatedUnregUser(object):
'''A dummy "model" for an unregistered user.'''
def __init__(self, nr_name, nr_email):
self.nr_name = nr_name
self.nr_email = nr_email
def to_dict(self):
return {"nr_name": self.nr_name, "nr_email": self.nr_email}
class DeprecatedUnregUserFactory(base.Factory):
"""Generates a dictonary represenation of an unregistered user, in the
format expected by the OSF.
::
>>> from tests.factories import UnregUserFactory
>>> UnregUserFactory()
{'nr_name': 'Tom Jones0', 'nr_email': 'tom0@example.com'}
>>> UnregUserFactory()
{'nr_name': 'Tom Jones1', 'nr_email': 'tom1@example.com'}
"""
FACTORY_FOR = DeprecatedUnregUser
nr_name = Sequence(lambda n: "Tom Jones{0}".format(n))
nr_email = Sequence(lambda n: "tom{0}@example.com".format(n))
@classmethod
def _create(cls, target_class, *args, **kwargs):
return target_class(*args, **kwargs).to_dict()
_build = _create
class CommentFactory(ModularOdmFactory):
FACTORY_FOR = Comment
content = Sequence(lambda n: 'Comment {0}'.format(n))
is_public = True
@classmethod
def _build(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
node = kwargs.pop('node', None) or NodeFactory()
user = kwargs.pop('user', None) or node.creator
target = kwargs.pop('target', None) or node
instance = target_class(
node=node,
user=user,
target=target,
*args, **kwargs
)
instance.save()
return instance
class NotificationSubscriptionFactory(ModularOdmFactory):
FACTORY_FOR = NotificationSubscription
class NotificationDigestFactory(ModularOdmFactory):
FACTORY_FOR = NotificationDigest
class ExternalAccountFactory(ModularOdmFactory):
FACTORY_FOR = ExternalAccount
provider = 'mock2'
provider_id = Sequence(lambda n: 'user-{0}'.format(n))
provider_name = 'Fake Provider'
display_name = Sequence(lambda n: 'user-{0}'.format(n))
class SessionFactory(ModularOdmFactory):
FACTORY_FOR = Session
@classmethod
def _build(cls, target_class, *args, **kwargs):
user = kwargs.pop('user', None)
instance = target_class(*args, **kwargs)
if user:
instance.data['auth_user_username'] = user.username
instance.data['auth_user_id'] = user._primary_key
instance.data['auth_user_fullname'] = user.fullname
return instance
@classmethod
def _create(cls, target_class, *args, **kwargs):
instance = cls._build(target_class, *args, **kwargs)
instance.save()
return instance
class MockOAuth2Provider(ExternalProvider):
name = "Mock OAuth 2.0 Provider"
short_name = "mock2"
client_id = "mock2_client_id"
client_secret = "mock2_client_secret"
auth_url_base = "https://mock2.com/auth"
callback_url = "https://mock2.com/callback"
def handle_callback(self, response):
return {
'provider_id': 'mock_provider_id'
}
class MockAddonNodeSettings(addons_base.AddonNodeSettingsBase):
pass
class MockAddonUserSettings(addons_base.AddonUserSettingsBase):
pass
class MockAddonUserSettingsMergeable(addons_base.AddonUserSettingsBase):
def merge(self):
pass
class MockOAuthAddonUserSettings(addons_base.AddonOAuthUserSettingsBase):
oauth_provider = MockOAuth2Provider
class MockOAuthAddonNodeSettings(addons_base.AddonOAuthNodeSettingsBase):
oauth_provider = MockOAuth2Provider
class ArchiveTargetFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveTarget
class ArchiveJobFactory(ModularOdmFactory):
FACTORY_FOR = ArchiveJob
|
fabianvf/osf.io
|
tests/factories.py
|
Python
|
apache-2.0
| 14,122
|
[
"Brian"
] |
fa5d4c16b73bd11312c1be249e7bd29b0a52f03d15bc5f96e32a0d5f5c466318
|
# A library of things to help with simple symmetry operation stuff.
#
# FIXED 17/NOV/06 add a method in here to give a list of likely, and then
# less likely, spacegroups based on an input spacegroup.
# For instance, if the input spacegroup is P 41 21 2 then
# another likely spacegroup is P 43 21 2 and less likely
# spacegroups are all those in the same pointgroup with
# different screw axes - e.g. P 41 2 2 (thinking of an Ed
# Mitchell example.) This should also allow in the likely
# case for body centred spacegroups where the screw axes
# are hidden, for example I 2 2 2/I 21 21 21 and I 2 3/I 21 3.
# This is now handled by Pointless in the "likely spacegroups"
# section.
#
# FIXME 06/DEC/06 need a mapping table from "old" spacegroup names to e.g. xHM
# for use with phenix.hyss.
from __future__ import annotations
import os
def lattice_to_spacegroup(lattice):
"""Convert a lattice e.g. tP into the minimal spacegroup number
to represent this."""
_lattice_to_spacegroup = {
"aP": 1,
"mP": 3,
"mC": 5,
"mI": 5,
"oP": 16,
"oC": 20,
"oF": 22,
"oI": 23,
"tP": 75,
"tI": 79,
"hP": 143,
"hR": 146,
"cP": 195,
"cF": 196,
"cI": 197,
}
if lattice not in _lattice_to_spacegroup:
raise RuntimeError('lattice "%s" unknown' % lattice)
return _lattice_to_spacegroup[lattice]
def spacegroup_name_xHM_to_old(xHM):
"""Convert to an old name."""
# generate mapping table
mapping = {}
current_old = ""
current_xHM = ""
old_names = set()
syminfo = os.path.join(os.environ["CCP4"], "lib", "data", "syminfo.lib")
with open(syminfo) as fh:
for line in fh.readlines():
if line[0] == "#":
continue
if "symbol old" in line:
current_old = line.split("'")[1]
if "symbol xHM" in line:
current_xHM = line.split("'")[1]
if "end_spacegroup" in line:
mapping[current_xHM] = current_old
old_names.add(current_old)
xHM = xHM.upper()
if xHM not in mapping:
if xHM in old_names:
return xHM
raise RuntimeError("spacegroup %s unknown" % xHM)
return mapping[xHM]
def clean_reindex_operator(symop):
return str(symop).replace("[", "").replace("]", "")
def lattices_in_order():
"""Return a list of possible crystal lattices (e.g. tP) in order of
increasing symmetry..."""
# eliminated this entry ... 'oA': 38,
lattices = [
"aP",
"mP",
"mC",
"oP",
"oC",
"oF",
"oI",
"tP",
"tI",
"hP",
"hR",
"cP",
"cF",
"cI",
]
# FIXME this should = lattice!
spacegroup_to_lattice = {
lattice_to_spacegroup(lattice): lattice for lattice in lattices
}
# lattice_to_spacegroup(lattice)
spacegroups = sorted(spacegroup_to_lattice)
return [spacegroup_to_lattice[s] for s in spacegroups]
def sort_lattices(lattices):
ordered_lattices = []
for l in lattices_in_order():
if l in lattices:
ordered_lattices.append(l)
return ordered_lattices
def lauegroup_to_lattice(lauegroup):
"""Convert a Laue group representation (from pointless, e.g. I m m m)
to something useful, like the implied crystal lattice (in this
case, oI.)"""
# this has been calculated from the results of Ralf GK's sginfo and a
# little fiddling...
#
# 19/feb/08 added mI record as pointless has started producing this -
# why??? this is not a "real" spacegroup... may be able to switch this
# off...
# 'I2/m': 'mI',
lauegroup_to_lattice = {
"Ammm": "oA",
"C2/m": "mC",
"Cmmm": "oC",
"Fm-3": "cF",
"Fm-3m": "cF",
"Fmmm": "oF",
"H-3": "hR",
"H-3m": "hR",
"R-3:H": "hR",
"R-3m:H": "hR",
"I4/m": "tI",
"I4/mmm": "tI",
"Im-3": "cI",
"Im-3m": "cI",
"Immm": "oI",
"P-1": "aP",
"P-3": "hP",
"P-3m": "hP",
"P2/m": "mP",
"P4/m": "tP",
"P4/mmm": "tP",
"P6/m": "hP",
"P6/mmm": "hP",
"Pm-3": "cP",
"Pm-3m": "cP",
"Pmmm": "oP",
}
updated_laue = ""
for l in lauegroup.split():
if not l == "1":
updated_laue += l
return lauegroup_to_lattice[updated_laue]
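Since the helpers above are plain functions, they can be exercised directly (only spacegroup_name_xHM_to_old needs a CCP4 environment):
if __name__ == "__main__":
    print(lattice_to_spacegroup("tP"))        # 75
    print(lauegroup_to_lattice("I m m m"))    # 'oI'
    print(sort_lattices(["cI", "aP", "tP"]))  # ['aP', 'tP', 'cI']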
|
xia2/xia2
|
src/xia2/lib/SymmetryLib.py
|
Python
|
bsd-3-clause
| 4,740
|
[
"CRYSTAL"
] |
46d38c89729e309f5e903a89a6fe41bbb11eac9866ebab06dffc259ab1bd69b7
|
#!/usr/bin/env python
"""NeutronPy: Neutron scattering tools for scientific data analysis in python
NeutronPy is a collection of commonly used tools aimed at facilitating the
analysis of neutron scattering data. NeutronPy is built primarily using the
numpy and scipy python libraries, with a translation of ResLib 3.4c (MatLab)
routines for Instrument resolution calculations.
"""
import os
import re
import subprocess
import warnings
from math import ceil, log10
from setuptools import setup
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Natural Language :: English
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Topic :: Scientific/Engineering :: Physics
Operating System :: Microsoft :: Windows
Operating System :: POSIX :: Linux
Operating System :: Unix
Operating System :: MacOS :: MacOS X
"""
DOCLINES = __doc__.split("\n")
def get_version():
r"""Determines version of package using either git describe or via the
folder name. Defaults to 0.0.0 if none is found, and warns user to
use a supported install method.
"""
vpat = re.compile(r"^([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$")
try:
v = subprocess.check_output(["git", "describe", "--tags"]).rstrip().decode('ascii')
except subprocess.CalledProcessError:
ospat = re.compile(r".*neutronpy-(.+)")
osmatch = ospat.match(os.path.dirname(os.path.realpath(__file__)))
if osmatch is not None:
v = osmatch.groups()[0]
else:
warnings.warn("Cannot find current version of neutronpy, please use supported install method.")
v = "0.0.0"
if '-' in v:
v, ntag = v.split('-')[0:2]
vmatch = vpat.match(v)
epoch, major, minor, patch, pre, pretype, prever, post, postver, dev, devver = vmatch.groups()
if post is not None:
post = "post{0}".format(int(postver + 1))
elif pre is not None:
pre = "{0}{1}".format(pretype, int(prever) + 1)
else:
patch = "{0}".format(int(patch) + 1)
if dev is not None:
devver = int(devver)
            ntag_mag = ceil(log10(int(ntag)))
            dev = "dev{0}".format(int(devver) * 10 ** ntag_mag + int(ntag))
else:
dev = "dev{0}".format(ntag)
front_vers = [major, minor, patch]
back_vers = [pre, post, dev]
__version__ = '.'.join([item.strip('.') for item in front_vers if item is not None])
__version__ += '.'.join([item.strip('.') for item in back_vers if item is not None])
if epoch is not None:
__version__ = "{0}!{1}".format(epoch, __version__)
else:
__version__ = v
return __version__
def setup_package():
r"""Setup package function
"""
metadata = dict(name='neutronpy',
version=get_version(),
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
author='David M Fobes',
author_email='dfobes@lanl.gov',
maintainer='davidfobes',
download_url='https://github.com/neutronpy/neutronpy/releases',
url='https://github.com/neutronpy/neutronpy',
license='MIT',
platforms=["Windows", "Linux", "Mac OS X", "Unix"],
install_requires=['numpy>=1.10', 'scipy>=1.0', 'matplotlib>=2.0', 'lmfit>=0.9.5', 'h5py'],
setup_requires=['pytest-runner'],
tests_require=['pytest','mock', 'codecov'],
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
ext_package='neutronpy',
package_data={'neutronpy': ['database/*.json', 'ui/*.ui']},
packages=['neutronpy', 'neutronpy.crystal', 'neutronpy.data', 'neutronpy.fileio',
'neutronpy.fileio.loaders', 'neutronpy.instrument', 'neutronpy.scattering',
'neutronpy.lsfit'],
entry_points={"console_scripts": ["neutronpy=neutronpy.gui:launch"]}, )
setup(**metadata)
if __name__ == '__main__':
setup_package()
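A standalone illustration of the PEP 440 pattern used in get_version(); the candidate version strings are made up.
# Demonstrates which version strings the pattern accepts.
import re
vpat = re.compile(r"^([1-9]\d*!)?(0|[1-9]\d*)(\.(0|[1-9]\d*))*((a|b|rc)(0|[1-9]\d*))?(\.post(0|[1-9]\d*))?(\.dev(0|[1-9]\d*))?$")
for candidate in ["1.0.5", "2.0.0rc1", "1.1.0.post3", "0.3.4.dev12", "not-a-version"]:
    print(candidate, bool(vpat.match(candidate)))  # all True except the last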
|
granrothge/neutronpy
|
setup.py
|
Python
|
mit
| 4,445
|
[
"CRYSTAL"
] |
097c5a1f7485d2c1cbf4bd4b9eeeb731bd58210968e56d2f2fba412c84fcdfe1
|
#!/usr/bin/env python
import sys
import math
import simtk.openmm.app.element as element
import simtk.unit as unit
import subprocess
import datetime
from six.moves import cStringIO
import mdtraj as md
import logging
logger = logging.getLogger(__name__)
def fix(atomClass):
if atomClass == 'X':
return ''
return atomClass
elements = {}
for elem in element.Element._elements_by_symbol.values():
num = elem.atomic_number
if num not in elements or elem.mass < elements[num].mass:
elements[num] = elem
OTHER = 0
ATOMS = 1
CONNECT = 2
CONNECTIVITY = 3
RESIDUECONNECT = 4
section = OTHER
charge14scale = 1.0 / 1.2
epsilon14scale = 0.5
skipResidues = ['CIO', 'IB'] # "Generic" ions defined by Amber, which are identical to other real ions
skipClasses = ['OW', 'HW'] # Skip water atoms, since we define these in separate files
class AmberParser(object):
def __init__(self, override_mol2_residue_name=None):
"""Create an AmberParser object for converting amber force field files to XML format.
Parameters
----------
override_mol2_residue_name : str, default=None
If given, use this name to override mol2 residue names.
Useful to ensure that multiple ligands have unique residue
names, as required by the OpenMM ffXML parser.
"""
self.override_mol2_residue_name = override_mol2_residue_name
self.current_mol2 = 0
self.residueAtoms = {}
self.residueBonds = {}
self.residueConnections = {}
self.types = []
self.type_names = []
self.masses = {}
self.resAtomTypes = {}
self.vdwEquivalents = {}
self.vdw = {}
self.charge = {}
self.bonds = []
self.angles = []
self.torsions = []
self.impropers = []
self.set_provenance()
def addAtom(self, residue, atomName, atomClass, element, charge, use_numeric_types=True):
"""Add an atom to the database of FF data.
Notes
-----
use_numeric_types was not originally present in the OpenMM AMBER
parsers. It was added so that we can have atom types of the form
"RES-X", where RES is the name of the molecule or residue and X
is the atom numbering within that molecule. use_numeric_types is
set to False when processing mol2 files--e.g. for ligands.
"""
if residue is None:
return
type_id = len(self.types)
self.residueAtoms[residue].append([atomName, type_id])
self.types.append((atomClass, element, charge))
if use_numeric_types:
self.type_names.append("%d" % (type_id))
else:
self.type_names.append("%s-%s" % (residue, atomName))
def addBond(self, residue, atom1, atom2):
"""Add a bond to the database of FF data."""
if residue is None:
return
self.residueBonds[residue].append((atom1, atom2))
def addExternalBond(self, residue, atom):
"""Add an external bond to the database of FF data."""
if residue is None:
return
if atom != -1:
self.residueConnections[residue] += [atom]
def process_mol2_file(self, inputfile):
"""Process an AMBER GAFF-compatible mol2 file.
Parameters
----------
inputfile : str
filename of an .mol2 file
Notes
-----
Antechamber is known to produce NONSTANDARD mol2 files. This function
is designed to work with those nonstandard mol2 files, not
Tripos standard mol2 files. We are forced to live with the poor
decisions of our predecessors...
"""
atoms, bonds = md.formats.mol2.mol2_to_dataframes(inputfile)
if self.override_mol2_residue_name is None:
residue_name = atoms.resName[1] # To Do: Add check for consistency
else:
residue_name = self.override_mol2_residue_name
# Give each mol2 file a unique numbering to avoid conflicts.
residue_name = "%s-%d" % (residue_name, self.current_mol2)
self.current_mol2 += 1
self.residueAtoms[residue_name] = []
self.residueBonds[residue_name] = []
self.residueConnections[residue_name] = []
for (i0, i1, name, x, y, z, atype, code, resname, charge) in atoms.itertuples(index=True):
# i0 and i1 are zero-based and one-based indices, respectively
full_name = residue_name + "_" + name
element_symbol = md.formats.mol2.gaff_elements[atype]
e = element.Element.getBySymbol(element_symbol)
self.addAtom(residue_name, name, atype, e, charge, use_numeric_types=False) # use_numeric_types set to false to use string-based atom names, rather than numbers
self.vdwEquivalents[full_name] = atype
for (id0, id1, bond_type) in bonds.itertuples(False):
i = id0 - 1 # Subtract 1 for zero based indexing in OpenMM???
j = id1 - 1 # Subtract 1 for zero based indexing in OpenMM???
self.addBond(residue_name, i, j)
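A hypothetical end-to-end use of the mol2 path; the file names are placeholders and generate_xml() is defined further down in this class.
# Hypothetical usage; "ligand.gaff.mol2" and "gaff.dat" are placeholders.
parser = AmberParser(override_mol2_residue_name="LIG")
parser.process_mol2_file("ligand.gaff.mol2")
parser.process_dat_file("gaff.dat")  # supplies masses, bonded terms and vdW parameters
stream = parser.generate_xml()
with open("ligand.ffxml", "w") as out:
    out.write(stream.getvalue())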
def process_library_file(self, inputfile):
"""Process an AMBER .lib file.
Parameters
----------
inputfile : str
filename of an .lib file
"""
for line in open(inputfile):
if line.startswith('!entry'):
fields = line.split('.')
residue = fields[1]
if residue in skipResidues:
residue = None
continue
key = fields[3].split()[0]
if key == 'atoms':
section = ATOMS
self.residueAtoms[residue] = []
self.residueBonds[residue] = []
self.residueConnections[residue] = []
elif key == 'connect':
section = CONNECT
elif key == 'connectivity':
section = CONNECTIVITY
elif key == 'residueconnect':
section = RESIDUECONNECT
else:
section = OTHER
elif section == ATOMS:
fields = line.split()
atomName = fields[0][1:-1]
atomClass = fields[1][1:-1]
if fields[6] == '-1':
# Workaround for bug in some Amber files.
if atomClass[0] == 'C':
elem = elements[6]
elif atomClass[0] == 'H':
elem = elements[1]
else:
raise ValueError('Illegal atomic number: ' + line)
else:
elem = elements[int(fields[6])]
self.charge = float(fields[7])
self.addAtom(residue, atomName, atomClass, elem, self.charge)
elif section == CONNECT:
self.addExternalBond(residue, int(line) - 1)
elif section == CONNECTIVITY:
fields = line.split()
self.addBond(residue, int(fields[0]) - 1, int(fields[1]) - 1)
elif section == RESIDUECONNECT:
# Some Amber files have errors in them, incorrectly listing atoms that should not be
# connected in the first two positions. We therefore rely on the "connect" section for
# those, using this block only for other external connections.
for atom in [int(x) - 1 for x in line.split()[2:]]:
self.addExternalBond(residue, atom)
def process_dat_file(self, inputfile):
"""Process an AMBER .dat file.
Parameters
----------
inputfile : str
filename of an .dat file
"""
block = 0
continueTorsion = False
for line in open(inputfile):
line = line.strip()
if block == 0: # Title
block += 1
elif block == 1: # Mass
fields = line.split()
if len(fields) == 0:
block += 1
else:
self.masses[fields[0]] = float(fields[1])
elif block == 2: # Hydrophilic atoms
block += 1
elif block == 3: # Bonds
if len(line) == 0:
block += 1
else:
fields = line[5:].split()
self.bonds.append((line[:2].strip(), line[3:5].strip(), fields[0], fields[1]))
elif block == 4: # Angles
if len(line) == 0:
block += 1
else:
fields = line[8:].split()
self.angles.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), fields[0], fields[1]))
elif block == 5: # Torsions
if len(line) == 0:
block += 1
else:
fields = line[11:].split()
periodicity = int(float(fields[3]))
if continueTorsion:
self.torsions[-1] += [float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)]
else:
self.torsions.append([line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)])
continueTorsion = (periodicity < 0)
elif block == 6: # Improper torsions
if len(line) == 0:
block += 1
else:
fields = line[11:].split()
self.impropers.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), fields[0], fields[1], fields[2]))
elif block == 7: # 10-12 hbond potential
if len(line) == 0:
block += 1
elif block == 8: # VDW equivalents
if len(line) == 0:
block += 1
else:
fields = line.split()
for atom in fields[1:]:
self.vdwEquivalents[atom] = fields[0]
elif block == 9: # VDW type
block += 1
self.vdwType = line.split()[1]
if self.vdwType not in ['RE', 'AC']:
raise ValueError('Nonbonded type (KINDNB) must be RE or AC')
elif block == 10: # VDW parameters
if len(line) == 0:
block += 1
else:
fields = line.split()
self.vdw[fields[0]] = (fields[1], fields[2])
def process_frc_file(self, inputfile):
"""Process an AMBER .frc file.
Parameters
----------
inputfile : str
filename of an .frc file
"""
block = ''
continueTorsion = False
first = True
for line in open(inputfile):
line = line.strip()
if len(line) == 0 or first:
block = None
first = False
elif block is None:
block = line
elif block.startswith('MASS'):
fields = line.split()
self.masses[fields[0]] = float(fields[1])
elif block.startswith('BOND'):
fields = line[5:].split()
self.bonds.append((line[:2].strip(), line[3:5].strip(), fields[0], fields[1]))
elif block.startswith('ANGL'):
fields = line[8:].split()
self.angles.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), fields[0], fields[1]))
elif block.startswith('DIHE'):
fields = line[11:].split()
periodicity = int(float(fields[3]))
if continueTorsion:
self.torsions[-1] += [float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)]
else:
self.torsions.append([line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), float(fields[1]) / float(fields[0]), fields[2], abs(periodicity)])
continueTorsion = (periodicity < 0)
elif block.startswith('IMPR'):
fields = line[11:].split()
self.impropers.append((line[:2].strip(), line[3:5].strip(), line[6:8].strip(), line[9:11].strip(), fields[0], fields[1], fields[2]))
elif block.startswith('NONB'):
fields = line.split()
self.vdw[fields[0]] = (fields[1], fields[2])
def generate_xml(self):
"""Return the processed forcefield files as an XML stream.
Returns
-------
stream : cStringIO
The text of the output XML forcefield data.
Notes
-----
The stream can be written to disk via:
outfile = open("my_forcefield.xml", 'w')
outfile.write(stream.read())
outfile.close()
"""
stream = cStringIO()
write_stream = lambda x: stream.write(x + "\n")
write_stream(self.provenance)
write_stream("<ForceField>")
write_stream(" <AtomTypes>")
for index, type in enumerate(self.types):
write_stream(""" <Type name="%s" class="%s" element="%s" mass="%s"/>""" % (self.type_names[index], type[0], type[1].symbol, type[1].mass.value_in_unit(unit.amu)))
write_stream(" </AtomTypes>")
write_stream(" <Residues>")
for res in sorted(self.residueAtoms):
write_stream(""" <Residue name="%s">""" % res)
for atom in self.residueAtoms[res]:
atom_name, type_id = tuple(atom)
atom_type = self.type_names[type_id]
write_stream(" <Atom name=\"%s\" type=\"%s\"/>" % (atom_name, atom_type))
if res in self.residueBonds:
for bond in self.residueBonds[res]:
write_stream(""" <Bond from="%d" to="%d"/>""" % bond)
if res in self.residueConnections:
for bond in self.residueConnections[res]:
write_stream(""" <ExternalBond from="%d"/>""" % bond)
write_stream(" </Residue>")
write_stream(" </Residues>")
write_stream(" <HarmonicBondForce>")
processed = set()
for bond in self.bonds:
signature = (bond[0], bond[1])
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
length = float(bond[3]) * 0.1
k = float(bond[2]) * 2 * 100 * 4.184
write_stream(""" <Bond class1="%s" class2="%s" length="%s" k="%s"/>""" % (bond[0], bond[1], str(length), str(k)))
write_stream(" </HarmonicBondForce>")
write_stream(" <HarmonicAngleForce>")
processed = set()
for angle in self.angles:
signature = (angle[0], angle[1], angle[2])
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
theta = float(angle[4]) * math.pi / 180.0
k = float(angle[3]) * 2 * 4.184
write_stream(""" <Angle class1="%s" class2="%s" class3="%s" angle="%s" k="%s"/>""" % (angle[0], angle[1], angle[2], str(theta), str(k)))
write_stream(" </HarmonicAngleForce>")
write_stream(" <PeriodicTorsionForce>")
processed = set()
for tor in reversed(self.torsions):
signature = (fix(tor[0]), fix(tor[1]), fix(tor[2]), fix(tor[3]))
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
tag = " <Proper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
i = 4
while i < len(tor):
index = i // 3
periodicity = int(float(tor[i + 2]))
phase = float(tor[i + 1]) * math.pi / 180.0
k = tor[i] * 4.184
tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
i += 3
tag += "/>"
write_stream(tag)
processed = set()
for tor in reversed(self.impropers):
signature = (fix(tor[2]), fix(tor[0]), fix(tor[1]), fix(tor[3]))
if signature in processed:
continue
if any([c in skipClasses for c in signature]):
continue
processed.add(signature)
tag = " <Improper class1=\"%s\" class2=\"%s\" class3=\"%s\" class4=\"%s\"" % signature
i = 4
while i < len(tor):
index = i // 3
periodicity = int(float(tor[i + 2]))
phase = float(tor[i + 1]) * math.pi / 180.0
k = float(tor[i]) * 4.184
tag += " periodicity%d=\"%d\" phase%d=\"%s\" k%d=\"%s\"" % (index, periodicity, index, str(phase), index, str(k))
i += 3
tag += "/>"
write_stream(tag)
write_stream(" </PeriodicTorsionForce>")
write_stream(""" <NonbondedForce coulomb14scale="%g" lj14scale="%s">""" % (charge14scale, epsilon14scale))
sigmaScale = 0.1 * 2.0 / (2.0 ** (1.0 / 6.0))
for index, type in enumerate(self.types):
atomClass = type[0]
q = type[2]
if atomClass in self.vdwEquivalents:
atomClass = self.vdwEquivalents[atomClass]
if atomClass in self.vdw:
params = [float(x) for x in self.vdw[atomClass]]
if self.vdwType == 'RE':
sigma = params[0] * sigmaScale
epsilon = params[1] * 4.184
else:
sigma = (params[0] / params[1]) ** (1.0 / 6.0)
epsilon = 4.184 * params[1] * params[1] / (4 * params[0])
else:
sigma = 0
epsilon = 0
if q != 0 or epsilon != 0:
write_stream(""" <Atom type="%s" charge="%s" sigma="%s" epsilon="%s"/>""" % (self.type_names[index], q, sigma, epsilon))
write_stream(" </NonbondedForce>")
write_stream("</ForceField>")
stream.seek(0)
return stream
def parse_filenames(self, filenames):
"""Process a list of filenames according to their filetype suffixes
Parameters
----------
filenames : list (of strings)
List of filenames of type (lib, off, dat, or mol2)
Notes
-----
When parameterizing small molecules, the correct order of inputs is:
$AMBER_LIB_PATH/gaff.dat ligand_name.mol2 ligand_name.frcmod
"""
for inputfile in filenames:
if inputfile.endswith('.lib') or inputfile.endswith('.off'):
self.process_library_file(inputfile)
elif inputfile.endswith('.dat'):
self.process_dat_file(inputfile)
elif inputfile.endswith("mol2"):
self.process_mol2_file(inputfile)
else:
self.process_frc_file(inputfile)
self.reduce_atomtypes()
def reduce_atomtypes(self, symmetrize_protons=False):
"""Reduce the list of atom self.types.
Parameters
----------
symmetrize_protons : bool, default=False
if True, multiple hydrogens bound to the same heavy atom
should all use the same type.
Notes
-----
The default behavior of symmetrize_protons differs from the
original OpenMM version of this script. For arbitrary small
molecules, we can not assume symmetric protons.
"""
removeType = [False] * len(self.types)
for res in self.residueAtoms:
if res not in self.residueBonds:
continue
atomBonds = [[] for atom in self.residueAtoms[res]]
for bond in self.residueBonds[res]:
atomBonds[bond[0]].append(bond[1])
atomBonds[bond[1]].append(bond[0])
if symmetrize_protons is True:
for index, atom in enumerate(self.residueAtoms[res]):
hydrogens = [x for x in atomBonds[index] if self.types[self.residueAtoms[res][x][1]][1] == element.hydrogen]
for h in hydrogens[1:]:
removeType[self.residueAtoms[res][h][1]] = True
self.residueAtoms[res][h][1] = self.residueAtoms[res][hydrogens[0]][1]
newTypes = []
replaceWithType = [0] * len(self.types)
for i in range(len(self.types)):
if not removeType[i]:
newTypes.append(self.types[i])
replaceWithType[i] = len(newTypes) - 1
self.types = newTypes
for res in self.residueAtoms:
for atom in self.residueAtoms[res]:
atom[1] = replaceWithType[atom[1]]
def set_provenance(self):
"""Set the provenance attribute with information about the current python session."""
self.provenance = []
line = """<!-- %s -->\n""" % "Time and parameters of origin:"
self.provenance.append(line)
now = datetime.datetime.now()
line = """<!-- %s -->\n""" % str(now)
self.provenance.append(line)
cmd_string = subprocess.list2cmdline(sys.argv[1:])
cmd_string = cmd_string.replace("-", " ") # Replace XML specific characters that can break some XML parsers
cmd_string = cmd_string.replace(">", " ") #
cmd_string = cmd_string.replace("<", " ") #
line = """<!-- %s -->\n""" % cmd_string
self.provenance.append(line)
self.provenance = "".join(self.provenance)
|
Clyde-fare/openmoltools
|
openmoltools/amber_parser.py
|
Python
|
gpl-2.0
| 22,160
|
[
"Amber",
"MDTraj",
"OpenMM"
] |
1a112a72d0074aeccafd4d8443ca578cc17e48893444b8f7daccadd2e1b23c9f
|
import random
import math
import hashlib
import datetime
import time
import numpy as np
import btcanalysis as btc
layers=[4,10,3]
layern=len(layers)
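# Network shape used throughout: layers[0] inputs, one hidden layer of layers[1] neurons, and
# layers[-1] outputs; score() below reads output 0 as SELL, 2 as BUY on margin, anything else as hold.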
randn=0
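# rand() below derives a float in [0, 1) by SHA-256 hashing the current time plus a running
# counter; it is a stand-in for random.random(), not a cryptographically meaningful RNG.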
def rand():
a=time.time()
global randn
b=hashlib.sha256((str(a)+str(randn)).encode()).hexdigest()
c=int(b,16)
randn=randn+math.pow(c,0.5)
d=float(c%1000000)
return d/1000000
def rand_array(n):
r=[]
for i in range(n):
r.append(rand())
return r
def logistics(sumin):
r=1/(1+math.pow(math.e,-1*sumin))
return r
def score(outputset, onedaychangedata):
m=1
a=0
while a<len(outputset)-1:
if outputset[a]==0: #SELL
m=m/(1+onedaychangedata[a+1])
elif outputset[a]==2: #BUY on margin
m=m*(1+onedaychangedata[a+1])
a=a+1
return m
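# Worked example with made-up numbers: score([2, 0], [0.00, 0.10]) only evaluates day 0's action;
# action 2 (BUY on margin) multiplies the stake by (1 + 0.10) -> 1.10, while action 0 (SELL) on the
# same data would divide by 1.10 instead. Any other action leaves the multiplier unchanged.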
class unit:
def __init__(self):
self.neurons=[]
self.axons=[]
for x in layers:
r=[]
for i in range(0,x):
r.append(0)
self.neurons.append(r)
for i in range(0,layern-1):
#self.axons.append(rand_array(layers[i]*layers[i+1]))
g=[0]*layers[i]*layers[i+1]
self.axons.append(g)
def reset(self):
self.neurons=[]
for x in layers:
r=[]
for i in range(0,x):
r.append(0)
self.neurons.append(r)
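# run() below performs one forward pass: each layer is squashed with logistics() and pushed along
# the axon weights, and the index of the strongest output neuron is returned as the action. Note
# that the inner loop assigns (rather than accumulates) into the next layer, so only the last
# source neuron's weighted output survives into each target neuron.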
def run(self, inputs):
global outs
self.reset()
b=0
while b<len(self.neurons[0]):
self.neurons[0][b]=inputs[b]
b=b+1
a=0
while a<layern-1:
outs=[]
for x in self.neurons[a]:
t=logistics(x)
outs.append(t)
nn=0
while nn<len(self.neurons[a]):
#for each neuron in layer
#scan all subsequent axons
aa=0
while aa<len(self.neurons[a+1]): #length of next layer
axonid=nn*len(self.neurons[a+1])+aa
axonweight=self.axons[a][axonid]
self.neurons[a+1][aa]=axonweight*outs[nn]
aa=aa+1
nn=nn+1
a=a+1
f=self.neurons[layern-1]
g=0
best=-1
bestn=-1
while g<len(f):
if f[g]>best:
best=f[g]
bestn=g
g=g+1
return bestn
def cycle(self,inputset):
#RUNS MANY TIMES
#inputset should be 2D array
g=0
outputs=[]
while g<len(inputset):
outputs.append(self.run(inputset[g]))
g=g+1
return outputs
class system:
def __init__(self, unit_n):
self.units=[unit() for i in range(unit_n)]
self.bestscore=0
self.bestaxons=[]
def compete(self,inputs):
r=[]
scores=[]
probability=[]
for x in self.units:
outs=x.cycle(inputs)
s=score(outs,btc.onedaychange)
if s>self.bestscore:
self.bestscore=s
self.bestaxons=x.axons
print(s)
probability.append(math.pow(s,2))
a=sum(probability)
if a==0:
a=1
p=[]
n=0
for x in probability:
p.append([float(x)/float(a),n])
n=n+1
p.sort()
return p
def choosesurvivors(self,probability):
n=len(self.units)
survivors=[]
while len(survivors)<n:
a=rand()
b=0
f=-1
while b<len(probability):
if a<=probability[b][0]:
f=probability[b][1]
b=len(probability)
elif a>probability[b][0]:
a=a-probability[b][0]
b=b+1
if not f==-1:
survivors.append(f)
return survivors
def recombine(self,a,b):
r=[]
c=0
while c<len(self.units[a].axons):
g=[]
d=0
while d<len(self.units[a].axons[c]):
g.append(self.units[a].axons[c][d]/2+self.units[b].axons[c][d]/2)
d=d+1
r.append(g)
c=c+1
return r
def permutate(self,factor):
a=0
while a<len(self.units):
b=0
while b<len(self.units[a].axons):
c=0
while c<len(self.units[a].axons[b]):
y=rand()*factor-0.5*factor
self.units[a].axons[b][c]=self.units[a].axons[b][c]+y
c=c+1
b=b+1
a=a+1
def breednew(self,survivors):
#FULL recombination
a=0
while a<len(survivors):
r=random.randint(0,len(survivors)-1)
g=self.recombine(a,r)
self.units[a].axons=g
a=a+1
def evolveonce(self,inputs):
p=self.compete(inputs)
survivors=self.choosesurvivors(p)
self.permutate(0.1)
self.breednew(survivors)
def evolve(self,inputs,generation_n):
a=0
while a<generation_n:
self.evolveonce(inputs)
a=a+1
print "generation "+str(a)
a=system(20)
inputset=[]
def init():
global inputset
btc.init()
a=0
while a<len(btc.price):
r=[]
r.append(btc.price[a])
r.append(btc.volume[a])
r.append(btc.onedaychange[a])
r.append(btc.Xdifference[a])
r.append(btc.Ydifference[a])
inputset.append(r)
a=a+1
init()
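# Hypothetical driver (not part of the original script): once init() has filled inputset from
# btcanalysis, a call such as a.evolve(inputset, 50) would run 50 generations of the
# compete / choosesurvivors / permutate / breednew loop defined above.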
|
barisser/perceptron
|
perceptron.py
|
Python
|
mit
| 5,632
|
[
"NEURON"
] |
213526d539da665a473265342d075a4963328278c0b89ee5bca11b0deb786e58
|
import enum
import json
import logging
import os
import platform
import sys
import textwrap
import uuid
import click
try:
from pybigquery.parse_url import parse_url as parse_bigquery_url
except (ImportError, ModuleNotFoundError):
parse_bigquery_url = None
import great_expectations.exceptions as ge_exceptions
from great_expectations import DataContext, rtd_url_ge_version
from great_expectations.cli.v012 import toolkit
from great_expectations.cli.v012.cli_messages import NO_DATASOURCES_FOUND
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.mark import Mark as mark
from great_expectations.cli.v012.util import (
CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
cli_message,
cli_message_dict,
verify_library_dependent_modules,
)
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_context.types.base import DatasourceConfigSchema
from great_expectations.datasource import (
PandasDatasource,
SparkDFDatasource,
SqlAlchemyDatasource,
)
from great_expectations.datasource.batch_kwargs_generator import (
ManualBatchKwargsGenerator,
)
from great_expectations.datasource.batch_kwargs_generator.table_batch_kwargs_generator import (
TableBatchKwargsGenerator,
)
from great_expectations.exceptions import (
BatchKwargsError,
DatasourceInitializationError,
)
from great_expectations.validator.validator import BridgeValidator
logger = logging.getLogger(__name__)
try:
import sqlalchemy
except ImportError:
logger.debug(
"Unable to load SqlAlchemy context; install optional sqlalchemy dependency for support"
)
sqlalchemy = None
class DatasourceTypes(enum.Enum):
PANDAS = "pandas"
SQL = "sql"
SPARK = "spark"
# TODO DBT = "dbt"
MANUAL_GENERATOR_CLASSES = ManualBatchKwargsGenerator
class SupportedDatabases(enum.Enum):
MYSQL = "MySQL"
POSTGRES = "Postgres"
REDSHIFT = "Redshift"
SNOWFLAKE = "Snowflake"
BIGQUERY = "BigQuery"
OTHER = "other - Do you have a working SQLAlchemy connection string?"
# TODO MSSQL
@click.group()
def datasource():
"""Datasource operations"""
pass
@datasource.command(name="new")
@click.option(
"--directory",
"-d",
default=None,
help="The project's great_expectations directory.",
)
def datasource_new(directory):
"""Add a new datasource to the data context."""
context = toolkit.load_data_context_with_error_handling(directory)
datasource_name, data_source_type = add_datasource(context)
if datasource_name:
cli_message(f"A new datasource '{datasource_name}' was added to your project.")
send_usage_message(
data_context=context,
event="cli.datasource.new",
api_version="v2",
success=True,
)
else: # no datasource was created
send_usage_message(
data_context=context,
event="cli.datasource.new",
api_version="v2",
success=False,
)
sys.exit(1)
@datasource.command(name="delete")
@click.option(
"--directory",
"-d",
default=None,
help="The project's great_expectations directory.",
)
@click.argument("datasource")
def delete_datasource(directory, datasource):
"""Delete the datasource specified as an argument"""
context = toolkit.load_data_context_with_error_handling(directory)
try:
context.delete_datasource(datasource)
except ValueError:
cli_message(f"<red>Datasource {datasource} could not be found.</red>")
sys.exit(1)
try:
context.get_datasource(datasource)
except ValueError:
cli_message("<green>Datasource deleted successfully.</green>")
sys.exit(1)
else:
cli_message("<red>Datasource not deleted.</red>")
sys.exit(1)
@datasource.command(name="list")
@click.option(
"--directory",
"-d",
default=None,
help="The project's great_expectations directory.",
)
def datasource_list(directory):
"""List known datasources."""
context = toolkit.load_data_context_with_error_handling(directory)
datasources = context.list_datasources()
datasource_count = len(datasources)
if datasource_count == 0:
list_intro_string = "No Datasources found"
else:
list_intro_string = _build_datasource_intro_string(datasource_count)
cli_message(list_intro_string)
for datasource in datasources:
cli_message("")
cli_message_dict(datasource)
send_usage_message(
data_context=context,
event="cli.datasource.list",
api_version="v2",
success=True,
)
def _build_datasource_intro_string(datasource_count):
if datasource_count == 1:
list_intro_string = "1 Datasource found:"
if datasource_count > 1:
list_intro_string = f"{datasource_count} Datasources found:"
return list_intro_string
@datasource.command(name="profile")
@click.argument("datasource", default=None, required=False)
@click.option(
"--batch-kwargs-generator-name",
"-g",
default=None,
help="The name of the batch kwargs generator configured in the datasource. It will list data assets in the "
"datasource",
)
@click.option(
"--data-assets",
"-l",
default=None,
help="Comma-separated list of the names of data assets that should be profiled. Requires datasource specified.",
)
@click.option(
"--profile_all_data_assets",
"-A",
is_flag=True,
default=False,
help="Profile ALL data assets within the target data source. "
"If True, this will override --max_data_assets.",
)
@click.option(
"--assume-yes",
"--yes",
"-y",
is_flag=True,
default=False,
help="By default request confirmation unless you specify -y/--yes/--assume-yes flag to skip dialog",
)
@click.option(
"--directory",
"-d",
default=None,
help="The project's great_expectations directory.",
)
@click.option(
"--view/--no-view",
help="By default open in browser unless you specify the --no-view flag",
default=True,
)
@click.option(
"--additional-batch-kwargs",
default=None,
help="Additional keyword arguments to be provided to get_batch when loading the data asset. Must be a valid JSON dictionary",
)
@mark.cli_as_experimental
def datasource_profile(
datasource,
batch_kwargs_generator_name,
data_assets,
profile_all_data_assets,
directory,
view,
additional_batch_kwargs,
assume_yes,
):
"""
Profile a datasource (Experimental)
If the optional data_assets and profile_all_data_assets arguments are not specified, the profiler will check
if the number of data assets in the datasource exceeds the internally defined limit. If it does, it will
prompt the user to either specify the list of data assets to profile or to profile all.
If the limit is not exceeded, the profiler will profile all data assets in the datasource.
"""
context = toolkit.load_data_context_with_error_handling(directory)
try:
if additional_batch_kwargs is not None:
# TODO refactor out json load check in suite edit and add here
additional_batch_kwargs = json.loads(additional_batch_kwargs)
# TODO refactor batch load check in suite edit and add here
if datasource is None:
datasources = [
_datasource["name"] for _datasource in context.list_datasources()
]
if not datasources:
cli_message(NO_DATASOURCES_FOUND)
send_usage_message(
data_context=context,
event="cli.datasource.profile",
api_version="v2",
success=False,
)
sys.exit(1)
elif len(datasources) > 1:
cli_message(
"<red>Error: please specify the datasource to profile. "
"Available datasources: " + ", ".join(datasources) + "</red>"
)
send_usage_message(
data_context=context,
event="cli.datasource.profile",
api_version="v2",
success=False,
)
sys.exit(1)
else:
profile_datasource(
context,
datasources[0],
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
open_docs=view,
additional_batch_kwargs=additional_batch_kwargs,
skip_prompt_flag=assume_yes,
)
send_usage_message(
data_context=context,
event="cli.datasource.profile",
api_version="v2",
success=True,
)
else:
profile_datasource(
context,
datasource,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
open_docs=view,
additional_batch_kwargs=additional_batch_kwargs,
skip_prompt_flag=assume_yes,
)
send_usage_message(
data_context=context,
event="cli.datasource.profile",
api_version="v2",
success=True,
)
except Exception as e:
send_usage_message(
data_context=context,
event="cli.datasource.profile",
api_version="v2",
success=False,
)
raise e
def add_datasource(context, choose_one_data_asset=False):
"""
Interactive flow for adding a datasource to an existing context.
:param context:
:param choose_one_data_asset: optional - if True, this signals the method that the intent
is to let user choose just one data asset (e.g., a file) and there is no need
to configure a batch kwargs generator that comprehensively scans the datasource for data assets
:return: a tuple: datasource_name, data_source_type
"""
msg_prompt_where_is_your_data = """
What data would you like Great Expectations to connect to?
1. Files on a filesystem (for processing with Pandas or Spark)
2. Relational database (SQL)
"""
msg_prompt_files_compute_engine = """
What are you processing your files with?
1. Pandas
2. PySpark
"""
data_source_location_selection = click.prompt(
msg_prompt_where_is_your_data, type=click.Choice(["1", "2"]), show_choices=False
)
datasource_name = None
data_source_type = None
if data_source_location_selection == "1":
data_source_compute_selection = click.prompt(
msg_prompt_files_compute_engine,
type=click.Choice(["1", "2"]),
show_choices=False,
)
if data_source_compute_selection == "1": # pandas
data_source_type = DatasourceTypes.PANDAS
datasource_name = _add_pandas_datasource(
context, passthrough_generator_only=choose_one_data_asset
)
elif data_source_compute_selection == "2": # Spark
data_source_type = DatasourceTypes.SPARK
datasource_name = _add_spark_datasource(
context, passthrough_generator_only=choose_one_data_asset
)
else:
data_source_type = DatasourceTypes.SQL
datasource_name = _add_sqlalchemy_datasource(context)
return datasource_name, data_source_type
def _add_pandas_datasource(
context, passthrough_generator_only=True, prompt_for_datasource_name=True
):
send_usage_message(
data_context=context,
event="cli.new_ds_choice",
event_payload={"type": "pandas"},
api_version="v2",
success=True,
)
if passthrough_generator_only:
datasource_name = "files_datasource"
configuration = PandasDatasource.build_configuration()
else:
path = click.prompt(
msg_prompt_filesys_enter_base_path,
type=click.Path(exists=True, file_okay=False),
)
if path.startswith("./"):
path = path[2:]
if path.endswith("/"):
basenamepath = path[:-1]
else:
basenamepath = path
datasource_name = f"{os.path.basename(basenamepath)}__dir"
if prompt_for_datasource_name:
datasource_name = click.prompt(
msg_prompt_datasource_name, default=datasource_name
)
configuration = PandasDatasource.build_configuration(
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join("..", path),
}
}
)
configuration["class_name"] = "PandasDatasource"
configuration["module_name"] = "great_expectations.datasource"
errors = DatasourceConfigSchema().validate(configuration)
if len(errors) != 0:
raise ge_exceptions.GreatExpectationsError(
f"Invalid Datasource configuration: {errors:s}"
)
cli_message(
"""
Great Expectations will now add a new Datasource '{:s}' to your deployment, by adding this entry to your great_expectations.yml:
{:s}
""".format(
datasource_name,
textwrap.indent(toolkit.yaml.dump({datasource_name: configuration}), " "),
)
)
toolkit.confirm_proceed_or_exit(
continuation_message="Okay, exiting now. To learn more about adding datasources, run great_expectations "
"datasource --help or visit https://docs.greatexpectations.io/"
)
context.add_datasource(name=datasource_name, **configuration)
return datasource_name
def _add_sqlalchemy_datasource(context, prompt_for_datasource_name=True):
msg_success_database = (
"\n<green>Great Expectations connected to your database!</green>"
)
if not _verify_sqlalchemy_dependent_modules():
return None
db_choices = [str(x) for x in list(range(1, 1 + len(SupportedDatabases)))]
selected_database = (
int(
click.prompt(
msg_prompt_choose_database,
type=click.Choice(db_choices),
show_choices=False,
)
)
- 1
) # don't show user a zero index list :)
selected_database = list(SupportedDatabases)[selected_database]
send_usage_message(
data_context=context,
event="cli.new_ds_choice",
event_payload={"type": "sqlalchemy", "db": selected_database.name},
api_version="v2",
success=True,
)
datasource_name = f"my_{selected_database.value.lower()}_db"
if selected_database == SupportedDatabases.OTHER:
datasource_name = "my_database"
if prompt_for_datasource_name:
datasource_name = click.prompt(
msg_prompt_datasource_name, default=datasource_name
)
credentials = {}
# Since we don't want to save the database credentials in the config file that will be
# committed in the repo, we will use our Variable Substitution feature to store the credentials
# in the credentials file (that will not be committed, since it is in the uncommitted directory)
# with the datasource's name as the variable name.
# The value of the datasource's "credentials" key in the config file (great_expectations.yml) will
# be ${datasource name}.
# GE will replace the ${datasource name} with the value from the credentials file in runtime.
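# Illustrative only (names and values are made up): the datasource entry written to
# great_expectations.yml ends up looking roughly like
#   my_postgres_db:
#     class_name: SqlAlchemyDatasource
#     credentials: ${my_postgres_db}
# while uncommitted/config_variables.yml holds the actual credentials under the same key:
#   my_postgres_db:
#     drivername: postgresql
#     host: localhost
#     username: ...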
while True:
cli_message(msg_db_config.format(datasource_name))
if selected_database == SupportedDatabases.MYSQL:
if not _verify_mysql_dependent_modules():
return None
credentials = _collect_mysql_credentials(default_credentials=credentials)
elif selected_database == SupportedDatabases.POSTGRES:
if not _verify_postgresql_dependent_modules():
return None
credentials = _collect_postgres_credentials(default_credentials=credentials)
elif selected_database == SupportedDatabases.REDSHIFT:
if not _verify_redshift_dependent_modules():
return None
credentials = _collect_redshift_credentials(default_credentials=credentials)
elif selected_database == SupportedDatabases.SNOWFLAKE:
if not _verify_snowflake_dependent_modules():
return None
credentials = _collect_snowflake_credentials(
default_credentials=credentials
)
elif selected_database == SupportedDatabases.BIGQUERY:
if not _verify_bigquery_dependent_modules():
return None
credentials = _collect_bigquery_credentials(default_credentials=credentials)
elif selected_database == SupportedDatabases.OTHER:
sqlalchemy_url = click.prompt(
"""What is the url/connection string for the sqlalchemy connection?
(reference: https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls)
""",
show_default=False,
).strip()
credentials = {"url": sqlalchemy_url}
context.save_config_variable(datasource_name, credentials)
message = """
<red>Cannot connect to the database.</red>
- Please check your environment and the configuration you provided.
- Database Error: {0:s}"""
try:
cli_message(
"<cyan>Attempting to connect to your database. This may take a moment...</cyan>"
)
configuration = SqlAlchemyDatasource.build_configuration(
credentials=f"${{{datasource_name}}}"
)
configuration["class_name"] = "SqlAlchemyDatasource"
configuration["module_name"] = "great_expectations.datasource"
errors = DatasourceConfigSchema().validate(configuration)
if len(errors) != 0:
raise ge_exceptions.GreatExpectationsError(
f"Invalid Datasource configuration: {errors:s}"
)
cli_message(
"""
Great Expectations will now add a new Datasource '{0:s}' to your deployment, by adding this entry to your great_expectations.yml:
{1:s}
The credentials will be saved in uncommitted/config_variables.yml under the key '{0:s}'
""".format(
datasource_name,
textwrap.indent(
toolkit.yaml.dump({datasource_name: configuration}), " "
),
)
)
toolkit.confirm_proceed_or_exit()
context.add_datasource(name=datasource_name, **configuration)
cli_message(msg_success_database)
break
except ModuleNotFoundError as de:
cli_message(message.format(str(de)))
return None
except DatasourceInitializationError as de:
cli_message(message.format(str(de)))
if not click.confirm("Enter the credentials again?", default=True):
context.add_datasource(
datasource_name,
initialize=False,
module_name="great_expectations.datasource",
class_name="SqlAlchemyDatasource",
data_asset_type={"class_name": "SqlAlchemyDataset"},
credentials=f"${{{datasource_name}}}",
)
# TODO this message about continuing may not be accurate
cli_message(
"""
We saved datasource {:s} in {:s} and the credentials you entered in {:s}.
Since we could not connect to the database, you can complete troubleshooting in the configuration files documented in the how-to guides here:
<blue>https://docs.greatexpectations.io/en/latest/guides/how_to_guides/configuring_datasources.html?utm_source=cli&utm_medium=init&utm_campaign={:s}#{:s}</blue> .
After you connect to the datasource, run great_expectations init to continue.
""".format(
datasource_name,
DataContext.GE_YML,
context.get_config()["config_variables_file_path"],
rtd_url_ge_version,
selected_database.value.lower(),
)
)
return None
return datasource_name
def _should_hide_input():
"""
This is a workaround to help identify Windows and adjust the prompts accordingly
since hidden prompts may freeze in certain Windows terminals
"""
if "windows" in platform.platform().lower():
return False
return True
def _collect_postgres_credentials(default_credentials=None):
if default_credentials is None:
default_credentials = {}
credentials = {"drivername": "postgresql"}
credentials["host"] = click.prompt(
"What is the host for the postgres connection?",
default=default_credentials.get("host", "localhost"),
).strip()
credentials["port"] = click.prompt(
"What is the port for the postgres connection?",
default=default_credentials.get("port", "5432"),
).strip()
credentials["username"] = click.prompt(
"What is the username for the postgres connection?",
default=default_credentials.get("username", "postgres"),
).strip()
# This is a minimal workaround we're doing to deal with hidden input problems using Git Bash on Windows
# TODO: Revisit this if we decide to fully support Windows and identify if there is a better solution
credentials["password"] = click.prompt(
"What is the password for the postgres connection?",
default="",
show_default=False,
hide_input=_should_hide_input(),
)
credentials["database"] = click.prompt(
"What is the database name for the postgres connection?",
default=default_credentials.get("database", "postgres"),
show_default=True,
).strip()
return credentials
def _collect_snowflake_credentials(default_credentials=None):
if default_credentials is None:
default_credentials = {}
credentials = {"drivername": "snowflake"}
auth_method = click.prompt(
"""What authentication method would you like to use?
1. User and Password
2. Single sign-on (SSO)
3. Key pair authentication
""",
type=click.Choice(["1", "2", "3"]),
show_choices=False,
)
credentials["username"] = click.prompt(
"What is the user login name for the snowflake connection?",
default=default_credentials.get("username", ""),
).strip()
credentials["host"] = click.prompt(
"What is the account name for the snowflake connection (include region -- ex "
"'ABCD.us-east-1')?",
default=default_credentials.get("host", ""),
).strip()
database = click.prompt(
"What is database name for the snowflake connection?",
default=default_credentials.get("database", ""),
).strip()
if len(database) > 0:
credentials["database"] = database
credentials["query"] = {}
schema = click.prompt(
"What is schema name for the snowflake connection?",
default=default_credentials.get("schema_name", ""),
).strip()
if len(schema) > 0:
credentials["query"]["schema"] = schema
warehouse = click.prompt(
"What is warehouse name for the snowflake connection?",
default=default_credentials.get("warehouse", ""),
).strip()
if len(warehouse) > 0:
credentials["query"]["warehouse"] = warehouse
role = click.prompt(
"What is role name for the snowflake connection?",
default=default_credentials.get("role", ""),
).strip()
if len(role) > 0:
credentials["query"]["role"] = role
if auth_method == "1":
credentials = {**credentials, **_collect_snowflake_credentials_user_password()}
elif auth_method == "2":
credentials = {**credentials, **_collect_snowflake_credentials_sso()}
elif auth_method == "3":
credentials = {**credentials, **_collect_snowflake_credentials_key_pair()}
return credentials
def _collect_snowflake_credentials_user_password():
credentials = {}
credentials["password"] = click.prompt(
"What is the password for the snowflake connection?",
default="",
show_default=False,
hide_input=True,
)
return credentials
def _collect_snowflake_credentials_sso():
credentials = {}
credentials["connect_args"] = {}
credentials["connect_args"]["authenticator"] = click.prompt(
"Valid okta URL or 'externalbrowser' used to connect through SSO",
default="externalbrowser",
show_default=False,
)
return credentials
def _collect_snowflake_credentials_key_pair():
credentials = {}
credentials["private_key_path"] = click.prompt(
"Path to the private key used for authentication",
show_default=False,
)
credentials["private_key_passphrase"] = click.prompt(
"Passphrase for the private key used for authentication (optional -- leave blank for none)",
default="",
show_default=False,
)
return credentials
def _collect_bigquery_credentials(default_credentials=None):
sqlalchemy_url = click.prompt(
"""What is the SQLAlchemy url/connection string for the BigQuery connection?
(reference: https://github.com/googleapis/python-bigquery-sqlalchemy#connection-string-parameters)
""",
show_default=False,
).strip()
credentials = {"url": sqlalchemy_url}
return credentials
def _collect_mysql_credentials(default_credentials=None):
# We are insisting on pymysql driver when adding a MySQL datasource through the CLI
# to avoid overcomplication of this flow.
# If user wants to use another driver, they must create the sqlalchemy connection
# URL by themselves in config_variables.yml
if default_credentials is None:
default_credentials = {}
credentials = {"drivername": "mysql+pymysql"}
credentials["host"] = click.prompt(
"What is the host for the MySQL connection?",
default=default_credentials.get("host", "localhost"),
).strip()
credentials["port"] = click.prompt(
"What is the port for the MySQL connection?",
default=default_credentials.get("port", "3306"),
).strip()
credentials["username"] = click.prompt(
"What is the username for the MySQL connection?",
default=default_credentials.get("username", ""),
).strip()
credentials["password"] = click.prompt(
"What is the password for the MySQL connection?",
default="",
show_default=False,
hide_input=True,
)
credentials["database"] = click.prompt(
"What is the database name for the MySQL connection?",
default=default_credentials.get("database", ""),
).strip()
return credentials
def _collect_redshift_credentials(default_credentials=None):
# We are insisting on psycopg2 driver when adding a Redshift datasource through the CLI
# to avoid overcomplication of this flow.
# If user wants to use another driver, they must create the sqlalchemy connection
# URL by themselves in config_variables.yml
if default_credentials is None:
default_credentials = {}
credentials = {"drivername": "postgresql+psycopg2"}
# required
credentials["host"] = click.prompt(
"What is the host for the Redshift connection?",
default=default_credentials.get("host", ""),
).strip()
credentials["port"] = click.prompt(
"What is the port for the Redshift connection?",
default=default_credentials.get("port", "5439"),
).strip()
credentials["username"] = click.prompt(
"What is the username for the Redshift connection?",
default=default_credentials.get("username", ""),
).strip()
# This is a minimal workaround we're doing to deal with hidden input problems using Git Bash on Windows
# TODO: Revisit this if we decide to fully support Windows and identify if there is a better solution
credentials["password"] = click.prompt(
"What is the password for the Redshift connection?",
default="",
show_default=False,
hide_input=_should_hide_input(),
)
credentials["database"] = click.prompt(
"What is the database name for the Redshift connection?",
default=default_credentials.get("database", ""),
).strip()
# optional
credentials["query"] = {}
credentials["query"]["sslmode"] = click.prompt(
"What is sslmode name for the Redshift connection?",
default=default_credentials.get("sslmode", "prefer"),
)
return credentials
def _add_spark_datasource(
context, passthrough_generator_only=True, prompt_for_datasource_name=True
):
send_usage_message(
data_context=context,
event="cli.new_ds_choice",
event_payload={"type": "spark"},
api_version="v2",
success=True,
)
if not _verify_pyspark_dependent_modules():
return None
if passthrough_generator_only:
datasource_name = "files_spark_datasource"
# configuration = SparkDFDatasource.build_configuration(batch_kwargs_generators={
# "default": {
# "class_name": "PassthroughGenerator",
# }
# }
# )
configuration = SparkDFDatasource.build_configuration()
else:
path = click.prompt(
msg_prompt_filesys_enter_base_path,
type=click.Path(exists=True, file_okay=False),
).strip()
if path.startswith("./"):
path = path[2:]
if path.endswith("/"):
basenamepath = path[:-1]
else:
basenamepath = path
datasource_name = f"{os.path.basename(basenamepath)}__dir"
if prompt_for_datasource_name:
datasource_name = click.prompt(
msg_prompt_datasource_name, default=datasource_name
)
configuration = SparkDFDatasource.build_configuration(
batch_kwargs_generators={
"subdir_reader": {
"class_name": "SubdirReaderBatchKwargsGenerator",
"base_directory": os.path.join("..", path),
}
}
)
configuration["class_name"] = "SparkDFDatasource"
configuration["module_name"] = "great_expectations.datasource"
errors = DatasourceConfigSchema().validate(configuration)
if len(errors) != 0:
raise ge_exceptions.GreatExpectationsError(
f"Invalid Datasource configuration: {errors:s}"
)
cli_message(
"""
Great Expectations will now add a new Datasource '{:s}' to your deployment, by adding this entry to your great_expectations.yml:
{:s}
""".format(
datasource_name,
textwrap.indent(toolkit.yaml.dump({datasource_name: configuration}), " "),
)
)
toolkit.confirm_proceed_or_exit()
context.add_datasource(name=datasource_name, **configuration)
return datasource_name
# TODO consolidate all the myriad CLI tests into this
def select_batch_kwargs_generator(
context, datasource_name, available_data_assets_dict=None
):
msg_prompt_select_generator = "Select batch kwarg generator"
if available_data_assets_dict is None:
available_data_assets_dict = context.get_available_data_asset_names(
datasource_names=datasource_name
)
available_data_asset_names_by_generator = {}
for key, value in available_data_assets_dict[datasource_name].items():
if len(value["names"]) > 0:
available_data_asset_names_by_generator[key] = value["names"]
if len(available_data_asset_names_by_generator.keys()) == 0:
return None
elif len(available_data_asset_names_by_generator.keys()) == 1:
return list(available_data_asset_names_by_generator.keys())[0]
else: # multiple batch_kwargs_generators
generator_names = list(available_data_asset_names_by_generator.keys())
choices = "\n".join(
[
f" {i}. {generator_name}"
for i, generator_name in enumerate(generator_names, 1)
]
)
option_selection = click.prompt(
f"{msg_prompt_select_generator}\n{choices}",
type=click.Choice(
[str(i) for i, generator_name in enumerate(generator_names, 1)]
),
show_choices=False,
)
batch_kwargs_generator_name = generator_names[int(option_selection) - 1]
return batch_kwargs_generator_name
# TODO this method needs testing
# TODO this method has different numbers of returned objects
def get_batch_kwargs(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
data_asset_name=None,
additional_batch_kwargs=None,
):
"""
This method manages the interaction with user necessary to obtain batch_kwargs for a batch of a data asset.
In order to get batch_kwargs this method needs datasource_name, batch_kwargs_generator_name and data_asset_name
to combine them into a fully-qualified data asset identifier(datasource_name/batch_kwargs_generator_name/data_asset_name).
All three arguments are optional. If they are present, the method uses their values. Otherwise, the method
prompts user to enter them interactively. Since it is possible for any of these three components to be
passed to this method as empty values and to get their values after interacting with user, this method
returns these components' values in case they changed.
If the datasource has batch_kwargs_generators that can list available data asset names, the method lets user choose a name
from that list (note: if there are multiple batch_kwargs_generators, user has to choose one first). If a name known to
the chosen batch_kwargs_generator is selected, the batch_kwargs_generators will be able to yield batch_kwargs. The method also gives user
an alternative to selecting the data asset name from the batch_kwargs_generator's list - the user can type in a name for their
data asset. In this case a passthrough batch_kwargs generator will be used to construct a fully-qualified data asset
identifier (note: if the datasource has no passthrough batch_kwargs_generators configured, the method will exit with a failure).
Since no batch_kwargs_generators can yield batch_kwargs for this data asset name, the method prompts user to specify batch_kwargs
by choosing a file (if the datasource is pandas or spark) or by writing a SQL query (if the datasource points
to a database).
:param context:
:param datasource_name:
:param batch_kwargs_generator_name:
:param data_asset_name:
:param additional_batch_kwargs:
:return: a tuple: (datasource_name, batch_kwargs_generator_name, data_asset_name, batch_kwargs). The components
of the tuple were passed into the methods as optional arguments, but their values might
have changed after this method's execution. If the returned batch_kwargs is None, it means
that the batch_kwargs_generator will know to yield batch_kwargs when called.
"""
try:
available_data_assets_dict = context.get_available_data_asset_names(
datasource_names=datasource_name
)
except ValueError:
# the datasource has no batch_kwargs_generators
available_data_assets_dict = {datasource_name: {}}
data_source = toolkit.select_datasource(context, datasource_name=datasource_name)
datasource_name = data_source.name
if batch_kwargs_generator_name is None:
batch_kwargs_generator_name = select_batch_kwargs_generator(
context,
datasource_name,
available_data_assets_dict=available_data_assets_dict,
)
# if the user provided us with the batch kwargs generator name and the data asset, we have everything we need -
# let's ask the generator to build batch kwargs for this asset - we are done.
if batch_kwargs_generator_name is not None and data_asset_name is not None:
generator = data_source.get_batch_kwargs_generator(batch_kwargs_generator_name)
batch_kwargs = generator.build_batch_kwargs(
data_asset_name, **additional_batch_kwargs
)
return batch_kwargs
if isinstance(
context.get_datasource(datasource_name), (PandasDatasource, SparkDFDatasource)
):
(
data_asset_name,
batch_kwargs,
) = _get_batch_kwargs_from_generator_or_from_file_path(
context,
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
)
elif isinstance(context.get_datasource(datasource_name), SqlAlchemyDatasource):
data_asset_name, batch_kwargs = _get_batch_kwargs_for_sqlalchemy_datasource(
context, datasource_name, additional_batch_kwargs=additional_batch_kwargs
)
else:
raise ge_exceptions.DataContextError(
"Datasource {:s} is expected to be a PandasDatasource or SparkDFDatasource, but is {:s}".format(
datasource_name, str(type(context.get_datasource(datasource_name)))
)
)
return (datasource_name, batch_kwargs_generator_name, data_asset_name, batch_kwargs)
def _get_batch_kwargs_from_generator_or_from_file_path(
context,
datasource_name,
batch_kwargs_generator_name=None,
additional_batch_kwargs=None,
):
if additional_batch_kwargs is None:
additional_batch_kwargs = {}
msg_prompt_generator_or_file_path = """
Would you like to:
1. choose from a list of data assets in this datasource
2. enter the path of a data file
"""
msg_prompt_file_path = """
Enter the path of a data file (relative or absolute, s3a:// and gs:// paths are ok too)
"""
msg_prompt_enter_data_asset_name = "\nWhich data would you like to use?\n"
msg_prompt_enter_data_asset_name_suffix = (
" Don't see the name of the data asset in the list above? Just type it\n"
)
msg_prompt_file_type = """
We could not determine the format of the file. What is it?
1. CSV
2. Parquet
3. Excel
4. JSON
5. SAS
"""
reader_method_file_extensions = {
"1": "csv",
"2": "parquet",
"3": "xlsx",
"4": "json",
"5": "sas",
}
data_asset_name = None
datasource = context.get_datasource(datasource_name)
if batch_kwargs_generator_name is not None:
generator = datasource.get_batch_kwargs_generator(batch_kwargs_generator_name)
option_selection = click.prompt(
msg_prompt_generator_or_file_path,
type=click.Choice(["1", "2"]),
show_choices=False,
)
if option_selection == "1":
available_data_asset_names = sorted(
generator.get_available_data_asset_names()["names"], key=lambda x: x[0]
)
available_data_asset_names_str = [
f"{name[0]} ({name[1]})" for name in available_data_asset_names
]
data_asset_names_to_display = available_data_asset_names_str[:50]
choices = "\n".join(
[
f" {i}. {name}"
for i, name in enumerate(data_asset_names_to_display, 1)
]
)
prompt = (
msg_prompt_enter_data_asset_name
+ choices
+ "\n"
+ msg_prompt_enter_data_asset_name_suffix.format(
len(data_asset_names_to_display)
)
)
data_asset_name_selection = click.prompt(prompt, show_default=False)
data_asset_name_selection = data_asset_name_selection.strip()
try:
data_asset_index = int(data_asset_name_selection) - 1
try:
data_asset_name = [name[0] for name in available_data_asset_names][
data_asset_index
]
except IndexError:
pass
except ValueError:
data_asset_name = data_asset_name_selection
batch_kwargs = generator.build_batch_kwargs(
data_asset_name, **additional_batch_kwargs
)
return (data_asset_name, batch_kwargs)
# No generator name was passed or the user chose to enter a file path
# We should allow a directory for Spark, but not for Pandas
dir_okay = isinstance(datasource, SparkDFDatasource)
path = None
while True:
# do not use Click to check if the file exists - the get_batch
# logic will check this
path = click.prompt(
msg_prompt_file_path,
type=click.Path(dir_okay=dir_okay),
default=path,
)
if not path.startswith("gs:") and not path.startswith("s3"):
path = os.path.abspath(path)
batch_kwargs = {"path": path, "datasource": datasource_name}
reader_method = None
try:
reader_kwargs = datasource.guess_reader_method_from_path(path)
reader_method = reader_kwargs["reader_method"]
reader_options = reader_kwargs.get("reader_options", {})
except BatchKwargsError:
pass
if reader_method is None:
while True:
option_selection = click.prompt(
msg_prompt_file_type,
type=click.Choice(["1", "2", "3", "4", "5"]),
show_choices=False,
)
try:
reader_method = datasource.guess_reader_method_from_path(
f"{path}.{reader_method_file_extensions[option_selection]}"
)["reader_method"]
except BatchKwargsError:
pass
if reader_method is not None:
batch_kwargs["reader_method"] = reader_method
if (
isinstance(datasource, SparkDFDatasource)
and reader_method == "csv"
):
header_row = click.confirm(
"\nDoes this file contain a header row?", default=True
)
batch_kwargs["reader_options"] = {"header": header_row}
batch = datasource.get_batch(batch_kwargs=batch_kwargs)
break
else:
try:
batch_kwargs["reader_method"] = reader_method
reader_options = {
**batch_kwargs.get("reader_options", {}),
**reader_options,
}
if reader_options:
batch_kwargs["reader_options"] = reader_options
if isinstance(datasource, SparkDFDatasource) and reader_method == "csv":
header_row = click.confirm(
"\nDoes this file contain a header row?", default=True
)
batch_kwargs["reader_options"] = {"header": header_row}
batch = datasource.get_batch(batch_kwargs=batch_kwargs)
break
except Exception as e:
file_load_error_message = """
<red>Cannot load file.</red>
- Please check the file and try again or select a different data file.
- Error: {0:s}"""
cli_message(file_load_error_message.format(str(e)))
if not click.confirm("\nTry again?", default=True):
cli_message(
"""
We have saved your setup progress. When you are ready, run great_expectations init to continue.
"""
)
sys.exit(1)
if data_asset_name is None and batch_kwargs.get("path"):
try:
# Try guessing a filename
filename = os.path.split(batch_kwargs.get("path"))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
data_asset_name = filename
except (OSError, IndexError):
pass
batch_kwargs["data_asset_name"] = data_asset_name
return (data_asset_name, batch_kwargs)
def _get_default_schema(datasource):
inspector = sqlalchemy.inspect(datasource.engine)
return inspector.default_schema_name
def _get_batch_kwargs_for_sqlalchemy_datasource(
context, datasource_name, additional_batch_kwargs=None
):
data_asset_name = None
sql_query = None
datasource = context.get_datasource(datasource_name)
msg_prompt_how_to_connect_to_data = """
You have selected a datasource that is a SQL database. How would you like to specify the data?
1. Enter a table name and schema
2. Enter a custom SQL query
3. List all tables in the database (this may take a very long time)
"""
default_schema = _get_default_schema(datasource)
temp_generator = TableBatchKwargsGenerator(name="temp", datasource=datasource)
while data_asset_name is None:
single_or_multiple_data_asset_selection = click.prompt(
msg_prompt_how_to_connect_to_data,
type=click.Choice(["1", "2", "3"]),
show_choices=False,
)
if single_or_multiple_data_asset_selection == "1": # name the table and schema
schema_name = click.prompt(
"Please provide the schema name of the table (this is optional)",
default=default_schema,
)
table_name = click.prompt(
"Please provide the table name (this is required)"
)
data_asset_name = f"{schema_name}.{table_name}"
elif single_or_multiple_data_asset_selection == "2": # SQL query
sql_query = click.prompt("Please provide the SQL query")
data_asset_name = "custom_sql_query"
elif single_or_multiple_data_asset_selection == "3": # list it all
msg_prompt_warning: str = r"""Warning: I you have a large number of tables in your datasource, this may take a very long time.
Would you like to continue?"""
confirmation = click.prompt(
msg_prompt_warning, type=click.Choice(["y", "n"]), show_choices=True
)
if confirmation == "y":
# avoid this call until necessary
available_data_asset_names = (
temp_generator.get_available_data_asset_names()["names"]
)
available_data_asset_names_str = [
f"{name[0]} ({name[1]})" for name in available_data_asset_names
]
data_asset_names_to_display = available_data_asset_names_str
choices = "\n".join(
[
f" {i}. {name}"
for i, name in enumerate(data_asset_names_to_display, 1)
]
)
msg_prompt_enter_data_asset_name = (
"\nWhich table would you like to use? (Choose one)\n"
)
prompt = msg_prompt_enter_data_asset_name + choices + os.linesep
selection = click.prompt(prompt, show_default=False)
selection = selection.strip()
try:
data_asset_index = int(selection) - 1
try:
data_asset_name = [
name[0] for name in available_data_asset_names
][data_asset_index]
except IndexError:
print(
f"You have specified {selection}, which is an incorrect index"
)
pass
except ValueError:
print(
f"You have specified {selection}, which is an incorrect value"
)
pass
if additional_batch_kwargs is None:
additional_batch_kwargs = {}
# Some backends require named temporary table parameters. We specifically elicit those and add them
# where appropriate.
temp_table_kwargs = {}
datasource = context.get_datasource(datasource_name)
if datasource.engine.dialect.name.lower() == "bigquery":
# bigquery table needs to contain the project id if it differs from the credentials project
if len(data_asset_name.split(".")) < 3:
project_id, _, _, _, _, _ = parse_bigquery_url(datasource.engine.url)
data_asset_name = f"{project_id}.{data_asset_name}"
# bigquery also requires special handling
bigquery_temp_table = click.prompt(
"Great Expectations will create a table to use for "
"validation." + os.linesep + "Please enter a name for this table: ",
default=f"SOME_PROJECT.SOME_DATASET.ge_tmp_{str(uuid.uuid4())[:8]}",
)
temp_table_kwargs = {
"bigquery_temp_table": bigquery_temp_table,
}
# now building the actual batch_kwargs
if sql_query is None:
batch_kwargs = temp_generator.build_batch_kwargs(
data_asset_name, **additional_batch_kwargs
)
batch_kwargs.update(temp_table_kwargs)
else:
batch_kwargs = {"query": sql_query, "datasource": datasource_name}
batch_kwargs.update(temp_table_kwargs)
BridgeValidator(
batch=datasource.get_batch(batch_kwargs),
expectation_suite=ExpectationSuite("throwaway", data_context=context),
).get_dataset()
batch_kwargs["data_asset_name"] = data_asset_name
return data_asset_name, batch_kwargs
def _verify_sqlalchemy_dependent_modules() -> bool:
return verify_library_dependent_modules(
python_import_name="sqlalchemy", pip_library_name="sqlalchemy"
)
def _verify_mysql_dependent_modules() -> bool:
return verify_library_dependent_modules(
python_import_name="pymysql",
pip_library_name="pymysql",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
def _verify_postgresql_dependent_modules() -> bool:
psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
# noinspection SpellCheckingInspection
postgresql_psycopg2_success: bool = verify_library_dependent_modules(
python_import_name="sqlalchemy.dialects.postgresql.psycopg2",
pip_library_name="psycopg2-binary",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
return psycopg2_success and postgresql_psycopg2_success
def _verify_redshift_dependent_modules() -> bool:
# noinspection SpellCheckingInspection
postgresql_success: bool = _verify_postgresql_dependent_modules()
redshift_success: bool = verify_library_dependent_modules(
python_import_name="sqlalchemy_redshift.dialect",
pip_library_name="sqlalchemy-redshift",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
return redshift_success or postgresql_success
def _verify_snowflake_dependent_modules() -> bool:
return verify_library_dependent_modules(
python_import_name="snowflake.sqlalchemy.snowdialect",
pip_library_name="snowflake-sqlalchemy",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
def _verify_bigquery_dependent_modules() -> bool:
pybigquery_ok = verify_library_dependent_modules(
python_import_name="pybigquery.sqlalchemy_bigquery",
pip_library_name="pybigquery",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
sqlalchemy_bigquery_ok = verify_library_dependent_modules(
python_import_name="sqlalchemy_bigquery",
pip_library_name="sqlalchemy_bigquery",
module_names_to_reload=CLI_ONLY_SQLALCHEMY_ORDERED_DEPENDENCY_MODULE_NAMES,
)
return pybigquery_ok or sqlalchemy_bigquery_ok
def _verify_pyspark_dependent_modules() -> bool:
return verify_library_dependent_modules(
python_import_name="pyspark", pip_library_name="pyspark"
)
def skip_prompt_message(skip_flag, prompt_message_text) -> bool:
if not skip_flag:
return click.confirm(prompt_message_text, default=True)
return skip_flag
def profile_datasource(
context,
datasource_name,
batch_kwargs_generator_name=None,
data_assets=None,
profile_all_data_assets=False,
max_data_assets=20,
additional_batch_kwargs=None,
open_docs=False,
skip_prompt_flag=False,
):
"""Profile a named datasource using the specified context"""
# Note we are explicitly not using a logger in all CLI output to have
# more control over console UI.
logging.getLogger("great_expectations.profile.basic_dataset_profiler").setLevel(
logging.INFO
)
msg_intro = "Profiling '{0:s}' will create expectations and documentation."
msg_confirm_ok_to_proceed = """Would you like to profile '{0:s}'?"""
msg_skipping = (
"Skipping profiling for now. You can always do this later "
"by running `<green>great_expectations datasource profile</green>`."
)
msg_some_data_assets_not_found = """Some of the data assets you specified were not found: {0:s}
"""
msg_too_many_data_assets = """There are {0:d} data assets in {1:s}. Profiling all of them might take too long.
"""
msg_error_multiple_generators_found = """<red>More than one batch kwargs generator found in datasource {0:s}.
Specify the one you want the profiler to use in batch_kwargs_generator_name argument.</red>
"""
msg_error_no_generators_found = """<red>No batch kwargs generators can list available data assets in datasource
{0:s}. The datasource might be empty or a batch kwargs generator not configured in the config file.</red>
"""
msg_prompt_enter_data_asset_list = """Enter comma-separated list of data asset names (e.g., {0:s})
"""
msg_options = """Choose how to proceed:
1. Specify a list of the data assets to profile
2. Exit and profile later
3. Profile ALL data assets (this might take a while)
"""
msg_data_doc_intro = """
<cyan>========== Data Docs ==========</cyan>
Great Expectations is building Data Docs from the data you just profiled!"""
cli_message(msg_intro.format(datasource_name))
if data_assets:
data_assets = [item.strip() for item in data_assets.split(",")]
# Call the data context's profiling method to check if the arguments are valid
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=True,
additional_batch_kwargs=additional_batch_kwargs,
)
if (
profiling_results["success"] is True
): # data context is ready to profile - run profiling
if (
data_assets
or profile_all_data_assets
or skip_prompt_message(
skip_prompt_flag, msg_confirm_ok_to_proceed.format(datasource_name)
)
):
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=False,
additional_batch_kwargs=additional_batch_kwargs,
)
else:
cli_message(msg_skipping)
return
else: # we need to get arguments from user interactively
do_exit = False
while not do_exit:
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
cli_message(
msg_some_data_assets_not_found.format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_TOO_MANY_DATA_ASSETS
):
cli_message(
msg_too_many_data_assets.format(
profiling_results["error"]["num_data_assets"], datasource_name
)
)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_MULTIPLE_BATCH_KWARGS_GENERATORS_FOUND
):
cli_message(msg_error_multiple_generators_found.format(datasource_name))
sys.exit(1)
elif (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_NO_BATCH_KWARGS_GENERATORS_FOUND
):
cli_message(msg_error_no_generators_found.format(datasource_name))
sys.exit(1)
else: # unknown error
raise ValueError(
"Unknown profiling error code: "
+ profiling_results["error"]["code"]
)
option_selection = click.prompt(
msg_options, type=click.Choice(["1", "2", "3"]), show_choices=False
)
if option_selection == "1":
data_assets = click.prompt(
msg_prompt_enter_data_asset_list.format(
", ".join(
[
data_asset[0]
for data_asset in profiling_results["error"][
"data_assets"
]
][:3]
)
),
show_default=False,
)
if data_assets:
data_assets = [item.strip() for item in data_assets.split(",")]
elif option_selection == "3":
profile_all_data_assets = True
data_assets = None
elif option_selection == "2": # skip
cli_message(msg_skipping)
return
else:
raise ValueError(f"Unrecognized option: {option_selection}")
# after getting the arguments from the user, let's try to run profiling again
# (no dry run this time)
profiling_results = context.profile_datasource(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_assets=data_assets,
profile_all_data_assets=profile_all_data_assets,
max_data_assets=max_data_assets,
dry_run=False,
additional_batch_kwargs=additional_batch_kwargs,
)
if profiling_results["success"]: # data context is ready to profile
break
cli_message(msg_data_doc_intro.format(rtd_url_ge_version))
build_docs(context, view=open_docs, assume_yes=skip_prompt_flag)
if open_docs: # This is mostly to keep tests from spawning windows
context.open_data_docs()
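# Hedged usage sketch (not part of the original module): drives profile_datasource
# programmatically rather than through the CLI. The datasource name and asset list
# below are hypothetical; DataContext() is assumed to locate the local
# great_expectations project from the current directory.
def _example_profile_datasource():
    context = DataContext()  # loads the project's great_expectations.yml
    profile_datasource(
        context,
        "my_datasource",             # hypothetical datasource name
        data_assets="users,orders",  # hypothetical comma-separated asset list
        open_docs=False,
        skip_prompt_flag=True,       # suppress the interactive confirmation
    )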
msg_prompt_choose_datasource = """Configure a datasource:
1. Pandas DataFrame
2. Relational database (SQL)
3. Spark DataFrame
4. Skip datasource configuration
"""
msg_prompt_choose_database = """
Which database backend are you using?
{}
""".format(
"\n".join([f" {i}. {db.value}" for i, db in enumerate(SupportedDatabases, 1)])
)
msg_prompt_filesys_enter_base_path = """
Enter the path (relative or absolute) of the root directory where the data files are stored.
"""
msg_prompt_datasource_name = """
Give your new Datasource a short name.
"""
msg_db_config = """
Next, we will configure database credentials and store them in the `{0:s}` section
of this config file: great_expectations/uncommitted/config_variables.yml:
"""
msg_unknown_data_source = """
Do we not have the type of data source you want?
- Please create a GitHub issue here so we can discuss it!
- <blue>https://github.com/great-expectations/great_expectations/issues/new</blue>"""
|
great-expectations/great_expectations
|
great_expectations/cli/v012/datasource.py
|
Python
|
apache-2.0
| 61,560
|
[
"VisIt"
] |
988ae5dcfc251dc2323a306c26486e4a1627a8c6642e290ecc87215a77c748a0
|
# coding: utf-8
# Copyright (c) Materials Virtual Lab.
# Distributed under the terms of the BSD License.
from __future__ import division, unicode_literals, print_function
import os
import six
import abc
from functools import partial
import traceback
import shutil
import re
import numpy as np
from monty.dev import deprecated
from monty.json import MSONable
from monty.serialization import loadfn
from pymatgen.io.vasp.inputs import Poscar, Kpoints, Potcar, Incar
from pymatgen.io.vasp.outputs import Vasprun, Outcar
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.symmetry.bandstructure import HighSymmKpath
"""
#TODO: Replace with proper module doc.
"""
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
class AbstractVaspInputSet(six.with_metaclass(abc.ABCMeta, MSONable)):
"""
Abstract base class representing a set of Vasp input parameters.
The idea is that using a VaspInputSet, a complete set of input files
    (INCAR, KPOINTS, POSCAR and POTCAR) can be generated in an automated
fashion for any structure.
"""
@abc.abstractmethod
def get_poscar(self, structure):
"""
Returns Poscar from a structure.
"""
return
@abc.abstractmethod
def get_kpoints(self, structure):
"""
Returns Kpoints from a structure.
Args:
structure (Structure/IStructure): Structure to generate kpoints
for.
Returns:
Kpoints object
"""
return
@abc.abstractmethod
def get_incar(self, structure):
"""
Returns Incar from a structure.
Args:
structure (Structure/IStructure): Structure to generate Incar for.
Returns:
Incar object
"""
return
@abc.abstractmethod
def get_potcar(self, structure):
"""
Returns Potcar from a structure.
Args:
structure (Structure/IStructure): Structure to generate potcar
for.
Returns:
Potcar object
"""
return
@abc.abstractmethod
def get_potcar_symbols(self, structure):
"""
Returns list of POTCAR symbols from a structure.
Args:
structure (Structure/IStructure): Structure to generate potcar
symbols for.
Returns:
List of POTCAR symbols
"""
return
def get_all_vasp_input(self, structure, generate_potcar=True):
"""
Returns all input files as a dict of {filename: vasp object}
Args:
structure (Structure/IStructure): Structure to generate vasp
input for.
generate_potcar (bool): Set to False to generate a POTCAR.spec
file instead of a POTCAR, which contains the POTCAR labels
but not the actual POTCAR. Defaults to True.
Returns:
dict of {filename: file_as_string}, e.g., {'INCAR':'EDIFF=1e-4...'}
"""
kpoints = self.get_kpoints(structure)
incar = self.get_incar(structure)
if np.product(kpoints.kpts) < 4 and incar.get("ISMEAR", 0) == -5:
incar["ISMEAR"] = 0
d = {'INCAR': incar,
'KPOINTS': kpoints,
'POSCAR': self.get_poscar(structure)}
if generate_potcar:
d['POTCAR'] = self.get_potcar(structure)
else:
d['POTCAR.spec'] = "\n".join(self.get_potcar_symbols(structure))
return d
def write_input(self, structure, output_dir,
make_dir_if_not_present=True, include_cif=False):
"""
Writes a set of VASP input to a directory.
Args:
structure (Structure/IStructure): Structure to write VASP input
files for.
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
include_cif (bool): Whether to write a CIF file in the output
directory for easier opening by VESTA.
"""
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
for k, v in self.get_all_vasp_input(structure).items():
v.write_file(os.path.join(output_dir, k))
if k == "POSCAR" and include_cif:
v.structure.to(
filename=os.path.join(output_dir,
"%s.cif" % v.structure.formula))
class DictVaspInputSet(AbstractVaspInputSet):
"""
Concrete implementation of VaspInputSet that is initialized from a dict
    of settings. This allows arbitrary settings to be input. In general,
this is rarely used directly unless there is a source of settings in yaml
format (e.g., from a REST interface). It is typically used by other
VaspInputSets for initialization.
Special consideration should be paid to the way the MAGMOM initialization
for the INCAR is done. The initialization differs depending on the type of
structure and the configuration settings. The order in which the magmom is
determined is as follows:
1. If the site itself has a magmom setting, that is used.
2. If the species on the site has a spin setting, that is used.
3. If the species itself has a particular setting in the config file, that
is used, e.g., Mn3+ may have a different magmom than Mn4+.
4. Lastly, the element symbol itself is checked in the config file. If
there are no settings, VASP's default of 0.6 is used.
Args:
        name (str): A name for the input set.
config_dict (dict): The config dictionary to use.
hubbard_off (bool): Whether to turn off Hubbard U if it is specified in
config_dict. Defaults to False, i.e., follow settings in
config_dict.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
ediff_per_atom (bool): Whether the EDIFF is specified on a per atom
basis. This is generally desired, though for some calculations (
e.g. NEB) this should be turned off (and an appropriate EDIFF
supplied in user_incar_settings)
potcar_functional (str): Functional to use. Default (None) is to use
the functional in Potcar.DEFAULT_FUNCTIONAL. Valid values:
"PBE", "LDA", "PW91", "LDA_US"
force_gamma (bool): Force gamma centered kpoint generation. Default
(False) is to use the Automatic Density kpoint scheme, which
will use the Gamma centered generation scheme for hexagonal
cells, and Monkhorst-Pack otherwise.
reduce_structure (None/str): Before generating the input files,
generate the reduced structure. Default (None), does not
alter the structure. Valid values: None, "niggli", "LLL"
"""
@deprecated(
message="All vasp input sets have been replaced by equivalents "
"pymatgen.io.sets. Will be removed in pmg 4.0.")
def __init__(self, name, config_dict, hubbard_off=False,
user_incar_settings=None,
constrain_total_magmom=False, sort_structure=True,
ediff_per_atom=True, potcar_functional=None,
force_gamma=False, reduce_structure=None):
self.name = name
self.potcar_settings = config_dict["POTCAR"]
self.kpoints_settings = config_dict['KPOINTS']
self.incar_settings = config_dict['INCAR']
self.set_nupdown = constrain_total_magmom
self.sort_structure = sort_structure
self.ediff_per_atom = ediff_per_atom
self.hubbard_off = hubbard_off
self.potcar_functional = potcar_functional
self.force_gamma = force_gamma
self.reduce_structure = reduce_structure
if hubbard_off:
for k in list(self.incar_settings.keys()):
if k.startswith("LDAU"):
del self.incar_settings[k]
if user_incar_settings:
self.incar_settings.update(user_incar_settings)
def get_incar(self, structure):
incar = Incar()
if self.reduce_structure:
structure = structure.get_reduced_structure(self.reduce_structure)
if self.sort_structure:
structure = structure.get_sorted_structure()
comp = structure.composition
elements = sorted([el for el in comp.elements if comp[el] > 0],
key=lambda e: e.X)
most_electroneg = elements[-1].symbol
poscar = Poscar(structure)
for key, setting in self.incar_settings.items():
if key == "MAGMOM":
mag = []
for site in structure:
if hasattr(site, 'magmom'):
mag.append(site.magmom)
elif hasattr(site.specie, 'spin'):
mag.append(site.specie.spin)
elif str(site.specie) in setting:
mag.append(setting.get(str(site.specie)))
else:
mag.append(setting.get(site.specie.symbol, 0.6))
incar[key] = mag
elif key in ('LDAUU', 'LDAUJ', 'LDAUL'):
if hasattr(structure[0], key.lower()):
m = dict([(site.specie.symbol, getattr(site, key.lower()))
for site in structure])
incar[key] = [m[sym] for sym in poscar.site_symbols]
elif most_electroneg in setting.keys():
incar[key] = [setting[most_electroneg].get(sym, 0)
for sym in poscar.site_symbols]
else:
incar[key] = [0] * len(poscar.site_symbols)
elif key == "EDIFF":
if self.ediff_per_atom:
incar[key] = float(setting) * structure.num_sites
else:
incar[key] = float(setting)
else:
incar[key] = setting
has_u = ("LDAUU" in incar and sum(incar['LDAUU']) > 0)
if has_u:
# modify LMAXMIX if LSDA+U and you have d or f electrons
# note that if the user explicitly sets LMAXMIX in settings it will
# override this logic.
if 'LMAXMIX' not in self.incar_settings.keys():
# contains f-electrons
if any([el.Z > 56 for el in structure.composition]):
incar['LMAXMIX'] = 6
# contains d-electrons
elif any([el.Z > 20 for el in structure.composition]):
incar['LMAXMIX'] = 4
else:
for key in list(incar.keys()):
if key.startswith('LDAU'):
del incar[key]
if self.set_nupdown:
nupdown = sum([mag if abs(mag) > 0.6 else 0
for mag in incar['MAGMOM']])
incar['NUPDOWN'] = nupdown
return incar
def get_poscar(self, structure):
if self.reduce_structure:
structure = structure.get_reduced_structure(self.reduce_structure)
if self.sort_structure:
structure = structure.get_sorted_structure()
return Poscar(structure)
def get_potcar(self, structure, check_hash=False):
if self.reduce_structure:
structure = structure.get_reduced_structure(self.reduce_structure)
if self.sort_structure:
structure = structure.get_sorted_structure()
if self.potcar_functional:
p = Potcar(self.get_potcar_symbols(structure),
functional=self.potcar_functional)
else:
p = Potcar(self.get_potcar_symbols(structure))
if check_hash:
hash_check = [ps.hash == self.potcar_settings[ps.element][
'hash'] for ps in p]
if all(hash_check):
return p
else:
wrong_hashes = [p.symbols[i] for i, tf in enumerate(
hash_check) if not tf]
raise ValueError("Potcars {} have different hashes "
"than those specified in the config "
"dictionary".format(wrong_hashes))
else:
return p
def get_nelect(self, structure):
"""
Gets the default number of electrons for a given structure.
"""
n = 0
for ps in self.get_potcar(structure):
n += structure.composition[ps.element] * ps.ZVAL
return n
def get_potcar_symbols(self, structure):
if self.reduce_structure:
structure = structure.get_reduced_structure(self.reduce_structure)
if self.sort_structure:
structure = structure.get_sorted_structure()
p = self.get_poscar(structure)
elements = p.site_symbols
potcar_symbols = []
if isinstance(self.potcar_settings[elements[-1]], dict):
for el in elements:
potcar_symbols.append(self.potcar_settings[el]['symbol']
if el in self.potcar_settings else el)
else:
for el in elements:
potcar_symbols.append(self.potcar_settings[el]
if el in self.potcar_settings else el)
return potcar_symbols
def get_kpoints(self, structure):
"""
Writes out a KPOINTS file using the fully automated grid method. Uses
Gamma centered meshes for hexagonal cells and Monk grids otherwise.
Algorithm:
Uses a simple approach scaling the number of divisions along each
reciprocal lattice vector proportional to its length.
"""
if self.reduce_structure:
structure = structure.get_reduced_structure(self.reduce_structure)
if self.sort_structure:
structure = structure.get_sorted_structure()
# If grid_density is in the kpoints_settings use Kpoints.automatic_density
if self.kpoints_settings.get('grid_density'):
return Kpoints.automatic_density(
structure, int(self.kpoints_settings['grid_density']),
self.force_gamma)
# If reciprocal_density is in the kpoints_settings use Kpoints.automatic_density_by_vol
elif self.kpoints_settings.get('reciprocal_density'):
return Kpoints.automatic_density_by_vol(
structure, int(self.kpoints_settings['reciprocal_density']),
self.force_gamma)
# If length is in the kpoints_settings use Kpoints.automatic
elif self.kpoints_settings.get('length'):
return Kpoints.automatic(self.kpoints_settings['length'])
# Raise error. Unsure of which kpoint generation to use
else:
raise ValueError(
"Invalid KPoint Generation algo : Supported Keys are "
"grid_density: for Kpoints.automatic_density generation, "
"reciprocal_density: for KPoints.automatic_density_by_vol generation, "
"and length : for Kpoints.automatic generation")
def __str__(self):
return self.name
def __repr__(self):
output = [self.name, ""]
section_names = ['INCAR settings', 'KPOINTS settings',
'POTCAR settings']
count = 0
for d in [self.incar_settings, self.kpoints_settings,
self.potcar_settings]:
output.append(section_names[count])
for k, v in d.items():
output.append("%s = %s" % (k, str(v)))
output.append("")
count += 1
return "\n".join(output)
def as_dict(self):
config_dict = {
"INCAR": self.incar_settings,
"KPOINTS": self.kpoints_settings,
"POTCAR": self.potcar_settings
}
return {
"name": self.name,
"config_dict": config_dict,
"hubbard_off": self.hubbard_off,
"constrain_total_magmom": self.set_nupdown,
"sort_structure": self.sort_structure,
"potcar_functional": self.potcar_functional,
"ediff_per_atom": self.ediff_per_atom,
"force_gamma": self.force_gamma,
"reduce_structure": self.reduce_structure,
"@class": self.__class__.__name__,
"@module": self.__class__.__module__,
}
@classmethod
def from_dict(cls, d):
return cls(name=d["name"], config_dict=d["config_dict"],
hubbard_off=d.get("hubbard_off", False),
constrain_total_magmom=d["constrain_total_magmom"],
sort_structure=d.get("sort_structure", True),
potcar_functional=d.get("potcar_functional", None),
ediff_per_atom=d.get("ediff_per_atom", True),
force_gamma=d.get("force_gamma", False),
reduce_structure=d.get("reduce_structure", None))
@staticmethod
def from_file(name, filename, **kwargs):
"""
Creates a DictVaspInputSet from a yaml/json file.
Args:
name (str): A name for the input set.
filename (str): Path to a yaml/json file containing the settings.
\*\*kwargs: Same kwargs as in the constructor.
Returns:
DictVaspInputSet
"""
return DictVaspInputSet(name, loadfn(filename), **kwargs)
MITVaspInputSet = partial(DictVaspInputSet.from_file, "MIT",
os.path.join(MODULE_DIR, "MITVaspInputSet.yaml"))
"""
Standard implementation of VaspInputSet utilizing parameters in the MIT
High-throughput project.
The parameters are chosen specifically for a high-throughput project,
which means in general pseudopotentials with fewer electrons were chosen.
Please refer::
A Jain, G. Hautier, C. Moore, S. P. Ong, C. Fischer, T. Mueller,
K. A. Persson, G. Ceder. A high-throughput infrastructure for density
functional theory calculations. Computational Materials Science,
2011, 50(8), 2295-2310. doi:10.1016/j.commatsci.2011.02.023
"""
MITGGAVaspInputSet = partial(DictVaspInputSet.from_file, "MIT GGA",
os.path.join(MODULE_DIR, "MITVaspInputSet.yaml"),
hubbard_off=True)
"""
GGA (no U) version of MITVaspInputSet.
"""
MITHSEVaspInputSet = partial(
DictVaspInputSet.from_file, "MIT HSE",
os.path.join(MODULE_DIR, "MITHSEVaspInputSet.yaml"))
"""
Typical implementation of input set for a HSE run using MIT parameters.
"""
class MITNEBVaspInputSet(DictVaspInputSet):
"""
Class for writing NEB inputs. Note that EDIFF is not on a per atom
basis for this input set.
Args:
nimages (int): Number of NEB images (excluding start and ending
structures).
        write_endpoint_inputs (bool): Whether to write KPOINTS, POTCAR, INCAR
in the first and last folders.
kpoints_gamma_override (iterable of ints): Gamma centered subdivisions
to override the kpoints density of MITVaspInputSet.yaml
write_path_cif (bool): Whether to write a cif of all the positions along
the path. Useful for visualization
\*\*kwargs: Other kwargs supported by :class:`DictVaspInputSet`.
"""
@deprecated(message="Replaced by MITNEBSet. Will be removed in pmg 4.0.")
def __init__(self, nimages=8, user_incar_settings=None,
write_endpoint_inputs=False, kpoints_gamma_override=None,
write_path_cif=False, unset_encut=False,
sort_structure=False, **kwargs):
super(MITNEBVaspInputSet, self).__init__(
"MIT NEB",
loadfn(os.path.join(MODULE_DIR, "MITVaspInputSet.yaml")),
ediff_per_atom=False, sort_structure=False,
**kwargs)
self.endpoint_set = MITVaspInputSet(ediff_per_atom=False, sort_structure=False)
if unset_encut:
del self.incar_settings["ENCUT"]
del self.endpoint_set.incar_settings["ENCUT"]
#NEB specific defaults
defaults = {'IMAGES': nimages, 'IBRION': 1, 'ISYM': 0, 'LCHARG': False}
endpoint_defaults = {'ISYM': 0, 'LCHARG': False}
if user_incar_settings:
defaults.update(user_incar_settings)
endpoint_defaults.update(user_incar_settings)
self.incar_settings.update(defaults)
self.endpoint_set.incar_settings.update(endpoint_defaults)
self.nimages = nimages
self.kpoints_gamma_override = kpoints_gamma_override
self.write_endpoint_inputs = write_endpoint_inputs
self.write_path_cif = write_path_cif
def _process_structures(self, structures):
"""
Remove any atom jumps across the cell
"""
input_structures = structures
structures = [input_structures[0]]
for s in input_structures[1:]:
prev = structures[-1]
for i in range(len(s)):
t = np.round(prev[i].frac_coords - s[i].frac_coords)
if np.sum(t) > 0.5:
s.translate_sites([i], t, to_unit_cell=False)
structures.append(s)
return structures
def write_input(self, structures, output_dir, make_dir_if_not_present=True,
write_cif=False):
"""
        NEB inputs have a special directory structure where inputs are in 00,
01, 02, ....
Args:
structures ([Structure]): nimages + 2 structures (including
start and end structures).
output_dir (str): Directory to output the VASP input files
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
write_cif (bool): If true, writes a cif along with each POSCAR.
"""
if len(structures) != self.incar_settings['IMAGES'] + 2:
raise ValueError('incorrect number of structures')
structures = self._process_structures(structures)
if make_dir_if_not_present and not os.path.exists(output_dir):
os.makedirs(output_dir)
s0 = structures[0]
self.get_incar(s0).write_file(os.path.join(output_dir, 'INCAR'))
if self.kpoints_gamma_override:
kpoints = Kpoints.gamma_automatic(self.kpoints_gamma_override)
else:
kpoints = self.get_kpoints(s0)
potcar = self.get_potcar(s0)
kpoints.write_file(os.path.join(output_dir, 'KPOINTS'))
potcar.write_file(os.path.join(output_dir, 'POTCAR'))
for i, s in enumerate(structures):
d = os.path.join(output_dir, str(i).zfill(2))
if make_dir_if_not_present and not os.path.exists(d):
os.makedirs(d)
self.get_poscar(s).write_file(os.path.join(d, 'POSCAR'))
if write_cif:
s.to(filename=os.path.join(d, '{}.cif'.format(i)))
if self.write_endpoint_inputs:
incar = self.endpoint_set.get_incar(s0)
for image in ['00', str(len(structures) - 1).zfill(2)]:
incar.write_file(os.path.join(output_dir, image, 'INCAR'))
kpoints.write_file(os.path.join(output_dir, image, 'KPOINTS'))
potcar.write_file(os.path.join(output_dir, image, 'POTCAR'))
if self.write_path_cif:
from pymatgen import Structure, PeriodicSite
from itertools import chain
sites = set()
l = structures[0].lattice
for site in chain(*(s.sites for s in structures)):
sites.add(PeriodicSite(site.species_and_occu, site.frac_coords, l))
path = Structure.from_sites(sorted(sites))
path.to(filename=os.path.join(output_dir, 'path.cif'))
def as_dict(self):
d = super(MITNEBVaspInputSet, self).as_dict()
d["nimages"] = self.nimages
return d
@classmethod
def from_dict(cls, d):
return cls(user_incar_settings=d.get("user_incar_settings", None),
constrain_total_magmom=d["constrain_total_magmom"],
sort_structure=d.get("sort_structure", True),
hubbard_off=d.get("hubbard_off", False),
nimages=d["nimages"])
class MITMDVaspInputSet(DictVaspInputSet):
"""
Class for writing a vasp md run. This DOES NOT do multiple stage
runs.
Args:
start_temp (int): Starting temperature.
end_temp (int): Final temperature.
nsteps (int): Number of time steps for simulations. The NSW parameter.
time_step (int): The time step for the simulation. The POTIM
parameter. Defaults to 2fs.
hubbard_off (bool): Whether to turn off Hubbard U. Defaults to
*True* (different behavior from standard input sets) for MD runs.
spin_polarized (bool): Whether to do spin polarized calculations.
The ISPIN parameter. Defaults to False.
sort_structure (bool): Whether to sort structure. Defaults to False
(different behavior from standard input sets).
**kwargs:
Other kwargs supported by :class:`DictVaspInputSet`.
"""
@deprecated(message="Replaced by MITMDSet. Will be removed in pmg 4.0.")
def __init__(self, start_temp, end_temp, nsteps, time_step=2,
hubbard_off=True, spin_polarized=False,
sort_structure=False, user_incar_settings=None,
**kwargs):
#MD default settings
defaults = {'TEBEG': start_temp, 'TEEND': end_temp, 'NSW': nsteps,
'EDIFF': 0.000001, 'LSCALU': False, 'LCHARG': False,
'LPLANE': False, 'LWAVE': True, 'ISMEAR': 0,
'NELMIN': 4, 'LREAL': True, 'BMIX': 1,
'MAXMIX': 20, 'NELM': 500, 'NSIM': 4, 'ISYM': 0,
'ISIF': 0, 'IBRION': 0, 'NBLOCK': 1, 'KBLOCK': 100,
'SMASS': 0, 'POTIM': time_step, 'PREC': 'Normal',
'ISPIN': 2 if spin_polarized else 1}
#override default settings with user supplied settings
if user_incar_settings:
defaults.update(user_incar_settings)
super(MITMDVaspInputSet, self).__init__(
"MIT MD",
loadfn(os.path.join(MODULE_DIR, "MITVaspInputSet.yaml")),
hubbard_off=hubbard_off, sort_structure=sort_structure,
user_incar_settings=defaults, **kwargs)
self.start_temp = start_temp
self.end_temp = end_temp
self.nsteps = nsteps
self.time_step = time_step
self.spin_polarized = spin_polarized
self.user_incar_settings = user_incar_settings or {}
#use VASP default ENCUT
if 'ENCUT' not in self.user_incar_settings:
del self.incar_settings['ENCUT']
if defaults['ISPIN'] == 1:
del self.incar_settings['MAGMOM']
def get_kpoints(self, structure):
return Kpoints.gamma_automatic()
def as_dict(self):
d = super(MITMDVaspInputSet, self).as_dict()
d.update({
"start_temp": self.start_temp,
"end_temp": self.end_temp,
"nsteps": self.nsteps,
"time_step": self.time_step,
"spin_polarized": self.spin_polarized,
"user_incar_settings": self.user_incar_settings
})
return d
@classmethod
def from_dict(cls, d):
return cls(start_temp=d["start_temp"], end_temp=d["end_temp"],
nsteps=d["nsteps"], time_step=d["time_step"],
hubbard_off=d.get("hubbard_off", False),
user_incar_settings=d["user_incar_settings"],
spin_polarized=d.get("spin_polarized", False),
constrain_total_magmom=d["constrain_total_magmom"],
sort_structure=d.get("sort_structure", True))
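# Hedged usage sketch (not part of the original module): a single-stage MD run
# at constant temperature. The `structure` argument is assumed to be a pymatgen
# Structure; the temperature and step count below are illustrative only.
def _example_write_md_inputs(structure, output_dir="md_run"):
    vis = MITMDVaspInputSet(start_temp=300, end_temp=300, nsteps=1000,
                            time_step=2, spin_polarized=False)
    vis.write_input(structure, output_dir, make_dir_if_not_present=True)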
MPVaspInputSet = partial(DictVaspInputSet.from_file, "MP",
os.path.join(MODULE_DIR, "MPVaspInputSet.yaml"))
"""
Implementation of VaspInputSet utilizing parameters in the public
Materials Project. Typically, the pseudopotentials chosen contain more
electrons than the MIT parameters, and the k-point grid is ~50% more dense.
The LDAUU parameters are also different due to the different psps used,
which result in different fitted values.
"""
MPGGAVaspInputSet = partial(DictVaspInputSet.from_file, "MP GGA",
os.path.join(MODULE_DIR, "MPVaspInputSet.yaml"),
hubbard_off=True)
"""
Same as the MPVaspInput set, but the +U is enforced to be turned off.
"""
MPHSEVaspInputSet = partial(DictVaspInputSet.from_file, "MP HSE",
os.path.join(MODULE_DIR, "MPHSEVaspInputSet.yaml"))
"""
Same as the MPVaspInput set, but with HSE parameters.
"""
class MPStaticVaspInputSet(DictVaspInputSet):
"""
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for static calculations that typically follow relaxation runs.
It is recommended to use the static from_previous_run method to construct
the input set to inherit most of the functions.
Args:
kpoints_density (int): kpoints density for the reciprocal cell of
structure. Might need to increase the default value when
calculating metallic materials.
sym_prec (float): Tolerance for symmetry finding
kwargs:
hubbard_off (bool): Whether to turn off Hubbard U if it is specified in
config_dict ("MP Static"). Defaults to False, i.e., follow settings
in config_dict.
user_incar_settings (dict): User INCAR settings. This allows a user
to override INCAR settings, e.g., setting a different MAGMOM for
various elements or species.
constrain_total_magmom (bool): Whether to constrain the total magmom
(NUPDOWN in INCAR) to be the sum of the expected MAGMOM for all
species. Defaults to False.
sort_structure (bool): Whether to sort the structure (using the
default sort order of electronegativity) before generating input
files. Defaults to True, the behavior you would want most of the
time. This ensures that similar atomic species are grouped
together.
ediff_per_atom (bool): Whether the EDIFF is specified on a per atom
basis.
"""
@deprecated(message="Replaced by MPStaticSet. Will be removed in pmg 4.0.")
def __init__(self, kpoints_density=90, sym_prec=0.1, **kwargs):
super(MPStaticVaspInputSet, self).__init__(
"MP Static",
loadfn(os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")),
**kwargs)
self.incar_settings.update(
{"IBRION": -1, "ISMEAR": -5, "LAECHG": True, "LCHARG": True,
"LORBIT": 11, "LVHAR": True, "LWAVE": False, "NSW": 0,
"ICHARG": 0, "EDIFF": 0.000001, "ALGO": "Normal"})
self.kpoints_settings.update({"kpoints_density": kpoints_density})
self.sym_prec = sym_prec
def get_kpoints(self, structure, primitive_standard=False):
"""
Get a KPOINTS file using the fully automated grid method. Uses
Gamma centered meshes for hexagonal cells and Monk grids otherwise.
Args:
structure (Structure/IStructure): structure to get kpoints
primitive_standard (Bool): whether the input structure is
a primitive standardized cell
"""
if not primitive_standard:
structure = self.get_poscar(structure).structure
self.kpoints_settings['grid_density'] = \
self.kpoints_settings["kpoints_density"] * \
structure.lattice.reciprocal_lattice.volume * \
structure.num_sites
return super(MPStaticVaspInputSet, self).get_kpoints(structure)
def get_poscar(self, structure):
"""
Get a POSCAR file with a primitive standardized cell of
        the given structure.
Args:
structure (Structure/IStructure): structure to get POSCAR
"""
sym_finder = SpacegroupAnalyzer(structure, symprec=self.sym_prec)
return Poscar(sym_finder.get_primitive_standard_structure(False))
@staticmethod
def get_structure(vasp_run, outcar=None, initial_structure=False,
additional_info=False, sym_prec=0.1):
"""
Process structure for static calculations from previous run.
Args:
vasp_run (Vasprun): Vasprun that contains the final structure
from previous run.
outcar (Outcar): Outcar that contains the magnetization info from
previous run.
initial_structure (bool): Whether to return the structure from
previous run. Default is False.
additional_info (bool):
Whether to return additional symmetry info related to the
structure. If True, return a list of the refined structure (
conventional cell), the conventional standard structure,
the symmetry dataset and symmetry operations of the
structure (see SpacegroupAnalyzer doc for details).
sym_prec (float): Tolerance for symmetry finding
Returns:
Returns the magmom-decorated structure that can be passed to get
Vasp input files, e.g. get_kpoints.
"""
if vasp_run.is_spin:
if outcar and outcar.magnetization:
magmom = {"magmom": [i['tot'] for i in outcar.magnetization]}
else:
magmom = {
"magmom": vasp_run.as_dict()['input']['parameters']
['MAGMOM']}
else:
magmom = None
structure = vasp_run.final_structure
if magmom:
structure = structure.copy(site_properties=magmom)
sym_finder = SpacegroupAnalyzer(structure, symprec=sym_prec)
if initial_structure:
return structure
elif additional_info:
info = [sym_finder.get_refined_structure(),
sym_finder.get_conventional_standard_structure(False),
sym_finder.get_symmetry_dataset(),
sym_finder.get_symmetry_operations()]
return [sym_finder.get_primitive_standard_structure(False),
info]
else:
return sym_finder.get_primitive_standard_structure(False)
@staticmethod
@deprecated(message="Replaced by MPStaticSet. Will be removed in pmg 4.0.")
def from_previous_vasp_run(previous_vasp_dir, output_dir='.',
user_incar_settings=None,
make_dir_if_not_present=True,
kpoints_density=90, sym_prec=0.1):
"""
Generate a set of Vasp input files for static calculations from a
directory of previous Vasp run.
Args:
            previous_vasp_dir (str): Directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
output_dir (str): Directory to write the VASP input files for
the static calculations. Defaults to current directory.
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
kpoints_density (int): kpoints density for the reciprocal cell
of structure. Might need to increase the default value when
calculating metallic materials.
sym_prec (float): Tolerance for symmetry finding
"""
# Read input and output from previous run
try:
vasp_run = Vasprun(os.path.join(previous_vasp_dir, "vasprun.xml"),
parse_dos=False, parse_eigen=None)
outcar = Outcar(os.path.join(previous_vasp_dir, "OUTCAR"))
previous_incar = vasp_run.incar
previous_kpoints = vasp_run.kpoints
except:
traceback.print_exc()
raise RuntimeError("Can't get valid results from previous run. prev dir: {}".format(previous_vasp_dir))
mpsvip = MPStaticVaspInputSet(kpoints_density=kpoints_density,
sym_prec=sym_prec)
structure = mpsvip.get_structure(vasp_run, outcar)
mpsvip.write_input(structure, output_dir, make_dir_if_not_present)
new_incar = mpsvip.get_incar(structure)
# Use previous run INCAR and override necessary parameters
previous_incar.update({"IBRION": -1, "ISMEAR": -5, "LAECHG": True,
"LCHARG": True, "LORBIT": 11, "LVHAR": True,
"LWAVE": False, "NSW": 0, "ICHARG": 0,
"ALGO": "Normal"})
for incar_key in ["MAGMOM", "NUPDOWN"]:
if new_incar.get(incar_key, None):
previous_incar.update({incar_key: new_incar[incar_key]})
else:
previous_incar.pop(incar_key, None)
# use new LDAUU when possible b/c the Poscar might have changed
# representation
if previous_incar.get('LDAU'):
u = previous_incar.get('LDAUU', [])
j = previous_incar.get('LDAUJ', [])
if sum([u[x] - j[x] for x, y in enumerate(u)]) > 0:
for tag in ('LDAUU', 'LDAUL', 'LDAUJ'):
previous_incar.update({tag: new_incar[tag]})
# ensure to have LMAXMIX for GGA+U static run
if "LMAXMIX" not in previous_incar:
previous_incar.update({"LMAXMIX": new_incar["LMAXMIX"]})
# Compare ediff between previous and staticinputset values,
# choose the tighter ediff
previous_incar.update({"EDIFF": min(previous_incar.get("EDIFF", 1),
new_incar["EDIFF"])})
# add user settings
if user_incar_settings:
previous_incar.update(user_incar_settings)
previous_incar.write_file(os.path.join(output_dir, "INCAR"))
# Perform checking on INCAR parameters
if any([previous_incar.get("NSW", 0) != 0,
previous_incar["IBRION"] != -1,
previous_incar["LCHARG"] is not True,
any([sum(previous_incar["LDAUU"]) <= 0,
previous_incar["LMAXMIX"] < 4])
if previous_incar.get("LDAU") else False]):
raise ValueError("Incompatible INCAR parameters!")
# Prefer to use k-point scheme from previous run
new_kpoints = mpsvip.get_kpoints(structure)
if previous_kpoints.style != new_kpoints.style:
if previous_kpoints.style == Kpoints.supported_modes.Monkhorst and \
SpacegroupAnalyzer(structure, 0.1).get_lattice_type() != \
"hexagonal":
k_div = (kp + 1 if kp % 2 == 1 else kp
for kp in new_kpoints.kpts[0])
Kpoints.monkhorst_automatic(k_div). \
write_file(os.path.join(output_dir, "KPOINTS"))
else:
Kpoints.gamma_automatic(new_kpoints.kpts[0]). \
write_file(os.path.join(output_dir, "KPOINTS"))
else:
new_kpoints.write_file(os.path.join(output_dir, "KPOINTS"))
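# Hedged usage sketch (not part of the original module): generates static-run
# inputs from a finished relaxation. The directory names are hypothetical; the
# relaxation directory is assumed to contain vasprun.xml and OUTCAR.
def _example_static_from_relaxation(relax_dir="relax_run", static_dir="static_run"):
    MPStaticVaspInputSet.from_previous_vasp_run(
        relax_dir, output_dir=static_dir, kpoints_density=90, sym_prec=0.1)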
class MPStaticDielectricDFPTVaspInputSet(DictVaspInputSet):
"""
Using MP parameters to compute a static dielectric constant
with DFPT. This includes the electronic and ionic contributions
to the static dielectric constant.
Args:
user_incar_settings (dict): A dict specifying additional incar
settings
        ionic (bool): If False, the ions are clamped and only the electronic
            contribution is computed; if True (the default), the ionic
            contribution to the static dielectric constant is included.
"""
def __init__(self, user_incar_settings=None, ionic=True):
super(MPStaticDielectricDFPTVaspInputSet, self).__init__(
"Materials Project Static Dielectric DFPT",
loadfn(os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")))
self.user_incar_settings = user_incar_settings if \
user_incar_settings is not None else {}
self.incar_settings.update(self.user_incar_settings)
if ionic:
self.incar_settings.update(
{"IBRION": 8, "LEPSILON": True, 'LREAL':False})
else:
self.incar_settings.update(
{"LEPSILON": True, 'LREAL': False})
if 'NPAR' in self.incar_settings:
del self.incar_settings['NPAR']
if 'NSW' in self.incar_settings:
del self.incar_settings['NSW']
class MPNonSCFVaspInputSet(MPStaticVaspInputSet):
"""
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for non self-consistent field (NonSCF) calculation that follows
    a static run to calculate bandstructure, density of states (DOS), etc.
It is recommended to use the NonSCF from_previous_run method to construct
the input set to inherit most of the functions.
Args:
        user_incar_settings (dict): A dict specifying customized settings
            for INCAR. Must contain an NBANDS value; a suggested value is
            1.2*(NBANDS from the static run).
mode: Line: Generate k-points along symmetry lines for
bandstructure. Uniform: Generate uniform k-points
grids for DOS.
constrain_total_magmom (bool): Whether to constrain the total
magmom (NUPDOWN in INCAR) to be the sum of the expected
MAGMOM for all species. Defaults to False.
kpoints_density (int): kpoints density for the reciprocal cell
of structure. Might need to increase the default value when
calculating metallic materials.
kpoints_line_density (int): kpoints density to use in line-mode.
Might need to increase the default value when calculating
metallic materials.
sort_structure (bool): Whether to sort structure. Defaults to
False.
sym_prec (float): Tolerance for symmetry finding
"""
@deprecated(message="Replaced by MPNonSCFSet. Will be removed in pmg 4.0.")
def __init__(self, user_incar_settings, mode="Line",
constrain_total_magmom=False, sort_structure=False,
kpoints_density=1000, sym_prec=0.1, kpoints_line_density=20):
self.mode = mode
self.sym_prec = sym_prec
self.kpoints_line_density = kpoints_line_density
if mode not in ["Line", "Uniform"]:
raise ValueError("Supported modes for NonSCF runs are 'Line' and "
"'Uniform'!")
DictVaspInputSet.__init__(self,
"Materials Project Static",
loadfn(os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")),
constrain_total_magmom=constrain_total_magmom,
sort_structure=sort_structure)
self.user_incar_settings = user_incar_settings
self.incar_settings.update(
{"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001, "LCHARG": False,
"LORBIT": 11, "LWAVE": False, "NSW": 0, "ISYM": 0, "ICHARG": 11})
self.kpoints_settings.update({"kpoints_density": kpoints_density})
if mode == "Uniform":
# Set smaller steps for DOS output
self.incar_settings.update({"NEDOS": 601})
if "NBANDS" not in user_incar_settings:
raise KeyError("For NonSCF runs, NBANDS value from SC runs is "
"required!")
else:
self.incar_settings.update(user_incar_settings)
def get_kpoints(self, structure):
"""
Get a KPOINTS file for NonSCF calculation. In "Line" mode, kpoints are
generated along high symmetry lines. In "Uniform" mode, kpoints are
Gamma-centered mesh grid. Kpoints are written explicitly in both cases.
Args:
structure (Structure/IStructure): structure to get Kpoints
"""
if self.mode == "Line":
kpath = HighSymmKpath(structure)
frac_k_points, k_points_labels = kpath.get_kpoints(
line_density=self.kpoints_line_density,
coords_are_cartesian=False)
return Kpoints(comment="Non SCF run along symmetry lines",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(frac_k_points),
kpts=frac_k_points, labels=k_points_labels,
kpts_weights=[1] * len(frac_k_points))
else:
num_kpoints = self.kpoints_settings["kpoints_density"] * \
structure.lattice.reciprocal_lattice.volume
kpoints = Kpoints.automatic_density(
structure, num_kpoints * structure.num_sites)
mesh = kpoints.kpts[0]
ir_kpts = SpacegroupAnalyzer(structure, symprec=self.sym_prec) \
.get_ir_reciprocal_mesh(mesh)
kpts = []
weights = []
for k in ir_kpts:
kpts.append(k[0])
weights.append(int(k[1]))
return Kpoints(comment="Non SCF run on uniform grid",
style=Kpoints.supported_modes.Reciprocal,
num_kpts=len(ir_kpts),
kpts=kpts, kpts_weights=weights)
@staticmethod
def get_incar_settings(vasp_run, outcar=None):
"""
Helper method to get necessary user_incar_settings from previous run.
Args:
vasp_run (Vasprun): Vasprun that contains the final
structure from previous run.
outcar (Outcar): Outcar that contains the magnetization info
from previous run.
"""
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i['tot'] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasp_run.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasp_run.as_dict()["input"]["parameters"]["NBANDS"]
* 1.2))
incar_settings = {"ISPIN": ispin, "NBANDS": nbands}
for grid in ["NGX", "NGY", "NGZ"]:
if vasp_run.incar.get(grid):
incar_settings.update({grid: vasp_run.incar.get(grid)})
return incar_settings
def get_incar(self, structure):
incar = super(MPNonSCFVaspInputSet, self).get_incar(structure)
incar.pop("MAGMOM", None)
return incar
def get_poscar(self, structure, get_primitive_standard=False):
"""
        Get a POSCAR file of the given structure.
Args:
structure (Structure/IStructure): structure to get POSCAR
            get_primitive_standard (bool): whether to convert the input
                structure to a primitive standard structure
"""
if get_primitive_standard:
sym_finder = SpacegroupAnalyzer(structure, symprec=self.sym_prec)
return Poscar(sym_finder.get_primitive_standard_structure(False))
else:
return Poscar(structure)
@staticmethod
def from_previous_vasp_run(previous_vasp_dir, output_dir='.',
mode="Uniform", user_incar_settings=None,
copy_chgcar=True, make_dir_if_not_present=True,
kpoints_density=1000, kpoints_line_density=20):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
            previous_vasp_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
output_dir (str): The directory to write the VASP input files
for the NonSCF calculations. Default to write in the current
directory.
mode (str): Line: Generate k-points along symmetry lines for
bandstructure. Uniform: Generate uniform k-points
grids for DOS.
            user_incar_settings (dict): A dict specifying customized settings
                for INCAR. Must contain an NBANDS value; a suggested value is
                1.2*(NBANDS from the static run).
copy_chgcar (bool): Default to copy CHGCAR from SC run
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
kpoints_density (int): kpoints density for the reciprocal cell
of structure. Might need to increase the default value when
calculating metallic materials.
kpoints_line_density (int): kpoints density to use in line-mode.
Might need to increase the default value when calculating
metallic materials.
"""
user_incar_settings = user_incar_settings or {}
try:
vasp_run = Vasprun(os.path.join(previous_vasp_dir, "vasprun.xml"),
parse_dos=False, parse_eigen=None)
outcar = Outcar(os.path.join(previous_vasp_dir, "OUTCAR"))
previous_incar = vasp_run.incar
except:
traceback.print_exc()
raise RuntimeError("Can't get valid results from previous run: {}"
.format(previous_vasp_dir))
#Get a Magmom-decorated structure
structure = MPNonSCFVaspInputSet.get_structure(vasp_run, outcar,
initial_structure=True)
nscf_incar_settings = MPNonSCFVaspInputSet.get_incar_settings(vasp_run,
outcar)
mpnscfvip = MPNonSCFVaspInputSet(nscf_incar_settings, mode,
kpoints_density=kpoints_density,
kpoints_line_density=kpoints_line_density)
mpnscfvip.write_input(structure, output_dir, make_dir_if_not_present)
if copy_chgcar:
try:
shutil.copyfile(os.path.join(previous_vasp_dir, "CHGCAR"),
os.path.join(output_dir, "CHGCAR"))
except Exception as e:
traceback.print_exc()
raise RuntimeError("Can't copy CHGCAR from SC run" + '\n'
+ str(e))
#Overwrite necessary INCAR parameters from previous runs
previous_incar.update({"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001,
"LCHARG": False, "LORBIT": 11, "LWAVE": False,
"NSW": 0, "ISYM": 0, "ICHARG": 11})
previous_incar.update(nscf_incar_settings)
previous_incar.update(user_incar_settings)
previous_incar.pop("MAGMOM", None)
previous_incar.write_file(os.path.join(output_dir, "INCAR"))
# Perform checking on INCAR parameters
if any([previous_incar.get("NSW", 0) != 0,
previous_incar["IBRION"] != -1,
previous_incar["ICHARG"] != 11,
any([sum(previous_incar["LDAUU"]) <= 0,
previous_incar["LMAXMIX"] < 4])
if previous_incar.get("LDAU") else False]):
raise ValueError("Incompatible INCAR parameters!")
class MPOpticsNonSCFVaspInputSet(MPNonSCFVaspInputSet):
"""
Implementation of VaspInputSet overriding MaterialsProjectVaspInputSet
for non self-consistent field (NonSCF) calculation with the computation
of the dielectric function that follows a static run
It is recommended to use the NonSCF from_previous_run method to construct
the input set to inherit most of the functions.
Args:
        user_incar_settings (dict): A dict specifying customized settings
            for INCAR. Must contain an NBANDS value; a suggested value is
            factor*(NBANDS from the static run) with factor between 5 and 10.
mode: Line: Generate k-points along symmetry lines for
bandstructure. Uniform: Generate uniform k-points
grids for DOS.
constrain_total_magmom (bool): Whether to constrain the total
magmom (NUPDOWN in INCAR) to be the sum of the expected
MAGMOM for all species. Defaults to False.
kpoints_density (int): kpoints density for the reciprocal cell
of structure. Might need to increase the default value when
calculating metallic materials.
sort_structure (bool): Whether to sort structure. Defaults to
False.
sym_prec (float): Tolerance for symmetry finding
"""
@deprecated(message="Replaced by MPNonSCFSet with optics=True. Will "
"be removed in pmg 4.0.")
def __init__(self, user_incar_settings,
constrain_total_magmom=False, sort_structure=False,
kpoints_density=1000, sym_prec=0.1, nedos=2001):
self.sym_prec = sym_prec
self.nedos = nedos
        # Initialize via the parent MPNonSCFVaspInputSet in "Uniform" mode.
        MPNonSCFVaspInputSet.__init__(
            self, user_incar_settings, mode="Uniform",
            constrain_total_magmom=constrain_total_magmom,
            sort_structure=sort_structure,
            kpoints_density=kpoints_density, sym_prec=sym_prec)
self.incar_settings.update({"NEDOS": nedos})
self.incar_settings.update({"LOPTICS": True})
@staticmethod
def from_previous_vasp_run(previous_vasp_dir, output_dir='.',
user_incar_settings=None,
copy_chgcar=True, make_dir_if_not_present=True,
nbands_factor=5.0, nedos=2001):
"""
Generate a set of Vasp input files for NonSCF calculations from a
directory of previous static Vasp run.
Args:
            previous_vasp_dir (str): The directory containing the outputs
                (vasprun.xml and OUTCAR) of the previous vasp run.
output_dir (str): The directory to write the VASP input files
for the NonSCF calculations. Default to write in the current
directory.
            user_incar_settings (dict): A dict specifying customized settings
                for INCAR. Must contain an NBANDS value; a suggested value is
                1.2*(NBANDS from the static run).
copy_chgcar (bool): Default to copy CHGCAR from SC run
make_dir_if_not_present (bool): Set to True if you want the
directory (and the whole path) to be created if it is not
present.
nbands_factor (float): Factor by which the number of bands is to be
multiplied. Typical calculations of dielectric functions need a
total number of bands of 5 to 10 times the number of valence
bands.
"""
user_incar_settings = user_incar_settings or {}
try:
vasp_run = Vasprun(os.path.join(previous_vasp_dir, "vasprun.xml"),
parse_dos=False, parse_eigen=False)
outcar = Outcar(os.path.join(previous_vasp_dir, "OUTCAR"))
previous_incar = vasp_run.incar
except:
traceback.print_exc()
raise RuntimeError("Can't get valid results from previous run. prev dir: {}".format(previous_vasp_dir))
#Get a Magmom-decorated structure
structure = MPNonSCFVaspInputSet.get_structure(vasp_run, outcar,
initial_structure=True)
nscf_incar_settings = MPNonSCFVaspInputSet.get_incar_settings(vasp_run,
outcar)
spin_band_settings = MPOpticsNonSCFVaspInputSet.get_ispin_nbands(
vasp_run, outcar, nbands_factor=nbands_factor)
mpnscfvip = MPNonSCFVaspInputSet(nscf_incar_settings, "Uniform")
mpnscfvip.incar_settings.update(spin_band_settings)
mpnscfvip.write_input(structure, output_dir, make_dir_if_not_present)
if copy_chgcar:
try:
shutil.copyfile(os.path.join(previous_vasp_dir, "CHGCAR"),
os.path.join(output_dir, "CHGCAR"))
except Exception as e:
traceback.print_exc()
raise RuntimeError("Can't copy CHGCAR from SC run" + '\n'
+ str(e))
#Overwrite necessary INCAR parameters from previous runs
previous_incar.update({"IBRION": -1, "ISMEAR": 0, "SIGMA": 0.001,
"LCHARG": False, "LORBIT": 11, "LWAVE": False,
"NSW": 0, "ISYM": 0, "ICHARG": 11,
"LOPTICS": True, "NEDOS": nedos})
previous_incar.update(nscf_incar_settings)
previous_incar.update(spin_band_settings)
previous_incar.update(user_incar_settings)
previous_incar.pop("MAGMOM", None)
previous_incar.write_file(os.path.join(output_dir, "INCAR"))
# Perform checking on INCAR parameters
if any([previous_incar.get("NSW", 0) != 0,
previous_incar["IBRION"] != -1,
previous_incar["ICHARG"] != 11,
any([sum(previous_incar["LDAUU"]) <= 0,
previous_incar["LMAXMIX"] < 4])
if previous_incar.get("LDAU") else False]):
raise ValueError("Incompatible INCAR parameters!")
@staticmethod
def get_ispin_nbands(vasp_run, outcar=None, nbands_factor=5.0):
"""
Helper method to get necessary user_incar_settings from previous run.
Args:
vasp_run (Vasprun): Vasprun that contains the final
structure from previous run.
outcar (Outcar): Outcar that contains the magnetization info
from previous run.
"""
# Turn off spin when magmom for every site is smaller than 0.02.
if outcar and outcar.magnetization:
site_magmom = np.array([i['tot'] for i in outcar.magnetization])
ispin = 2 if np.any(site_magmom[np.abs(site_magmom) > 0.02]) else 1
elif vasp_run.is_spin:
ispin = 2
else:
ispin = 1
nbands = int(np.ceil(vasp_run.as_dict()["input"]["parameters"]["NBANDS"]
* nbands_factor))
incar_settings = {"ISPIN": ispin, "NBANDS": nbands}
for grid in ["NGX", "NGY", "NGZ"]:
if vasp_run.incar.get(grid):
incar_settings.update({grid: vasp_run.incar.get(grid)})
return incar_settings
class MVLElasticInputSet(DictVaspInputSet):
"""
MVL denotes VASP input sets that are implemented by the Materials Virtual
Lab (http://www.materialsvirtuallab.org) for various research.
This input set is used to calculate elastic constants in VASP. It is used
in the following work::
Z. Deng, Z. Wang, I.-H. Chu, J. Luo, S. P. Ong.
“Elastic Properties of Alkali Superionic Conductor Electrolytes
from First Principles Calculations”, J. Electrochem. Soc.
2016, 163(2), A67-A74. doi: 10.1149/2.0061602jes
To read the elastic constants, you may use the Outcar class which parses the
elastic constants.
Args:
        potim (float): POTIM parameter. The default of 0.015 is usually fine,
but some structures may require a smaller step.
user_incar_settings (dict): A dict specifying additional incar
settings.
"""
@deprecated(message="Replaced by MPElasticSet. Will be removed in pmg 4.0.")
def __init__(self, potim=0.015, user_incar_settings=None):
super(MVLElasticInputSet, self).__init__(
"Materials Virtual Lab Elastic Constant Calculation",
loadfn(os.path.join(MODULE_DIR, "MPVaspInputSet.yaml")))
self.user_incar_settings = user_incar_settings or {}
self.incar_settings.update(self.user_incar_settings)
self.incar_settings.update({"IBRION": 6, "NFREE": 2, "POTIM": potim})
if "NPAR" in self.incar_settings:
del self.incar_settings["NPAR"]
def batch_write_vasp_input(structures, vasp_input_set, output_dir,
make_dir_if_not_present=True, subfolder=None,
sanitize=False, include_cif=False):
"""
Batch write vasp input for a sequence of structures to
output_dir, following the format output_dir/{group}/{formula}_{number}.
Args:
structures ([Structure]): Sequence of Structures.
vasp_input_set (VaspInputSet): VaspInputSet that creates
vasp input files from structures
output_dir (str): Directory to output files
make_dir_if_not_present (bool): Create the directory if not present.
Defaults to True.
subfolder (callable): Function to create subdirectory name from
structure. Defaults to simply "formula_count".
sanitize (bool): Boolean indicating whether to sanitize the
            structure before writing the VASP input files. Sanitized outputs
            are generally easier for viewing and certain forms of analysis.
Defaults to False.
include_cif (bool): Whether to output a CIF as well. CIF files are
generally better supported in visualization programs.
"""
for i, s in enumerate(structures):
formula = re.sub("\s+", "", s.formula)
if subfolder is not None:
subdir = subfolder(s)
dirname = os.path.join(output_dir, subdir)
else:
dirname = os.path.join(output_dir, '{}_{}'.format(formula, i))
if sanitize:
s = s.copy(sanitize=True)
vasp_input_set.write_input(
s, dirname, make_dir_if_not_present=make_dir_if_not_present,
include_cif=include_cif
)
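# Hedged usage sketch (not part of the original module): writes MP-style inputs
# for a list of structures, one subdirectory per structure. The `structures`
# argument is assumed to be a sequence of pymatgen Structure objects.
def _example_batch_write(structures, output_dir="batch_runs"):
    vis = MPVaspInputSet()
    batch_write_vasp_input(structures, vis, output_dir,
                           make_dir_if_not_present=True, include_cif=False)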
|
Bismarrck/pymatgen
|
pymatgen/io/vasp/sets_deprecated.py
|
Python
|
mit
| 63,194
|
[
"VASP",
"pymatgen"
] |
39dfac4b6b3d256e65c65a2fcca7cdaf69dc71485b2ab95c4fc67df7c57439f9
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import contextlib
import inspect
import types
import warnings
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
"batch_shape", "get_batch_shape", "event_shape", "get_event_shape",
"sample_n", "log_prob", "prob", "log_cdf", "cdf", "log_survival_function",
"survival_function", "entropy", "mean", "variance", "std", "mode"]
@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
"""Abstract base class needed for resolving subclass hierarchy."""
pass
def _copy_fn(fn):
"""Create a deep copy of fn.
Args:
fn: a callable
Returns:
A `FunctionType`: a deep copy of fn.
Raises:
TypeError: if `fn` is not a callable.
"""
if not callable(fn):
raise TypeError("fn is not callable: %s" % fn)
# The blessed way to copy a function. copy.deepcopy fails to create
# a non-reference copy. Since:
# types.FunctionType == type(lambda: None),
# and the docstring for the function type states:
#
# function(code, globals[, name[, argdefs[, closure]]])
#
# Create a function object from a code object and a dictionary.
# ...
#
# Here we can use this to create a new function with the old function's
# code, globals, closure, etc.
return types.FunctionType(
code=fn.__code__, globals=fn.__globals__,
name=fn.__name__, argdefs=fn.__defaults__,
closure=fn.__closure__)
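# Hedged illustration (not part of the original module): demonstrates that
# _copy_fn yields an independent function object whose docstring can be changed
# without touching the original. The sample function below is made up.
def _example_copy_fn():
  def _original():
    """Original docstring."""
    return 42
  copied = _copy_fn(_original)
  copied.__doc__ = "Modified docstring."
  return _original.__doc__, copied.__doc__, copied()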
def _update_docstring(old_str, append_str):
"""Update old_str by inserting append_str just before the "Args:" section."""
old_str_lines = old_str.split("\n")
# Step 0: Prepend spaces to all lines of append_str. This is
# necessary for correct markdown generation.
append_str = "\n".join(" %s" % line for line in append_str.split("\n"))
# Step 1: Find mention of "Args":
has_args_ix = [
ix for ix, line in enumerate(old_str_lines)
if line.strip().lower() == "args:"]
if has_args_ix:
final_args_ix = has_args_ix[-1]
return ("\n".join(old_str_lines[:final_args_ix])
+ "\n\n" + append_str + "\n\n"
+ "\n".join(old_str_lines[final_args_ix:]))
else:
return old_str + "\n\n" + append_str
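# Hedged illustration (not part of the original module): shows what
# _update_docstring does on a tiny made-up docstring. The extra text is indented
# and spliced in just before the final "Args:" section.
def _example_update_docstring():
  old = "Computes log prob.\n\nArgs:\n  value: input tensor."
  extra = "Additional documentation from `MyDist`:\n\nSome details."
  return _update_docstring(old, extra)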
class _DistributionMeta(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
        the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
base = baseclasses[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = inspect.getdoc(base_attr_value)
if class_attr_docstring is None:
raise ValueError(
"Expected base class fn to contain a docstring: %s.%s"
% (base.__name__, attr))
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
("Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)))
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@six.add_metaclass(_DistributionMeta)
class Distribution(_BaseDistribution):
"""A generic probability distribution base class.
`Distribution` is a base class for constructing and organizing properties
(e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian).
### Subclassing
Subclasses are expected to implement a leading-underscore version of the
same-named function. The argument signature should be identical except for
the omission of `name="..."`. For example, to enable `log_prob(value,
name="log_prob")` a subclass should implement `_log_prob(value)`.
Subclasses can append to public-level docstrings by providing
docstrings for their method specializations. For example:
```python
@distribution_util.AppendDocstring("Some other details.")
def _log_prob(self, value):
...
```
would add the string "Some other details." to the `log_prob` function
docstring. This is implemented as a simple decorator to avoid python
linter complaining about missing Args/Returns/Raises sections in the
partial docstrings.
### Broadcasting, batching, and shapes
All distributions support batches of independent distributions of that type.
The batch shape is determined by broadcasting together the parameters.
  The shapes of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and
  `log_prob` reflect this broadcasting, as do the return values of `sample` and
  `sample_n`.
`sample_n_shape = (n,) + batch_shape + event_shape`, where `sample_n_shape` is
the shape of the `Tensor` returned from `sample_n`, `n` is the number of
samples, `batch_shape` defines how many independent distributions there are,
and `event_shape` defines the shape of samples from each of those independent
distributions. Samples are independent along the `batch_shape` dimensions, but
not necessarily so along the `event_shape` dimensions (depending on the
particulars of the underlying distribution).
Using the `Uniform` distribution as an example:
```python
minval = 3.0
maxval = [[4.0, 6.0],
[10.0, 12.0]]
# Broadcasting:
# This instance represents 4 Uniform distributions. Each has a lower bound at
# 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape.
u = Uniform(minval, maxval)
# `event_shape` is `TensorShape([])`.
event_shape = u.get_event_shape()
# `event_shape_t` is a `Tensor` which will evaluate to [].
event_shape_t = u.event_shape
# Sampling returns a sample per distribution. `samples` has shape
# (5, 2, 2), which is (n,) + batch_shape + event_shape, where n=5,
# batch_shape=(2, 2), and event_shape=().
samples = u.sample_n(5)
# The broadcasting holds across methods. Here we use `cdf` as an example. The
# same holds for `log_cdf` and the likelihood functions.
# `cum_prob` has shape (2, 2) as the `value` argument was broadcasted to the
# shape of the `Uniform` instance.
cum_prob_broadcast = u.cdf(4.0)
# `cum_prob`'s shape is (2, 2), one per distribution. No broadcasting
# occurred.
cum_prob_per_dist = u.cdf([[4.0, 5.0],
[6.0, 7.0]])
# INVALID as the `value` argument is not broadcastable to the distribution's
# shape.
cum_prob_invalid = u.cdf([4.0, 5.0, 6.0])
```
### Parameter values leading to undefined statistics or distributions.
Some distributions do not have well-defined statistics for all initialization
parameter values. For example, the beta distribution is parameterized by
positive real numbers `a` and `b`, and does not have well-defined mode if
`a < 1` or `b < 1`.
The user is given the option of raising an exception or returning `NaN`.
```python
a = tf.exp(tf.matmul(logits, weights_a))
b = tf.exp(tf.matmul(logits, weights_b))
# Will raise exception if ANY batch member has a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=False)
mode = dist.mode().eval()
# Will return NaN for batch members with either a < 1 or b < 1.
dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior
mode = dist.mode().eval()
```
In all cases, an exception is raised if *invalid* parameters are passed, e.g.
```python
# Will raise an exception if any Op is run.
negative_a = -1.0 * a # beta distribution by definition has a > 0.
dist = distributions.beta(negative_a, b, allow_nan_stats=True)
dist.mean().eval()
```
"""
def __init__(self,
dtype,
is_continuous,
is_reparameterized,
validate_args,
allow_nan_stats,
parameters=None,
graph_parents=None,
name=None):
"""Constructs the `Distribution`.
**This is a private method for subclass use.**
Args:
dtype: The type of the event samples. `None` implies no type-enforcement.
is_continuous: Python boolean. If `True` this
`Distribution` is continuous over its supported domain.
is_reparameterized: Python boolean. If `True` this
`Distribution` can be reparameterized in terms of some standard
distribution with a function whose Jacobian is constant for the support
of the standard distribution.
validate_args: Python boolean. Whether to validate input with asserts.
If `validate_args` is `False`, and the inputs are invalid,
correct behavior is not guaranteed.
allow_nan_stats: Python boolean. If `False`, raise an
exception if a statistic (e.g., mean, mode) is undefined for any batch
member. If True, batch members with valid parameters leading to
undefined statistics will return `NaN` for this statistic.
parameters: Python dictionary of parameters used to instantiate this
`Distribution`.
graph_parents: Python list of graph prerequisites of this `Distribution`.
name: A name for this distribution. Default: subclass name.
Raises:
ValueError: if any member of graph_parents is `None` or not a `Tensor`.
"""
graph_parents = [] if graph_parents is None else graph_parents
for i, t in enumerate(graph_parents):
if t is None or not contrib_framework.is_tensor(t):
raise ValueError("Graph parent item %d is not a Tensor; %s." % (i, t))
parameters = parameters or {}
self._dtype = dtype
self._is_continuous = is_continuous
self._is_reparameterized = is_reparameterized
self._allow_nan_stats = allow_nan_stats
self._validate_args = validate_args
self._parameters = parameters
self._graph_parents = graph_parents
self._name = name or type(self).__name__
@classmethod
def param_shapes(cls, sample_shape, name="DistributionParamShapes"):
"""Shapes of parameters given the desired shape of a call to `sample()`.
Subclasses should override static method `_param_shapes`.
Args:
sample_shape: `Tensor` or python list/tuple. Desired shape of a call to
`sample()`.
name: name to prepend ops with.
Returns:
`dict` of parameter name to `Tensor` shapes.
"""
with ops.name_scope(name, values=[sample_shape]):
return cls._param_shapes(sample_shape)
@classmethod
def param_static_shapes(cls, sample_shape):
"""param_shapes with static (i.e. TensorShape) shapes.
Args:
sample_shape: `TensorShape` or python list/tuple. Desired shape of a call
to `sample()`.
Returns:
`dict` of parameter name to `TensorShape`.
Raises:
ValueError: if `sample_shape` is a `TensorShape` and is not fully defined.
"""
if isinstance(sample_shape, tensor_shape.TensorShape):
if not sample_shape.is_fully_defined():
raise ValueError("TensorShape sample_shape must be fully defined")
sample_shape = sample_shape.as_list()
params = cls.param_shapes(sample_shape)
static_params = {}
for name, shape in params.items():
static_shape = tensor_util.constant_value(shape)
if static_shape is None:
raise ValueError(
"sample_shape must be a fully-defined TensorShape or list/tuple")
static_params[name] = tensor_shape.TensorShape(static_shape)
return static_params
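  # Minimal sketch, assuming a Normal-style subclass with `mu`/`sigma`
  # parameters (names are assumptions, not from this file):
  # `param_static_shapes([100])` would return something like
  # {"mu": TensorShape([100]), "sigma": TensorShape([100])}, i.e. the static
  # parameter shapes needed for `sample()` to yield 100 draws.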
@staticmethod
def _param_shapes(sample_shape):
raise NotImplementedError("_param_shapes not implemented")
@property
def name(self):
"""Name prepended to all ops created by this `Distribution`."""
return self._name
@property
def dtype(self):
"""The `DType` of `Tensor`s handled by this `Distribution`."""
return self._dtype
@property
def parameters(self):
"""Dictionary of parameters used to instantiate this `Distribution`."""
return self._parameters
@property
def is_continuous(self):
return self._is_continuous
@property
def is_reparameterized(self):
return self._is_reparameterized
@property
def allow_nan_stats(self):
"""Python boolean describing behavior when a stat is undefined.
Stats return +/- infinity when it makes sense. E.g., the variance
of a Cauchy distribution is infinity. However, sometimes the
statistic is undefined, e.g., if a distribution's pdf does not achieve a
maximum within the support of the distribution, the mode is undefined.
If the mean is undefined, then by definition the variance is undefined.
E.g. the mean for Student's T for df = 1 is undefined (no clear way to say
it is either + or - infinity), so the variance = E[(X - mean)^2] is also
undefined.
Returns:
allow_nan_stats: Python boolean.
"""
return self._allow_nan_stats
@property
def validate_args(self):
"""Python boolean indicated possibly expensive checks are enabled."""
return self._validate_args
def copy(self, **override_parameters_kwargs):
"""Creates a deep copy of the distribution.
    Note: the copy distribution may continue to depend on the original
    initialization arguments.
Args:
**override_parameters_kwargs: String/value dictionary of initialization
arguments to override with new values.
Returns:
      distribution: A new instance of `type(self)` initialized from the union
of self.parameters and override_parameters_kwargs, i.e.,
`dict(self.parameters, **override_parameters_kwargs)`.
"""
parameters = dict(self.parameters, **override_parameters_kwargs)
# Python3 leaks "__class__" into `locals()` so we remove if present.
# TODO(b/32376812): Remove this pop.
parameters.pop("__class__", None)
return type(self)(**parameters)
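  # Minimal usage sketch, assuming a Normal-style subclass with `mu`/`sigma`
  # parameters (names are assumptions, not from this file):
  #   n = Normal(mu=0., sigma=1.)
  #   n2 = n.copy(sigma=2.)  # same mu, new sigma, a fresh instance of type(n)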
def _batch_shape(self):
raise NotImplementedError("batch_shape is not implemented")
def batch_shape(self, name="batch_shape"):
"""Shape of a single sample from a single event index as a 1-D `Tensor`.
The product of the dimensions of the `batch_shape` is the number of
independent distributions of this kind the instance represents.
Args:
name: name to give to the op
Returns:
batch_shape: `Tensor`.
"""
with self._name_scope(name):
return self._batch_shape()
def _get_batch_shape(self):
return tensor_shape.TensorShape(None)
def get_batch_shape(self):
"""Shape of a single sample from a single event index as a `TensorShape`.
Same meaning as `batch_shape`. May be only partially defined.
Returns:
batch_shape: `TensorShape`, possibly unknown.
"""
return self._get_batch_shape()
def _event_shape(self):
raise NotImplementedError("event_shape is not implemented")
def event_shape(self, name="event_shape"):
"""Shape of a single sample from a single batch as a 1-D int32 `Tensor`.
Args:
name: name to give to the op
Returns:
event_shape: `Tensor`.
"""
with self._name_scope(name):
return self._event_shape()
def _get_event_shape(self):
return tensor_shape.TensorShape(None)
def get_event_shape(self):
"""Shape of a single sample from a single batch as a `TensorShape`.
Same meaning as `event_shape`. May be only partially defined.
Returns:
event_shape: `TensorShape`, possibly unknown.
"""
return self._get_event_shape()
def _sample_n(self, n, seed=None):
raise NotImplementedError("sample_n is not implemented")
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
"""Generate samples of the specified shape.
Note that a call to `sample()` without arguments will generate a single
sample.
Args:
sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with prepended dimensions `sample_shape`.
"""
with self._name_scope(name, values=[sample_shape]):
sample_shape = ops.convert_to_tensor(
sample_shape, dtype=dtypes.int32, name="sample_shape")
if sample_shape.get_shape().ndims == 0:
return self.sample_n(sample_shape, seed, **condition_kwargs)
sample_shape, total = self._expand_sample_shape(sample_shape)
samples = self.sample_n(total, seed, **condition_kwargs)
output_shape = array_ops.concat(0, [sample_shape, array_ops.slice(
array_ops.shape(samples), [1], [-1])])
output = array_ops.reshape(samples, output_shape)
output.set_shape(tensor_util.constant_value_as_shape(
sample_shape).concatenate(samples.get_shape()[1:]))
return output
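  # Minimal sketch of the resulting shapes: with batch_shape (2, 2) and scalar
  # events, `dist.sample([3, 5])` draws 3*5 = 15 samples via `sample_n` and
  # reshapes them to shape (3, 5, 2, 2).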
def sample_n(self, n, seed=None, name="sample_n", **condition_kwargs):
"""Generate `n` samples.
Args:
n: `Scalar` `Tensor` of type `int32` or `int64`, the number of
observations to sample.
seed: Python integer seed for RNG
name: name to give to the op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
samples: a `Tensor` with a prepended dimension (n,).
Raises:
TypeError: if `n` is not an integer type.
"""
warnings.warn("Please use `sample` instead of `sample_n`. `sample_n` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
with self._name_scope(name, values=[n]):
n = ops.convert_to_tensor(n, name="n")
if not n.dtype.is_integer:
raise TypeError("n.dtype=%s is not an integer type" % n.dtype)
x = self._sample_n(n, seed, **condition_kwargs)
# Set shape hints.
sample_shape = tensor_shape.TensorShape(
tensor_util.constant_value(n))
batch_ndims = self.get_batch_shape().ndims
event_ndims = self.get_event_shape().ndims
if batch_ndims is not None and event_ndims is not None:
inferred_shape = sample_shape.concatenate(
self.get_batch_shape().concatenate(
self.get_event_shape()))
x.set_shape(inferred_shape)
elif x.get_shape().ndims is not None and x.get_shape().ndims > 0:
x.get_shape()[0].merge_with(sample_shape[0])
if batch_ndims is not None and batch_ndims > 0:
x.get_shape()[1:1+batch_ndims].merge_with(self.get_batch_shape())
if event_ndims is not None and event_ndims > 0:
x.get_shape()[-event_ndims:].merge_with(self.get_event_shape())
return x
def _log_prob(self, value):
raise NotImplementedError("log_prob is not implemented")
def log_prob(self, value, name="log_prob", **condition_kwargs):
"""Log probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def prob(self, value, name="prob", **condition_kwargs):
"""Probability density/mass function (depending on `is_continuous`).
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._prob(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_prob(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_cdf(self, value):
raise NotImplementedError("log_cdf is not implemented")
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
"""Log cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
log_cdf(x) := Log[ P[X <= x] ]
```
Often, a numerical approximation can be used for `log_cdf(x)` that yields
a more accurate answer than simply taking the logarithm of the `cdf` when
`x << -1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(self._cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _cdf(self, value):
raise NotImplementedError("cdf is not implemented")
def cdf(self, value, name="cdf", **condition_kwargs):
"""Cumulative distribution function.
Given random variable `X`, the cumulative distribution function `cdf` is:
```
cdf(x) := P[X <= x]
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._cdf(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.exp(self._log_cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _log_survival_function(self, value):
raise NotImplementedError("log_survival_function is not implemented")
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
"""Log survival function.
Given random variable `X`, the survival function is defined:
```
log_survival_function(x) = Log[ P[X > x] ]
= Log[ 1 - P[X <= x] ]
= Log[ 1 - cdf(x) ]
```
Typically, different numerical approximations can be used for the log
survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
`Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._log_survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return math_ops.log(1. - self.cdf(value, **condition_kwargs))
except NotImplementedError:
raise original_exception
def _survival_function(self, value):
raise NotImplementedError("survival_function is not implemented")
def survival_function(self, value, name="survival_function",
**condition_kwargs):
"""Survival function.
Given random variable `X`, the survival function is defined:
```
survival_function(x) = P[X > x]
= 1 - P[X <= x]
= 1 - cdf(x).
```
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
      `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type
`self.dtype`.
"""
with self._name_scope(name, values=[value]):
value = ops.convert_to_tensor(value, name="value")
try:
return self._survival_function(value, **condition_kwargs)
except NotImplementedError as original_exception:
try:
return 1. - self.cdf(value, **condition_kwargs)
except NotImplementedError:
raise original_exception
def _entropy(self):
raise NotImplementedError("entropy is not implemented")
def entropy(self, name="entropy"):
"""Shannon entropy in nats."""
with self._name_scope(name):
return self._entropy()
def _mean(self):
raise NotImplementedError("mean is not implemented")
def mean(self, name="mean"):
"""Mean."""
with self._name_scope(name):
return self._mean()
def _variance(self):
raise NotImplementedError("variance is not implemented")
def variance(self, name="variance"):
"""Variance."""
with self._name_scope(name):
return self._variance()
def _std(self):
raise NotImplementedError("std is not implemented")
def std(self, name="std"):
"""Standard deviation."""
with self._name_scope(name):
return self._std()
def _mode(self):
raise NotImplementedError("mode is not implemented")
def mode(self, name="mode"):
"""Mode."""
with self._name_scope(name):
return self._mode()
def log_pdf(self, value, name="log_pdf", **condition_kwargs):
"""Log probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pdf`. `log_pdf` "
"will be deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("log_pdf is undefined for non-continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pdf(self, value, name="pdf", **condition_kwargs):
"""Probability density function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if not `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pdf`. `pdf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if not self.is_continuous:
raise TypeError("pdf is undefined for non-continuous distributions.")
return self.prob(value, name, **condition_kwargs)
def log_pmf(self, value, name="log_pmf", **condition_kwargs):
"""Log probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
log_pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `log_prob` instead of `log_pmf`. `log_pmf` will "
"be deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("log_pmf is undefined for continuous distributions.")
return self.log_prob(value, name=name, **condition_kwargs)
def pmf(self, value, name="pmf", **condition_kwargs):
"""Probability mass function.
Args:
value: `float` or `double` `Tensor`.
name: The name to give this op.
**condition_kwargs: Named arguments forwarded to subclass implementation.
Returns:
pmf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with
values of type `self.dtype`.
Raises:
TypeError: if `is_continuous`.
"""
warnings.warn("Please use `prob` instead of `pmf`. `pmf` will be "
"deprecated in December 2016.",
PendingDeprecationWarning)
if self.is_continuous:
raise TypeError("pmf is undefined for continuous distributions.")
return self.prob(value, name=name, **condition_kwargs)
@contextlib.contextmanager
def _name_scope(self, name=None, values=None):
"""Helper function to standardize op scope."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=(
(values or []) + self._graph_parents)) as scope:
yield scope
def _expand_sample_shape(self, sample_shape):
"""Helper to `sample` which ensures sample_shape is 1D."""
sample_shape_static_val = tensor_util.constant_value(sample_shape)
ndims = sample_shape.get_shape().ndims
if sample_shape_static_val is None:
if ndims is None or not sample_shape.get_shape().is_fully_defined():
ndims = array_ops.rank(sample_shape)
expanded_shape = distribution_util.pick_vector(
math_ops.equal(ndims, 0),
np.array((1,), dtype=dtypes.int32.as_numpy_dtype()),
array_ops.shape(sample_shape))
sample_shape = array_ops.reshape(sample_shape, expanded_shape)
total = math_ops.reduce_prod(sample_shape) # reduce_prod([]) == 1
else:
if ndims is None:
raise ValueError(
"Shouldn't be here; ndims cannot be none when we have a "
"tf.constant shape.")
if ndims == 0:
sample_shape_static_val = np.reshape(sample_shape_static_val, [1])
sample_shape = ops.convert_to_tensor(
sample_shape_static_val,
dtype=dtypes.int32,
name="sample_shape")
total = np.prod(sample_shape_static_val,
dtype=dtypes.int32.as_numpy_dtype())
return sample_shape, total
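  # Minimal sketch of what this helper returns for a statically known
  # sample_shape: a scalar 5 becomes ([5], total=5), while a vector [2, 3]
  # stays 1-D and yields total=6, so `sample` can draw `total` samples and
  # reshape them to the requested leading dimensions.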
|
tongwang01/tensorflow
|
tensorflow/contrib/distributions/python/ops/distribution.py
|
Python
|
apache-2.0
| 33,841
|
[
"Gaussian"
] |
218a7a23696e46a01b6e44c2dc7cf4a9681c5df7510ec41b1a0a48b88756e532
|
from __future__ import print_function
import sys
import mdtraj as md
from simtk.openmm import app
import simtk.openmm as mm
from simtk import unit
if len(sys.argv) != 4:
    print('usage: %s <cuda device index> <trajectory index (for output file)> <model index of starting conformation>' % sys.argv[0])
    exit(1)
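# Example invocation (device/trajectory/model indices below are placeholders):
#   python simulate_fs.py 0 1 250
# runs on CUDA device 0, writes trajectory-1.log / trajectory-1.dcd, and starts
# from model 250 of the input PDB.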
pdb = md.load('100-fs-peptide-400K.pdb')
forcefield = app.ForceField('amber99sbildn.xml', 'amber99_obc.xml')
system = forcefield.createSystem(pdb.topology.to_openmm(), nonbondedMethod=app.CutoffNonPeriodic,
nonbondedCutoff=1.0*unit.nanometers, constraints=app.HBonds)
integrator = mm.LangevinIntegrator(300*unit.kelvin, 91.0/unit.picoseconds,
2.0*unit.femtoseconds)
integrator.setConstraintTolerance(0.00001)
platform = mm.Platform.getPlatformByName('CUDA')
properties = {'CudaPrecision': 'mixed', 'CudaDeviceIndex': sys.argv[1]}
simulation = app.Simulation(pdb.topology.to_openmm(), system, integrator, platform, properties)
simulation.context.setPositions(pdb.xyz[int(sys.argv[3])])
simulation.context.setVelocitiesToTemperature(300*unit.kelvin)
nsteps = int((500*unit.nanoseconds) / (2*unit.femtoseconds))
interval = int((10*unit.picoseconds) / (2*unit.femtoseconds))
simulation.reporters.append(app.StateDataReporter(open('trajectory-%s.log' % sys.argv[2], 'w', 0),
interval, step=True, time=True, progress=True,
potentialEnergy=True, temperature=True, remainingTime=True,
speed=True, totalSteps=nsteps, separator='\t'))
# equilibrate
simulation.step(int(100*unit.picoseconds / (2*unit.femtoseconds)))
# now add the trajectory reporter.
simulation.reporters.append(app.DCDReporter('trajectory-%s.dcd' % sys.argv[2], interval))
simulation.step(nsteps)
|
msmbuilder/msmb_data
|
msmb_data/fs_peptide/simulate_fs.py
|
Python
|
lgpl-2.1
| 1,685
|
[
"MDTraj",
"OpenMM"
] |
20ac657e49ed743ad56597c72ff6e8f7f5c6a44a52293ec445031589190a2287
|
from __future__ import print_function, division
import random
from sympy import Derivative
from sympy.core.basic import Basic
from sympy.core.compatibility import is_sequence, as_int, range
from sympy.core.function import count_ops
from sympy.core.decorators import call_highest_priority
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.functions.elementary.trigonometric import cos, sin
from sympy.functions.elementary.miscellaneous import sqrt
from sympy.simplify import simplify as _simplify
from sympy.utilities.misc import filldedent
from sympy.utilities.decorator import doctest_depends_on
from sympy.matrices.matrices import (MatrixBase,
ShapeError, a2idx, classof)
def _iszero(x):
"""Returns True if x is zero."""
return x.is_zero
class DenseMatrix(MatrixBase):
is_MatrixExpr = False
_op_priority = 10.01
_class_priority = 4
def __getitem__(self, key):
"""Return portion of self defined by key. If the key involves a slice
then a list will be returned (if key is a single slice) or a matrix
(if key was a tuple involving a slice).
Examples
========
>>> from sympy import Matrix, I
>>> m = Matrix([
... [1, 2 + I],
... [3, 4 ]])
If the key is a tuple that doesn't involve a slice then that element
is returned:
>>> m[1, 0]
3
When a tuple key involves a slice, a matrix is returned. Here, the
first column is selected (all rows, column 0):
>>> m[:, 0]
Matrix([
[1],
[3]])
If the slice is not a tuple then it selects from the underlying
list of elements that are arranged in row order and a list is
returned if a slice is involved:
>>> m[0]
1
>>> m[::2]
[1, 3]
"""
if isinstance(key, tuple):
i, j = key
try:
i, j = self.key2ij(key)
return self._mat[i*self.cols + j]
except (TypeError, IndexError):
if isinstance(i, slice):
# XXX remove list() when PY2 support is dropped
i = list(range(self.rows))[i]
elif is_sequence(i):
pass
else:
i = [i]
if isinstance(j, slice):
# XXX remove list() when PY2 support is dropped
j = list(range(self.cols))[j]
elif is_sequence(j):
pass
else:
j = [j]
return self.extract(i, j)
else:
# row-wise decomposition of matrix
if isinstance(key, slice):
return self._mat[key]
return self._mat[a2idx(key)]
def __setitem__(self, key, value):
raise NotImplementedError()
@property
def is_Identity(self):
if not self.is_square:
return False
if not all(self[i, i] == 1 for i in range(self.rows)):
return False
for i in range(self.rows):
for j in range(i + 1, self.cols):
if self[i, j] or self[j, i]:
return False
return True
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return [self._mat[i: i + self.cols]
for i in range(0, len(self), self.cols)]
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_op
row_swap
row_del
row_join
row_insert
"""
return self[i, :]
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_op
col_swap
col_del
col_join
col_insert
"""
return self[:, j]
def _eval_trace(self):
"""Calculate the trace of a square matrix.
Examples
========
>>> from sympy.matrices import eye
>>> eye(3).trace()
3
"""
trace = 0
for i in range(self.cols):
trace += self._mat[i*self.cols + i]
return trace
def _eval_determinant(self):
return self.det()
def _eval_transpose(self):
"""Matrix transposition.
Examples
========
>>> from sympy import Matrix, I
>>> m=Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m.transpose()
Matrix([
[ 1, 3],
[2 + I, 4]])
>>> m.T == m.transpose()
True
See Also
========
conjugate: By-element conjugation
"""
a = []
for i in range(self.cols):
a.extend(self._mat[i::self.cols])
return self._new(self.cols, self.rows, a)
def _eval_conjugate(self):
"""By-element conjugation.
See Also
========
transpose: Matrix transposition
H: Hermite conjugation
D: Dirac conjugation
"""
out = self._new(self.rows, self.cols,
lambda i, j: self[i, j].conjugate())
return out
def _eval_adjoint(self):
return self.T.C
def _eval_inverse(self, **kwargs):
"""Return the matrix inverse using the method indicated (default
is Gauss elimination).
kwargs
======
method : ('GE', 'LU', or 'ADJ')
iszerofunc
try_block_diag
Notes
=====
According to the ``method`` keyword, it calls the appropriate method:
GE .... inverse_GE(); default
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the ``try_block_diag`` keyword, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
        be provided by setting the ``iszerofunc`` argument to a function that
should return True if its argument is zero. The ADJ routine computes
the determinant and uses that to detect singular matrices in addition
to testing for zeros on the diagonal.
See Also
========
inverse_LU
inverse_GE
inverse_ADJ
"""
from sympy.matrices import diag
method = kwargs.get('method', 'GE')
iszerofunc = kwargs.get('iszerofunc', _iszero)
if kwargs.get('try_block_diag', False):
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return diag(*r)
M = self.as_mutable()
if method == "GE":
rv = M.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
rv = M.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
rv = M.inverse_ADJ(iszerofunc=iszerofunc)
else:
# make sure to add an invertibility check (as in inverse_LU)
# if a new method is added.
raise ValueError("Inversion method unrecognized")
return self._new(rv)
def _eval_diff(self, *args, **kwargs):
if kwargs.pop("evaluate", True):
return self.diff(*args)
else:
return Derivative(self, *args, **kwargs)
def equals(self, other, failing_expression=False):
"""Applies ``equals`` to corresponding elements of the matrices,
trying to prove that the elements are equivalent, returning True
if they are, False if any pair is not, and None (or the first
failing expression if failing_expression is True) if it cannot
be decided if the expressions are equivalent or not. This is, in
general, an expensive operation.
Examples
========
>>> from sympy.matrices import Matrix
>>> from sympy.abc import x
>>> from sympy import cos
>>> A = Matrix([x*(x - 1), 0])
>>> B = Matrix([x**2 - x, 0])
>>> A == B
False
>>> A.simplify() == B.simplify()
True
>>> A.equals(B)
True
>>> A.equals(2)
False
See Also
========
sympy.core.expr.equals
"""
try:
if self.shape != other.shape:
return False
rv = True
for i in range(self.rows):
for j in range(self.cols):
ans = self[i, j].equals(other[i, j], failing_expression)
if ans is False:
return False
elif ans is not True and rv is True:
rv = ans
return rv
except AttributeError:
return False
def __eq__(self, other):
try:
if self.shape != other.shape:
return False
if isinstance(other, Matrix):
return self._mat == other._mat
elif isinstance(other, MatrixBase):
return self._mat == Matrix(other)._mat
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def _cholesky(self):
"""Helper function of cholesky.
Without the error checks.
To be used privately. """
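        # Standard Cholesky recurrence: builds a lower-triangular L with
        # self == L * L.T; assumes self is symmetric positive-definite (per
        # the docstring, the error checks live in the public wrapper).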
L = zeros(self.rows, self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / L[j, j])*(self[i, j] -
sum(L[i, k]*L[j, k] for k in range(j)))
L[i, i] = sqrt(self[i, i] -
sum(L[i, k]**2 for k in range(i)))
return self._new(L)
def _LDLdecomposition(self):
"""Helper function of LDLdecomposition.
Without the error checks.
To be used privately.
"""
D = zeros(self.rows, self.rows)
L = eye(self.rows)
for i in range(self.rows):
for j in range(i):
L[i, j] = (1 / D[j, j])*(self[i, j] - sum(
L[i, k]*L[j, k]*D[k, k] for k in range(j)))
D[i, i] = self[i, i] - sum(L[i, k]**2*D[k, k]
for k in range(i))
return self._new(L), self._new(D)
def _lower_triangular_solve(self, rhs):
"""Helper function of function lower_triangular_solve.
Without the error checks.
To be used privately.
"""
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in range(self.rows):
if self[i, i] == 0:
raise TypeError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i))) / self[i, i]
return self._new(X)
def _upper_triangular_solve(self, rhs):
"""Helper function of function upper_triangular_solve.
Without the error checks, to be used privately. """
X = zeros(self.rows, rhs.cols)
for j in range(rhs.cols):
for i in reversed(range(self.rows)):
if self[i, i] == 0:
raise ValueError("Matrix must be non-singular.")
X[i, j] = (rhs[i, j] - sum(self[i, k]*X[k, j]
for k in range(i + 1, self.rows))) / self[i, i]
return self._new(X)
def _diagonal_solve(self, rhs):
"""Helper function of function diagonal_solve,
without the error checks, to be used privately.
"""
return self._new(rhs.rows, rhs.cols, lambda i, j: rhs[i, j] / self[i, i])
def applyfunc(self, f):
"""Apply a function to each element of the matrix.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 2, lambda i, j: i*2+j)
>>> m
Matrix([
[0, 1],
[2, 3]])
>>> m.applyfunc(lambda i: 2*i)
Matrix([
[0, 2],
[4, 6]])
"""
if not callable(f):
raise TypeError("`f` must be callable.")
out = self._new(self.rows, self.cols, list(map(f, self._mat)))
return out
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if len(self) != rows*cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self._mat[i*cols + j])
def as_mutable(self):
"""Returns a mutable version of this matrix
Examples
========
>>> from sympy import ImmutableMatrix
>>> X = ImmutableMatrix([[1, 2], [3, 4]])
>>> Y = X.as_mutable()
>>> Y[1, 1] = 5 # Can set values in Y
>>> Y
Matrix([
[1, 2],
[3, 5]])
"""
return Matrix(self)
def as_immutable(self):
"""Returns an Immutable version of this Matrix
"""
from .immutable import ImmutableMatrix as cls
if self.rows and self.cols:
return cls._new(self.tolist())
return cls._new(self.rows, self.cols, [])
@classmethod
def zeros(cls, r, c=None):
"""Return an r x c matrix of zeros, square if c is omitted."""
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return cls._new(r, c, [cls._sympify(0)]*r*c)
@classmethod
def eye(cls, n):
"""Return an n x n identity matrix."""
n = as_int(n)
mat = [cls._sympify(0)]*n*n
mat[::n + 1] = [cls._sympify(1)]*n
return cls._new(n, n, mat)
############################
# Mutable matrix operators #
############################
@call_highest_priority('__radd__')
def __add__(self, other):
return super(DenseMatrix, self).__add__(_force_mutable(other))
@call_highest_priority('__add__')
def __radd__(self, other):
return super(DenseMatrix, self).__radd__(_force_mutable(other))
@call_highest_priority('__rsub__')
def __sub__(self, other):
return super(DenseMatrix, self).__sub__(_force_mutable(other))
@call_highest_priority('__sub__')
def __rsub__(self, other):
return super(DenseMatrix, self).__rsub__(_force_mutable(other))
@call_highest_priority('__rmul__')
def __mul__(self, other):
return super(DenseMatrix, self).__mul__(_force_mutable(other))
@call_highest_priority('__mul__')
def __rmul__(self, other):
return super(DenseMatrix, self).__rmul__(_force_mutable(other))
@call_highest_priority('__div__')
def __div__(self, other):
return super(DenseMatrix, self).__div__(_force_mutable(other))
@call_highest_priority('__truediv__')
def __truediv__(self, other):
return super(DenseMatrix, self).__truediv__(_force_mutable(other))
@call_highest_priority('__rpow__')
def __pow__(self, other):
return super(DenseMatrix, self).__pow__(other)
@call_highest_priority('__pow__')
def __rpow__(self, other):
raise NotImplementedError("Matrix Power not defined")
def _force_mutable(x):
"""Return a matrix as a Matrix, otherwise return x."""
if getattr(x, 'is_Matrix', False):
return x.as_mutable()
elif isinstance(x, Basic):
return x
elif hasattr(x, '__array__'):
a = x.__array__()
if len(a.shape) == 0:
return sympify(a)
return Matrix(x)
return x
class MutableDenseMatrix(DenseMatrix, MatrixBase):
@classmethod
def _new(cls, *args, **kwargs):
rows, cols, flat_list = cls._handle_creation_inputs(*args, **kwargs)
self = object.__new__(cls)
self.rows = rows
self.cols = cols
self._mat = list(flat_list) # create a shallow copy
return self
def __new__(cls, *args, **kwargs):
return cls._new(*args, **kwargs)
def as_mutable(self):
return self.copy()
def __setitem__(self, key, value):
"""
Examples
========
>>> from sympy import Matrix, I, zeros, ones
>>> m = Matrix(((1, 2+I), (3, 4)))
>>> m
Matrix([
[1, 2 + I],
[3, 4]])
>>> m[1, 0] = 9
>>> m
Matrix([
[1, 2 + I],
[9, 4]])
>>> m[1, 0] = [[0, 1]]
To replace row r you assign to position r*m where m
is the number of columns:
>>> M = zeros(4)
>>> m = M.cols
>>> M[3*m] = ones(1, m)*2; M
Matrix([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[2, 2, 2, 2]])
And to replace column c you can assign to position c:
>>> M[2] = ones(m, 1)*4; M
Matrix([
[0, 0, 4, 0],
[0, 0, 4, 0],
[0, 0, 4, 0],
[2, 2, 4, 2]])
"""
rv = self._setitem(key, value)
if rv is not None:
i, j, value = rv
self._mat[i*self.cols + j] = value
def copyin_matrix(self, key, value):
"""Copy in values from a matrix into the given bounds.
Parameters
==========
key : slice
The section of this matrix to replace.
value : Matrix
The matrix to copy values from.
Examples
========
>>> from sympy.matrices import Matrix, eye
>>> M = Matrix([[0, 1], [2, 3], [4, 5]])
>>> I = eye(3)
>>> I[:3, :2] = M
>>> I
Matrix([
[0, 1, 0],
[2, 3, 0],
[4, 5, 1]])
>>> I[0, 1] = M
>>> I
Matrix([
[0, 0, 1],
[2, 2, 3],
[4, 4, 5]])
See Also
========
copyin_list
"""
rlo, rhi, clo, chi = self.key2bounds(key)
shape = value.shape
dr, dc = rhi - rlo, chi - clo
if shape != (dr, dc):
            raise ShapeError(filldedent("The Matrix `value` doesn't have the "
                "same dimensions "
                "as the sub-Matrix given by `key`."))
for i in range(value.rows):
for j in range(value.cols):
self[i + rlo, j + clo] = value[i, j]
def copyin_list(self, key, value):
"""Copy in elements from a list.
Parameters
==========
key : slice
The section of this matrix to replace.
value : iterable
The iterable to copy values from.
Examples
========
>>> from sympy.matrices import eye
>>> I = eye(3)
>>> I[:2, 0] = [1, 2] # col
>>> I
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
>>> I[1, :2] = [[3, 4]]
>>> I
Matrix([
[1, 0, 0],
[3, 4, 0],
[0, 0, 1]])
See Also
========
copyin_matrix
"""
if not is_sequence(value):
raise TypeError("`value` must be an ordered iterable, not %s." % type(value))
return self.copyin_matrix(key, Matrix(value))
def zip_row_op(self, i, k, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], self[k, j])``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.zip_row_op(1, 0, lambda v, u: v + 2*u); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
row_op
col_op
"""
i0 = i*self.cols
k0 = k*self.cols
ri = self._mat[i0: i0 + self.cols]
rk = self._mat[k0: k0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, y) for x, y in zip(ri, rk) ]
def row_op(self, i, f):
"""In-place operation on row ``i`` using two-arg functor whose args are
interpreted as ``(self[i, j], j)``.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_op(1, lambda v, j: v + 2*M[0, j]); M
Matrix([
[1, 0, 0],
[2, 1, 0],
[0, 0, 1]])
See Also
========
row
zip_row_op
col_op
"""
i0 = i*self.cols
ri = self._mat[i0: i0 + self.cols]
self._mat[i0: i0 + self.cols] = [ f(x, j) for x, j in zip(ri, list(range(self.cols))) ]
def col_op(self, j, f):
"""In-place operation on col j using two-arg functor whose args are
interpreted as (self[i, j], i).
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_op(1, lambda v, i: v + 2*M[i, 0]); M
Matrix([
[1, 2, 0],
[0, 1, 0],
[0, 0, 1]])
See Also
========
col
row_op
"""
self._mat[j::self.cols] = [f(*t) for t in list(zip(self._mat[j::self.cols], list(range(self.rows))))]
def row_swap(self, i, j):
"""Swap the two given rows of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[0, 1], [1, 0]])
>>> M
Matrix([
[0, 1],
[1, 0]])
>>> M.row_swap(0, 1)
>>> M
Matrix([
[1, 0],
[0, 1]])
See Also
========
row
col_swap
"""
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
"""Swap the two given columns of the matrix in-place.
Examples
========
>>> from sympy.matrices import Matrix
>>> M = Matrix([[1, 0], [1, 0]])
>>> M
Matrix([
[1, 0],
[1, 0]])
>>> M.col_swap(0, 1)
>>> M
Matrix([
[0, 1],
[0, 1]])
See Also
========
col
row_swap
"""
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
"""Delete the given row.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.row_del(1)
>>> M
Matrix([
[1, 0, 0],
[0, 0, 1]])
See Also
========
row
col_del
"""
if i < -self.rows or i >= self.rows:
raise IndexError("Index out of range: 'i = %s', valid -%s <= i"
" < %s" % (i, self.rows, self.rows))
if i < 0:
i += self.rows
del self._mat[i*self.cols:(i+1)*self.cols]
self.rows -= 1
def col_del(self, i):
"""Delete the given column.
Examples
========
>>> from sympy.matrices import eye
>>> M = eye(3)
>>> M.col_del(1)
>>> M
Matrix([
[1, 0],
[0, 0],
[0, 1]])
See Also
========
col
row_del
"""
if i < -self.cols or i >= self.cols:
raise IndexError("Index out of range: 'i=%s', valid -%s <= i < %s"
% (i, self.cols, self.cols))
for j in range(self.rows - 1, -1, -1):
del self._mat[i + j*self.cols]
self.cols -= 1
# Utility functions
def simplify(self, ratio=1.7, measure=count_ops):
"""Applies simplify to the elements of a matrix in place.
This is a shortcut for M.applyfunc(lambda x: simplify(x, ratio, measure))
See Also
========
sympy.simplify.simplify.simplify
"""
for i in range(len(self._mat)):
self._mat[i] = _simplify(self._mat[i], ratio=ratio,
measure=measure)
def fill(self, value):
"""Fill the matrix with the scalar value.
See Also
========
zeros
ones
"""
self._mat = [value]*len(self)
MutableMatrix = Matrix = MutableDenseMatrix
###########
# Numpy Utility Functions:
# list2numpy, matrix2numpy, symmarray, rot_axis[123]
###########
def list2numpy(l, dtype=object): # pragma: no cover
"""Converts python list of SymPy expressions to a NumPy array.
See Also
========
matrix2numpy
"""
from numpy import empty
a = empty(len(l), dtype)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m, dtype=object): # pragma: no cover
"""Converts SymPy's matrix to a NumPy array.
See Also
========
list2numpy
"""
from numpy import empty
a = empty(m.shape, dtype)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
@doctest_depends_on(modules=('numpy',))
def symarray(prefix, shape, **kwargs): # pragma: no cover
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named ``prefix_i1_i2_``... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as SymPy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
\*\*kwargs : dict
keyword arguments passed on to Symbol
Examples
========
These doctests require numpy.
>>> from sympy import symarray
>>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>>> a = symarray('', 3)
>>> b = symarray('', 3)
>>> a[0] == b[0]
True
>>> a = symarray('a', 3)
>>> b = symarray('b', 3)
>>> a[0] == b[0]
False
Creating symarrays with a prefix:
>>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>>> symarray('a', (2, 3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>>> symarray('a', (2, 3, 2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
For setting assumptions of the underlying Symbols:
>>> [s.is_real for s in symarray('a', 2, real=True)]
[True, True]
"""
from numpy import empty, ndindex
arr = empty(shape, dtype=object)
for index in ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))),
**kwargs)
return arr
def rot_axis3(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 3-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis3
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis3(theta)
Matrix([
[ 1/2, sqrt(3)/2, 0],
[-sqrt(3)/2, 1/2, 0],
[ 0, 0, 1]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis3(pi/2)
Matrix([
[ 0, 1, 0],
[-1, 0, 0],
[ 0, 0, 1]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, st, 0),
(-st, ct, 0),
(0, 0, 1))
return Matrix(lil)
def rot_axis2(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 2-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis2
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis2(theta)
Matrix([
[ 1/2, 0, -sqrt(3)/2],
[ 0, 1, 0],
[sqrt(3)/2, 0, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis2(pi/2)
Matrix([
[0, 0, -1],
[0, 1, 0],
[1, 0, 0]])
See Also
========
rot_axis1: Returns a rotation matrix for a rotation of theta (in radians)
about the 1-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((ct, 0, -st),
(0, 1, 0),
(st, 0, ct))
return Matrix(lil)
def rot_axis1(theta):
"""Returns a rotation matrix for a rotation of theta (in radians) about
the 1-axis.
Examples
========
>>> from sympy import pi
>>> from sympy.matrices import rot_axis1
A rotation of pi/3 (60 degrees):
>>> theta = pi/3
>>> rot_axis1(theta)
Matrix([
[1, 0, 0],
[0, 1/2, sqrt(3)/2],
[0, -sqrt(3)/2, 1/2]])
If we rotate by pi/2 (90 degrees):
>>> rot_axis1(pi/2)
Matrix([
[1, 0, 0],
[0, 0, 1],
[0, -1, 0]])
See Also
========
rot_axis2: Returns a rotation matrix for a rotation of theta (in radians)
about the 2-axis
rot_axis3: Returns a rotation matrix for a rotation of theta (in radians)
about the 3-axis
"""
ct = cos(theta)
st = sin(theta)
lil = ((1, 0, 0),
(0, ct, st),
(0, -st, ct))
return Matrix(lil)
###############
# Functions
###############
def matrix_multiply_elementwise(A, B):
"""Return the Hadamard product (elementwise product) of A and B
>>> from sympy.matrices import matrix_multiply_elementwise
>>> from sympy.matrices import Matrix
>>> A = Matrix([[0, 1, 2], [3, 4, 5]])
>>> B = Matrix([[1, 10, 100], [100, 10, 1]])
>>> matrix_multiply_elementwise(A, B)
Matrix([
[ 0, 10, 200],
[300, 40, 5]])
See Also
========
__mul__
"""
if A.shape != B.shape:
raise ShapeError()
shape = A.shape
return classof(A, B)._new(shape[0], shape[1],
lambda i, j: A[i, j]*B[i, j])
def ones(r, c=None):
"""Returns a matrix of ones with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
zeros
eye
diag
"""
from .dense import Matrix
c = r if c is None else c
r = as_int(r)
c = as_int(c)
return Matrix(r, c, [S.One]*r*c)
def zeros(r, c=None, cls=None):
"""Returns a matrix of zeros with ``r`` rows and ``c`` columns;
if ``c`` is omitted a square matrix will be returned.
See Also
========
ones
eye
diag
"""
if cls is None:
from .dense import Matrix as cls
return cls.zeros(r, c)
def eye(n, cls=None):
"""Create square identity matrix n x n
See Also
========
diag
zeros
ones
"""
if cls is None:
from sympy.matrices import Matrix as cls
return cls.eye(n)
def diag(*values, **kwargs):
"""Create a sparse, diagonal matrix from a list of diagonal values.
Notes
=====
    When arguments are matrices they are fitted into the resultant matrix.
The returned matrix is a mutable, dense matrix. To make it a different
type, send the desired class for keyword ``cls``.
Examples
========
>>> from sympy.matrices import diag, Matrix, ones
>>> diag(1, 2, 3)
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
>>> diag(*[1, 2, 3])
Matrix([
[1, 0, 0],
[0, 2, 0],
[0, 0, 3]])
The diagonal elements can be matrices; diagonal filling will
continue on the diagonal from the last element of the matrix:
>>> from sympy.abc import x, y, z
>>> a = Matrix([x, y, z])
>>> b = Matrix([[1, 2], [3, 4]])
>>> c = Matrix([[5, 6]])
>>> diag(a, 7, b, c)
Matrix([
[x, 0, 0, 0, 0, 0],
[y, 0, 0, 0, 0, 0],
[z, 0, 0, 0, 0, 0],
[0, 7, 0, 0, 0, 0],
[0, 0, 1, 2, 0, 0],
[0, 0, 3, 4, 0, 0],
[0, 0, 0, 0, 5, 6]])
When diagonal elements are lists, they will be treated as arguments
to Matrix:
>>> diag([1, 2, 3], 4)
Matrix([
[1, 0],
[2, 0],
[3, 0],
[0, 4]])
>>> diag([[1, 2, 3]], 4)
Matrix([
[1, 2, 3, 0],
[0, 0, 0, 4]])
A given band off the diagonal can be made by padding with a
vertical or horizontal "kerning" vector:
>>> hpad = ones(0, 2)
>>> vpad = ones(2, 0)
>>> diag(vpad, 1, 2, 3, hpad) + diag(hpad, 4, 5, 6, vpad)
Matrix([
[0, 0, 4, 0, 0],
[0, 0, 0, 5, 0],
[1, 0, 0, 0, 6],
[0, 2, 0, 0, 0],
[0, 0, 3, 0, 0]])
    The type of the returned matrix is mutable by default, but a different
    class can be requested via the ``cls`` keyword:
>>> type(diag(1))
<class 'sympy.matrices.dense.MutableDenseMatrix'>
>>> from sympy.matrices import ImmutableMatrix
>>> type(diag(1, cls=ImmutableMatrix))
<class 'sympy.matrices.immutable.ImmutableMatrix'>
See Also
========
eye
"""
from .sparse import MutableSparseMatrix
cls = kwargs.pop('cls', None)
if cls is None:
from .dense import Matrix as cls
if kwargs:
raise ValueError('unrecognized keyword%s: %s' % (
's' if len(kwargs) > 1 else '',
', '.join(kwargs.keys())))
rows = 0
cols = 0
values = list(values)
for i in range(len(values)):
m = values[i]
if isinstance(m, MatrixBase):
rows += m.rows
cols += m.cols
elif is_sequence(m):
m = values[i] = Matrix(m)
rows += m.rows
cols += m.cols
else:
rows += 1
cols += 1
res = MutableSparseMatrix.zeros(rows, cols)
i_row = 0
i_col = 0
for m in values:
if isinstance(m, MatrixBase):
res[i_row:i_row + m.rows, i_col:i_col + m.cols] = m
i_row += m.rows
i_col += m.cols
else:
res[i_row, i_col] = m
i_row += 1
i_col += 1
return cls._new(res)
def jordan_cell(eigenval, n):
"""
Create matrix of Jordan cell kind:
Examples
========
>>> from sympy.matrices import jordan_cell
>>> from sympy.abc import x
>>> jordan_cell(x, 4)
Matrix([
[x, 1, 0, 0],
[0, x, 1, 0],
[0, 0, x, 1],
[0, 0, 0, x]])
"""
n = as_int(n)
out = zeros(n)
for i in range(n - 1):
out[i, i] = eigenval
out[i, i + 1] = S.One
out[n - 1, n - 1] = eigenval
return out
def hessian(f, varlist, constraints=[]):
"""Compute Hessian matrix for a function f wrt parameters in varlist
which may be given as a sequence or a row/column vector. A list of
constraints may optionally be given.
Examples
========
>>> from sympy import Function, hessian, pprint
>>> from sympy.abc import x, y
>>> f = Function('f')(x, y)
>>> g1 = Function('g')(x, y)
>>> g2 = x**2 + 3*y
>>> pprint(hessian(f, (x, y), [g1, g2]))
[ d d ]
[ 0 0 --(g(x, y)) --(g(x, y)) ]
[ dx dy ]
[ ]
[ 0 0 2*x 3 ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 2*x ---(f(x, y)) -----(f(x, y))]
[dx 2 dy dx ]
[ dx ]
[ ]
[ 2 2 ]
[d d d ]
[--(g(x, y)) 3 -----(f(x, y)) ---(f(x, y)) ]
[dy dy dx 2 ]
[ dy ]
References
==========
http://en.wikipedia.org/wiki/Hessian_matrix
See Also
========
sympy.matrices.mutable.Matrix.jacobian
wronskian
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, MatrixBase):
if 1 not in varlist.shape:
raise ShapeError("`varlist` must be a column or row vector.")
if varlist.cols == 1:
varlist = varlist.T
varlist = varlist.tolist()[0]
if is_sequence(varlist):
n = len(varlist)
if not n:
raise ShapeError("`len(varlist)` must not be zero.")
else:
raise ValueError("Improper variable list in hessian function")
if not getattr(f, 'diff'):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
m = len(constraints)
N = m + n
out = zeros(N)
for k, g in enumerate(constraints):
if not getattr(g, 'diff'):
# check differentiability
raise ValueError("Function `f` (%s) is not differentiable" % f)
for i in range(n):
out[k, i + m] = g.diff(varlist[i])
for i in range(n):
for j in range(i, n):
out[i + m, j + m] = f.diff(varlist[i]).diff(varlist[j])
for i in range(N):
for j in range(i + 1, N):
out[j, i] = out[i, j]
return out
def GramSchmidt(vlist, orthonormal=False):
"""
Apply the Gram-Schmidt process to a set of vectors.
see: http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process
"""
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
if not tmp.values():
raise ValueError(
"GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthonormal:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
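# Minimal usage sketch (illustrative only; assumes column-vector inputs):
#   from sympy import Matrix
#   basis = GramSchmidt([Matrix([1, 1]), Matrix([1, 0])], orthonormal=True)
#   # basis is a pair of orthonormal column vectors spanning the same plane.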
def wronskian(functions, var, method='bareis'):
"""
    Compute the Wronskian of a list of functions
::
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1, ..., fn) = | . . . . |
| . . . . |
| (n) (n) (n) |
| D (f1) D (f2) ... D (fn) |
see: http://en.wikipedia.org/wiki/Wronskian
See Also
========
sympy.matrices.mutable.Matrix.jacobian
hessian
"""
from .dense import Matrix
for index in range(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i, j: functions[i].diff(var, j))
return W.det(method)
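# Illustrative usage of wronskian (added comment, not part of the original
# file): for f1 = x and f2 = exp(x),
#     W(x, exp(x)) = | x  exp(x) | = x*exp(x) - exp(x)
#                    | 1  exp(x) |
# so ``wronskian([x, exp(x)], x)`` returns an expression equivalent to
# (x - 1)*exp(x), which is nonzero, confirming linear independence.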
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), do not vanish for n = 0.
Casoratian is defined by k x k determinant::
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis:
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
from .dense import Matrix
seqs = list(map(sympify, seqs))
if not zero:
f = lambda i, j: seqs[j].subs(n, n + i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def randMatrix(r, c=None, min=0, max=99, seed=None, symmetric=False,
percent=100, prng=None):
"""Create random matrix with dimensions ``r`` x ``c``. If ``c`` is omitted
the matrix will be square. If ``symmetric`` is True the matrix must be
square. If ``percent`` is less than 100 then only approximately the given
percentage of elements will be non-zero.
The pseudo-random number generator used to generate matrix is chosen in the
following way.
* If ``prng`` is supplied, it will be used as random number generator.
It should be an instance of :class:`random.Random`, or at least have
``randint`` and ``shuffle`` methods with same signatures.
* if ``prng`` is not supplied but ``seed`` is supplied, then new
:class:`random.Random` with given ``seed`` will be created;
* otherwise, a new :class:`random.Random` with default seed will be used.
Examples
========
>>> from sympy.matrices import randMatrix
>>> randMatrix(3) # doctest:+SKIP
[25, 45, 27]
[44, 54, 9]
[23, 96, 46]
>>> randMatrix(3, 2) # doctest:+SKIP
[87, 29]
[23, 37]
[90, 26]
>>> randMatrix(3, 3, 0, 2) # doctest:+SKIP
[0, 2, 0]
[2, 0, 1]
[0, 0, 1]
>>> randMatrix(3, symmetric=True) # doctest:+SKIP
[85, 26, 29]
[26, 71, 43]
[29, 43, 57]
>>> A = randMatrix(3, seed=1)
>>> B = randMatrix(3, seed=2)
>>> A == B # doctest:+SKIP
False
>>> A == randMatrix(3, seed=1)
True
>>> randMatrix(3, symmetric=True, percent=50) # doctest:+SKIP
[0, 68, 43]
[0, 68, 0]
[0, 91, 34]
"""
if c is None:
c = r
# Note that ``Random()`` is equivalent to ``Random(None)``
prng = prng or random.Random(seed)
if symmetric and r != c:
raise ValueError(
'For symmetric matrices, r must equal c, but %i != %i' % (r, c))
if not symmetric:
m = Matrix._new(r, c, lambda i, j: prng.randint(min, max))
else:
m = zeros(r)
for i in range(r):
for j in range(i, r):
m[i, j] = prng.randint(min, max)
for i in range(r):
for j in range(i):
m[i, j] = m[j, i]
if percent == 100:
return m
else:
        z = int(r*c*(100 - percent) // 100)
m._mat[:z] = [S.Zero]*z
prng.shuffle(m._mat)
return m
|
kaichogami/sympy
|
sympy/matrices/dense.py
|
Python
|
bsd-3-clause
| 44,238
|
[
"DIRAC"
] |
a09a1e5309c88f4caacf4eca616d3c8501806563ad77cbcc68c77c39ee19c863
|
writePdbAA = {'A' : {
'Res_name': 'ALA',
'atoms':['N','CA','C','O','CB'],
'atom_type':['N','C','C','O','C']},
'R' : {
'Res_name': 'ARG',
'atoms':['N','CA','C','O','CB','CG','CD','NE','CZ','NH1','NH2'],
'atom_type':['N','C','C','O','C','C','C','N','C','N','N']},
'N' : {
'Res_name': 'ASN',
'atoms':['N','CA','C','O','CB','CG','OD1','ND2'],
'atom_type':['N','C','C','O','C','C','O','N']},
'D' : {
'Res_name': 'ASP',
'atoms':['N','CA','C','O','CB','CG','OD1','OD2'],
'atom_type':['N','C','C','O','C','C','O','O']},
'C' : {
'Res_name': 'CYS',
'atoms':['N','CA','C','O','CB','SG'],
'atom_type':['N','C','C','O','C','S']},
'E' : {
'Res_name': 'GLU',
'atoms':['N','CA','C','O','CB','CG','CD','OE1','OE2'],
'atom_type':['N','C','C','O','C','C','C','O','O']},
'Q' : {
'Res_name': 'GLN',
'atoms':['N','CA','C','O','CB','CG','CD','OE1','NE2'],
'atom_type':['N','C','C','O','C','C','C','O','N']},
'G' : {
'Res_name': 'GLY',
'atoms':['N','CA','C','O'],
'atom_type':['N','C','C','O']},
'H' : {
'Res_name': 'HIS',
'atoms':['N','CA','C','O','CB','CG','CD2','ND1','CE1','NE2'],
'atom_type':['N','C','C','O','C','C','C','N','C','N']},
'I' : {
'Res_name': 'ILE',
'atoms':['N','CA','C','O','CB','CG1','CG2','CD1'],
'atom_type':['N','C','C','O','C','C','C','C']},
'L' : {
'Res_name': 'LEU',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2'],
'atom_type':['N','C','C','O','C','C','C','C']},
'K' : {
'Res_name': 'LYS',
'atoms':['N','CA','C','O','CB','CG','CD','CE','NZ'],
'atom_type':['N','C','C','O','C','C','C','C','N']},
'M' : {
'Res_name': 'MET',
'atoms':['N','CA','C','O','CB','CG','SD','CE'],
'atom_type':['N','C','C','O','C','C','S','C']},
'F' : {
'Res_name': 'PHE',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C']},
'P' : {
'Res_name': 'PRO',
'atoms':['N','CA','C','O','CB','CG','CD'],
'atom_type':['N','C','C','O','C','C','C']},
'S' : {
'Res_name': 'SER',
'atoms':['N','CA','C','O','CB','OG'],
'atom_type':['N','C','C','O','C','O']},
'T' : {
'Res_name': 'THR',
'atoms':['N','CA','C','O','CB', 'OG1','CG2'],
'atom_type':['N','C','C','O','C','O','C']},
'W' : {
'Res_name': 'TRP',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','NE1','CE2','CE3','CZ2','CZ3','CH2'],
'atom_type':['N','C','C','O','C','C','C','C','N','C','C','C','C','C']},
'Y' : {
'Res_name': 'TYR',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'OH'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','O']},
'V' : {
'Res_name': 'VAL',
'atoms':['N','CA','C','O','CB','CG1','CG2'],
'atom_type':['N','C','C','O','C','C','C']},
##unnatural amino acids
'Pheiodo' : {
'Res_name': 'PHI',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'I'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','I'],
'link_rec':[(0,1),(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(11,12)]},
'Phebromo' : {
'Res_name': 'PBR',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'BR'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','Br'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(11,12)]},
'Phefluoro' : {
'Res_name': 'P5F',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'F1','F2','F3','F4','F5'],
                'atom_type':['N','C','C','O','C','C','C','C','C','C','C','F','F','F','F','F'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(7,13),(8,12),(9,15),(10,14),(11,16)]},
'Phe(I)' : {
'Res_name': 'PHI',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'I'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','I'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(11,12)]},
'Phe(Br)' : {
'Res_name': 'PBR',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'BR'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','Br'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(11,12)]},
'Phe(5F)' : {
'Res_name': 'P5F',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'F1','F2','F3','F4','F5'],
                'atom_type':['N','C','C','O','C','C','C','C','C','C','C','F','F','F','F','F'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(7,13),(8,12),(9,15),(10,14),(11,16)]},
'O' : {
'Res_name': 'ORN',
'atoms':['N','CA','C','O','CB','CG','CD','NE'],
'atom_type':['N','C','C','O','C','C','C','N+1'],
'link_rec': [(1,2), (2,3), (3,4,4),(2,5),(5,6),(6,7),(7,8)]},
'Nle' : {
'Res_name': 'NLE',
'atoms':['N','CA','C','O','CB','CG','CD','CE'],
'atom_type':['N','C','C','O','C','C','C','C'],
'link_rec': [(1,2), (2,3), (3,4,4),(2,5),(5,6),(6,7),(7,8)]},
'Nva' : {
'Res_name': 'NVA',
'atoms':['N','CA','C','O','CB','CG','CD'],
'atom_type':['N','C','C','O','C','C','C'],
'link_rec': [(1,2), (2,3), (3,4,4),(2,5),(5,6),(6,7)]},
'Pra' : {
'Res_name': 'PRA',
'atoms':['N','CA','C','O','CB','CG','CD'],
'atom_type':['N','C','C','O','C','C','C'],
'link_rec': [(1,2), (2,3), (3,4,4),(2,5),(5,6),(6,7,7),(6,7)]},
'Hao' : {
'Res_name': 'HAO',
'atoms':['N','N02','C','O','C08','O06','C06','C05','OM','CM','C04','C03','C02','C07','N01','C01','O01'],
'atom_type':['N','N','C','O','C','O','C','C','O','C','C','C','C','C','N','C','O'],
'link_rec': [(1,2),(2,5),(5,6,6), (5,7),(7,8,8), (7,14),(8,9), (9,10), (8, 11), (11,12,12), (12,13), (13,14,14), (13, 15), (15,16), (16,17,17),(16,3),(3,4,4) ]},
##Misc
'Ac':{
'Res_name': 'ACE',
'atoms':['O','C','CH3'],
'atom_type':['O','C','C']},
##NMe Amino Acids
'Ala(NMe)' : {
'Res_name': 'MAA',
'atoms':['N','CA','C','O','CB','CN'],
'atom_type':['N','C','C','O','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (1,6)]},
'Gly(NMe)' : {
'Res_name': 'SAR',
'atoms':['N','CA','C','O','CN'],
'atom_type':['N','C','C','O','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5)]},
'Ile(NMe)' : {
'Res_name': 'IML',
'atoms':['N','CA','C','O','CB','CG1','CG2','CD1','CN'],
'atom_type':['N','C','C','O','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6), (5,7), (6,8), (1,9)]},
'Leu(NMe)' : {
'Res_name': 'MLE',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CN'],
'atom_type':['N','C','C','O','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6), (6,7), (6,8), (1,9)]},
'Nle(NMe)' : {
'Res_name': 'MNL',
'atoms':['N','CA','C','O','CB','CG','CD','CE','CN'],
'atom_type':['N','C','C','O','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6), (6,7), (7,8), (1,9)]},
'Nva(NMe)' : {
'Res_name': 'MNV',
'atoms':['N','CA','C','O','CB','CG','CD','CN'],
'atom_type':['N','C','C','O','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6), (6,7), (1,8)]},
'Phe(NMe)' : {
'Res_name': 'MEA',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ','CN'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(1,12)]},
'Tyr(NMe)' : {
'Res_name': 'YNM',
'atoms':['N','CA','C','O','CB','CG','CD1','CD2','CE1','CE2','CZ', 'OH', 'CN'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','O','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(6,7), (6,8,8), (7,9,9),(8,10),(9,11), (10,11,11),(11,12),(1,13)]},
'Val(NMe)' : {
'Res_name': 'MNV',
'atoms':['N','CA','C','O','CB','CG1','CG2','CN'],
'atom_type':['N','C','C','O','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (2,5), (5,6),(5,7),(1,8)]},
##Peptoids
'Nab' : {
'Res_name': 'NAB',
'atoms':['N','CA','C','O','CA5','CB', 'CG', 'CD', 'NE'],
'atom_type':['N','C','C','O','C','C','C','C','N+1'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (7,8),(8,9)]},
'Nbn' : {
'Res_name': 'NBN',
'atoms':['N','CA','C','O','CA5','CB', 'CG','CG1','CD3','CD4','CE3'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6), (6,7,7) ,(6,8),(7,9), (8,10,10), (9,11,11),(10,11)]},
'Namd' : {
'Res_name': 'NMD',
'atoms':['N','CA','C','O','CA5','CB', 'NG', 'OG1'],
'atom_type':['N','C','C','O','C','C','N','O'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (6,8,8)]},
'Ncm' : {
'Res_name': 'NCM',
'atoms':['N','CA','C','O','CA5','CB', 'OG1', 'OG2'],
'atom_type':['N','C','C','O','C','C','O','O'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (6,8,8)]},
'Npe' : {
'Res_name': 'NPE',
'atoms':['N','CA','C','O','CA5','CB', 'CG','CD1','CD2','CE1','CE2','CZ1'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (7,8), (7,9,9),(8,10,10),(10,12),(9,11),(11,12,12)]},
'Nae' : {
'Res_name': 'NAE',
'atoms':['N','CA','C','O','CA5','CB', 'NG'],
'atom_type':['N','C','C','O','C','C','N+1'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7)]},
'Nce' : {
'Res_name': 'NCE',
'atoms':['N','CA','C','O','CA5','CB', 'CG','OD1','OD2'],
'atom_type':['N','C','C','O','C','C','C','O','O-1'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (7,8,8), (7,9)]},
'Ndc' : {
'Res_name': 'NDC',
'atoms':['N','CA','C','O','CA5','CB', 'CG', 'CD', 'CE', 'CZ', 'CH','CT','CI','CK'],
'atom_type':['N','C','C','O','C','C','C','C','C','C','C','C','C','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (7,8), (8,9),(9,10),(10,11),(11,12), (12,13),(13,14)]},
'Nte' : {
'Res_name': 'NTE',
'atoms':['N','CA','C','O','CA5','CB', 'OG', 'CD', 'CE', 'OZ', 'CH','CT','OI','CK'],
'atom_type':['N','C','C','O','C','C','O','C','C','O','C','C','O','C'],
'link_rec':[(1,2), (2,3), (3,4,4), (1,5), (5,6),(6,7), (7,8), (8,9),(9,10),(10,11),(11,12), (12,13),(13,14)]},
}
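# Illustrative note (added comment, not part of the original file): each entry
# above maps a residue code to its PDB residue name, its ordered heavy-atom
# names and the corresponding element types; the optional 'link_rec' tuples
# appear to list 1-based indices of bonded atom pairs used when writing
# connectivity records for the non-standard residues.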
atom_positions_beta_up = {'N':[0.000, 0.000, 0.000],
'CA':[1.221, 0.870, 0.000],
'CH3':[1.221, 0.870, 0.000],
'C':[2.442, 0.000, 0.000],
'O':[2.442,-1.200, 0.000],
'CB':[1.221, 1.74, 1.221],
'CG':[1.221,3.250,1.221],
'SG':[1.221,3.450,1.221],
'CG1':[2.442,2.610,1.221],
'CG2':[0.200, 2.610, 1.421],
'OG':[1.221,3.250,1.221],
'OG1':[1.221, 0.300, 2.121],
'CD':[0.000,4.120,1.221],
'SD':[0.221,4.220,1.221],
'CD1':[2.442,4.120,1.221],
'CD2':[0.000,4.120,1.221],
'ND1':[2.442,4.120,1.221],
'ND2':[2.442,4.120,1.221],
'OD1':[0.000,4.120,1.221],
'OD2':[2.442,4.120,1.221],
'CE':[0.000,5.620,1.221],
'CE1':[2.442,5.341,1.221],
'CE2':[0.200,5.341,1.221],
'CE3':[-1.221,3.550,1.600],
'OE1':[-1.170,3.750,1.421],
'OE2':[0.200,5.250,1.221],
'NE1':[1.800,5.541,1.221],
'NE2':[0.551,5.341,1.221],
'NE':[0.000,5.620,1.221],
'NZ':[-1.221,6.490,1.221],
'CZ2':[-1.000, 6.200, 1.500],
'CZ3':[-2.200, 4.120, 1.500],
'CZ':[1.221,6.320,1.221],
'CH2':[-2.200,5.520,1.500],
'NH1':[2.521,6.490,1.221],
'NH2':[1.221,7.990,1.221],
'OH':[1.221,7.820,1.221],
'I':[1.221,8.020,1.221],
'BR':[1.221,8.020,1.221],
'F1':[-1.000,3.250,1.421],
'F2':[3.44,3.250,1.421],
'F3':[-1.000,6.320,1.221],
'F4':[3.44,6.320,1.221],
'F5':[1.221,7.820,1.221],
'CN':[0.000, -1.450, 0.600]}
##Peptoid starting positions
peptoid = { 'N':[0.000, 0.000, 0.000],
'CA':[1.221, 0.870, 0.000],
'CH3':[1.221, 0.870, 0.000],
'C':[2.442, 0.000, 0.000],
'O':[2.442,-1.200, 0.000],
'CA5':[0.000, -1.450, 0.000],
'CB':[-1.250,-2.250,0.000],
'CG':[-1.250,-3.750,0.000],
'OG':[-1.250,-3.750,0.000],
'CG1':[-2.400, -1.450, 0.000],
'NG':[-1.250,-3.750,0.000],
'OG1':[-2.400,-2.250,0.000],
'OG2':[-1.250,-4.100,0.000],
'CD':[-2.450,-4.500,0.000],
'CD1':[-2.450,-4.500,0.000],
'CD2':[-0.050,-4.500,0.000],
'CD3':[-2.450,-4.570,0.000],
'CD4':[-3.600,-2.250,0.000],
'CE':[-2.450,-5.900,0.000],
'CE1':[-2.450,-5.900,0.000],
'CE2':[-0.050,-5.900,0.000],
'CE3':[-3.600,-3.700,0.000],
'NE':[-2.450,-6.000,0.000],
'CZ':[-3.650, -6.700,0.000],
'OZ':[-3.650, -6.700,0.000],
'CZ1':[-1.250, -6.700,0.000],
'OD1':[-2.450,-4.800,0.000],
'OD2':[-0.050,-4.800,0.000],
'CH':[-3.650, -8.200,0.000],
'CT':[-4.850, -8.800,0.000],
'CI':[-4.850, -10.300,0.000],
'OI':[-4.850, -10.300,0.000],
'CK':[-6.050, -11.100,0.000],
'CL':[-6.050, -12.600,0.000],
}
hao = { 'N':[0.000, 0.000, 0.000],
'N02':[1.221, 0.870, 0.000],
'C08':[2.442, 0.000, 0.000],
'O06':[2.442,-1.200, 0.000],
'C06':[3.663, 0.870, 0.000],
'C05':[3.663, 2.370, 0.000],
'OM':[2.442, 3.200, 0.000],
'CM':[2.442, 4.600, 0.000],
'C04':[4.884, 3.170, 0.000],
'C07':[4.884, 0.000, 0.000],
'C03':[6.106, 2.300, 0.000],
'C02':[6.106, 0.870, 0.000],
'N01':[7.307, 0.00, 0.000],
'C01':[8.508, 0.870, 0.000],
'O01':[8.508, 2.270, 0.000],
'C':[9.708, 0.000, 0.000],
'O':[9.708,-1.200, 0.000]}
peptoid_tube = { 'N':[0.000, 0.000, 0.000],
'CA':[1.221, 0.870, 0.000],
'CH3':[1.221, 0.870, 0.000],
'C':[2.442, 0.000, 0.000],
'O':[2.442,-1.200, 0.000],
'CA5':[0.000, -1.450, 0.000],
'CB':[-1.250,-2.250,0.000],
'CG':[-1.250,-3.750,0.000],
'OG':[-1.250,-3.750,0.000],
'CG1':[-2.400, -1.450, 0.000],
'NG':[-1.250,-3.750,0.000],
'OG1':[-2.400,-2.250,0.000],
'OG2':[-1.250,-4.100,0.000],
'CD':[-2.450,-4.500,0.000],
'CD1':[-2.450,-4.500,0.000],
'CD2':[-0.050,-4.500,0.000],
'CD3':[-2.450,-4.570,0.000],
'CD4':[-3.600,-2.250,0.000],
'CE':[-2.450,-5.900,0.000],
'CE1':[-2.450,-5.900,0.000],
'CE2':[-0.050,-5.900,0.000],
'CE3':[-3.600,-3.700,0.000],
'NE':[-2.450,-6.000,0.000],
'CZ':[-3.650, -6.700,0.000],
'OZ':[-3.650, -6.700,0.000],
'CZ1':[-1.250, -6.700,0.000],
'OD1':[-2.450,-4.800,0.000],
'OD2':[-0.050,-4.800,0.000],
'CH':[-3.650, -8.200,0.000],
'CT':[-4.850, -8.800,0.000],
'CI':[-4.850, -10.300,0.000],
'OI':[-4.850, -10.300,0.000],
'CK':[-6.050, -11.100,0.000],
'CL':[-6.050, -12.600,0.000],
}
|
rspencer27/Mass-Spec-App
|
main/PDB_coord_old.py
|
Python
|
apache-2.0
| 16,056
|
[
"NAMD"
] |
512a3199f4d43fcb905f76af845bec3563f341254feae3b638e2a979f0a001e3
|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2014 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import numpy
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import moose
def makeModel():
"""
This example illustrates how to set up a oscillatory Turing pattern
in 1-D using reaction diffusion calculations.
Reaction system is::
s ---a---> a // s goes to a, catalyzed by a.
s ---a---> b // s goes to b, catalyzed by a.
a ---b---> s // a goes to s, catalyzed by b.
b -------> s // b is degraded irreversibly to s.
in sum, **a** has a positive feedback onto itself and also forms **b**.
**b** has a negative feedback onto **a**.
Finally, the diffusion constant for **a** is 1/10 that of **b**.
This chemical system is present in a 1-dimensional (cylindrical)
compartment. The entire reaction-diffusion system is set up
within the script.
"""
# create container for model
r0 = 1e-6 # m
r1 = 1e-6 # m
num = 100
diffLength = 1e-6 # m
    comptLength = num * diffLength # m; avoids shadowing the builtin len
diffConst = 5e-12 # m^2/sec
motorRate = 1e-6 # m/sec
concA = 1 # millimolar
dt4 = 0.02 # for the diffusion
dt5 = 0.2 # for the reaction
model = moose.Neutral( 'model' )
compartment = moose.CylMesh( '/model/compartment' )
compartment.r0 = r0
compartment.r1 = r1
compartment.x0 = 0
    compartment.x1 = comptLength
compartment.diffLength = diffLength
assert( compartment.numDiffCompts == num )
# create molecules and reactions
a = moose.Pool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
s = moose.Pool( '/model/compartment/s' )
e1 = moose.MMenz( '/model/compartment/e1' )
e2 = moose.MMenz( '/model/compartment/e2' )
e3 = moose.MMenz( '/model/compartment/e3' )
r1 = moose.Reac( '/model/compartment/r1' )
moose.connect( e1, 'sub', s, 'reac' )
moose.connect( e1, 'prd', a, 'reac' )
moose.connect( a, 'nOut', e1, 'enzDest' )
e1.Km = 1
e1.kcat = 1
moose.connect( e2, 'sub', s, 'reac' )
moose.connect( e2, 'prd', b, 'reac' )
moose.connect( a, 'nOut', e2, 'enzDest' )
e2.Km = 1
e2.kcat = 0.5
moose.connect( e3, 'sub', a, 'reac' )
moose.connect( e3, 'prd', s, 'reac' )
moose.connect( b, 'nOut', e3, 'enzDest' )
e3.Km = 0.1
e3.kcat = 1
moose.connect( r1, 'sub', b, 'reac' )
moose.connect( r1, 'prd', s, 'reac' )
r1.Kf = 0.3 # 1/sec
r1.Kb = 0 # 1/sec
# Assign parameters
a.diffConst = diffConst/10
b.diffConst = diffConst
s.diffConst = 0
# Make solvers
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
dsolve = moose.Dsolve( '/model/dsolve' )
    # Set up clocks. The dsolve must be scheduled before the stoich is assigned.
moose.setClock( 4, dt4 )
moose.setClock( 5, dt5 )
moose.useClock( 4, '/model/dsolve', 'process' )
# Ksolve must be scheduled after dsolve.
moose.useClock( 5, '/model/compartment/ksolve', 'process' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.dsolve = dsolve
stoich.path = "/model/compartment/##"
assert( dsolve.numPools == 3 )
a.vec.concInit = [0.1]*num
a.vec[0].concInit *= 1.2 # slight perturbation at one end.
b.vec.concInit = [0.1]*num
s.vec.concInit = [1]*num
def displayPlots():
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
pos = numpy.arange( 0, a.vec.conc.size, 1 )
    plt.plot( pos, a.vec.conc, label='a' )
    plt.plot( pos, b.vec.conc, label='b' )
    plt.legend()
    plt.show()
def main():
runtime = 400
displayInterval = 2
makeModel()
dsolve = moose.element( '/model/dsolve' )
moose.reinit()
#moose.start( runtime ) # Run the model for 10 seconds.
a = moose.element( '/model/compartment/a' )
b = moose.element( '/model/compartment/b' )
s = moose.element( '/model/compartment/s' )
img = mpimg.imread( 'turingPatternTut.png' )
#imgplot = plt.imshow( img )
#plt.show()
plt.ion()
fig = plt.figure( figsize=(12,10) )
png = fig.add_subplot(211)
imgplot = plt.imshow( img )
ax = fig.add_subplot(212)
ax.set_ylim( 0, 0.5 )
plt.ylabel( 'Conc (mM)' )
plt.xlabel( 'Position along cylinder (microns)' )
pos = numpy.arange( 0, a.vec.conc.size, 1 )
line1, = ax.plot( pos, a.vec.conc, label='a' )
line2, = ax.plot( pos, b.vec.conc, label='b' )
timeLabel = plt.text(60, 0.4, 'time = 0')
plt.legend()
fig.canvas.draw()
for t in range( displayInterval, runtime, displayInterval ):
moose.start( displayInterval )
line1.set_ydata( a.vec.conc )
line2.set_ydata( b.vec.conc )
timeLabel.set_text( "time = %d" % t )
fig.canvas.draw()
print( "Hit 'enter' to exit" )
raw_input()
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
|
dilawar/moose-full
|
moose-examples/tutorials/ChemicalOscillators/TuringOneDim.py
|
Python
|
gpl-2.0
| 5,380
|
[
"MOOSE"
] |
244771bfe757d1d1485c6428252be4340a4550b73462de7f405a7430c242f0ef
|
from numpy import *
import inspect
from operator import itemgetter
def procdf(function,allvars,procaxis,dimidx=0):
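    # Added descriptive comment (not in the original): procdf recurses over the
    # leading axes for which procaxis is False, slicing every variable along
    # that axis, and applies `function` once only the axes to be processed
    # remain; it returns the assembled output plus a list of placeholders
    # describing the dimensions of the function's output.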
iterate = False
if (dimidx <= len(shape(allvars[0]))):
if (procaxis[dimidx] == False):
iterate = True
if iterate:
#procaxis[dimidx] == False !!!
#so the first item shape of each variable allvars is allvars[0].shape[0]
dataout = []
for axidx in range(allvars[0].shape[0]):
allvarsin = []
for evar in allvars:
allvarsin.append(evar[axidx])
dtotmp, outcoords = procdf(function,allvarsin,procaxis,dimidx=dimidx+1)
dataout.append(dtotmp)
else:
allvarstemp = []
for evar in allvars:
# workaround for memory leak when just running 'dataouttemp = function(allvars)' in case of netcdf file references
if type(evar) != ndarray:
allvarstemp.append(evar)
else:
allvarstemp.append(array(evar))
# end workaround
# # if only one variable, then don't pass it as a list
# if len(allvarstemp == 1):
# allvarstemp = allvarstemp[0]
dataouttemp = function(allvarstemp)
if (type(dataouttemp).__name__ == 'list'):
dataout = dataouttemp[0]
outcoords = dataouttemp[1]
# vardimsout...
else:
dataout = dataouttemp
shapeout = shape(dataout)
# print 'shapeout', shapeout
outcoords = []
for eshape in shapeout:
# <False> means that the dimension given by the output of the
# function is not specified. Afterwards, it should be checked
# whether this dimension is the same as the input dimension
# If so, this should become None
outcoords.append(False)
# print 'outcoords', outcoords
return dataout, outcoords
def multifunc(function,allvars,procaxis,vardims):
"""
function: the function that has to be applied on the variable
procaxis:
        Description: indicates, per dimension of the common dimension ordering,
            whether the function has to be applied along that dimension
        Dimensions: the total number of dimensions occurring in any variable
Example: (False, True, True, False, ...).
vardims:
Description: list of 'pointers' to dimensions for each variable array
Dimensions: (amount of variables, dimensions of that variable) ,
so the second dimension is not fixed for each row
Example: [[3, 1 , 2 , 4 , ...],
[2, 3 , 4 , 6 , ...],
....
]
"""
# add dimensions to variables (i.e. expand array)
#in case they are missing (tbi: make this unnecessary)
dimcount = len(procaxis)
for ivar,evar in enumerate(allvars):
for idim in reversed(range(len(vardims[ivar]))): #reversed(range(dimcount)):
if vardims[ivar][idim] == None: # vardims[ivar]:
# if a function has to be applied on that dimension,
#only add an 'axis' for that dimension
if procaxis[ivar] == True:
# print 1,ivar, allvars[ivar].shape
allvars[ivar] = allvars[ivar][newaxis,:]
# print ivar, allvars[ivar].shape
# else: length of dimension is
# considered the same for all variables!
# so multiple copies are made from the dimension
# of an arbitrary other variable
# (we just take the first that has this variable)
else:
ldone = False
for ivar2,evar2 in enumerate(allvars):
if idim in vardims[ivar2]:
if ldone == False:
# print 2,ivar, allvars[ivar].shape
# dimension length: evar2.shape[vardims[ivar2].index(idim)
allvars[ivar] = repeat(allvars[ivar][newaxis,:],evar2.shape[vardims[ivar2].index(idim)],0)
# print ivar, allvars[ivar].shape
ldone = True
vardims[ivar][idim] = -1
for idim2 in range(len(vardims[ivar])): #reversed(range(dimcount)):
if vardims[ivar][idim2] != None:
vardims[ivar][idim2] = vardims[ivar][idim2] + 1
# put dimensions of all variables arrays in the same order
# as the standard order (which is implicitely given by vardims)
# for ivar,evar in enumerate(allvars):
# print evar.shape, vardims[ivar]
for ivar,evar in enumerate(allvars):
allvars[ivar] = transpose(evar,vardims[ivar])
# for ivar,evar in enumerate(allvars):
# print 'shape before transposition:',evar.shape, vardims[ivar]
# print 'procaxis:', procaxis
# if a certain dimension has
for idim in range(len(procaxis)):
# get maximum dimension
maxdim = 0
for ivar,evar in enumerate(allvars):
maxdim = max(shape(evar)[idim],maxdim)
for ivar,evar in enumerate(allvars):
if ((shape(evar)[idim] == 1) & (maxdim > 1)):
# print maxdim,idim
allvars[ivar] = repeat(allvars[ivar],maxdim,idim)
# tmpdim = range(len(procaxis))
# tmpdim[idim] = 0
# print 'tmpdim 1', tmpdim
# for idim2 in range(idim):
# tmpdim[idim2] = tmpdim[idim2] + 1
# print 'tmpdim', tmpdim
# allvars[ivar] = transpose(allvars[ivar],tmpdim)
# allvars[ivar] = repeat(allvars[ivar][:],maxdim,0)
# inv = range(len(tmpdim))
# for idim2, edim2 in enumerate(tmpdim):
# inv[edim2] = idim2
# allvars[ivar] = transpose(allvars[ivar],inv)
# for ivar,evar in enumerate(allvars):
# print 'shape after expansion:',evar.shape, vardims[ivar]
# print 'procaxis:', procaxis
# now transpose all matrices in a similar way in such a way that the dimensions
# to be processed are at the end
refsorted = sorted(zip(procaxis,range(dimcount)), key=itemgetter(0,1))
trns = [] # the transpose indices
trnsprocaxis = [] # procaxis after transpose
#print('data processing started')
for irefsorted,erefsorted in enumerate(refsorted):
trns.append(erefsorted[1])
trnsprocaxis.append(erefsorted[0])
for ivar,evar in enumerate(allvars):
allvars[ivar] = transpose(evar,trns)
# ...
# for ivar,evar in enumerate(allvars):
# print 'shape after transposition:',evar.shape
dataouttrns,outcoordstrnstmp = procdf(function,allvars,trnsprocaxis)
# print 'dataouttrns',shape(dataouttrns)
#print('shapedataouttrns', shape(dataouttrns), outcoordstrnstmp)
#
# # in case the function introduces extra dimensions: Add these extra dimensions in front of the
# # processed dimensions (we accually put them at the end here but this doens't matter
# amount of processed input dimensions
procdim = len(where(array(trnsprocaxis) == True)[0])
# amount of non-processed input dimensions
nonprocdim = len(where(array(trnsprocaxis) == False)[0])
# amount of extra dimensions introduced by the function
extradims = len(shape(dataouttrns)) - len(trnsprocaxis)
# print('extradims',extradims)
trnsoutaxis = list(trnsprocaxis)
# from procdf, the extra dimension is assumed in front of the processed input dimensions
# (and after the previous extra dimensions). It is put in front of all output dimensions
# We put each extra dimension in front of all the others for the output
# so we raise each reference by the number of extra dimensions
for i in range(extradims):
for itrns, etrns in enumerate(trns):
trns[itrns] = trns[itrns]+1
for i in range(extradims):
trnsoutaxis.append(True)
# from procdf, the extra dimension is assumed in front of the processed input dimensions
# (and after the previous extra dimensions). It is put in front of all output dimensions
trns.insert(nonprocdim+i,i)
# Reorganize the output coordinate list, so that they
# are aligned with the standard dimension numbering. So add 'nones' where needed
outcoordstrns = []
iocttmp = 0
for i,etrnsoutaxis in enumerate(trnsoutaxis):
if etrnsoutaxis: # if the function is applied along the current dimension
outcoordstrns.append(outcoordstrnstmp[iocttmp])
iocttmp = iocttmp + 1
else:
# <None> means that the current dimension corresponds to the input data
# dimensions (is trivially none when this dimension isn't processed)
outcoordstrns.append(None)
#print('data processing ended')
# build the 'inverse permutation operator'
inv = range(len(trns))
for itrns, etrns in enumerate(trns):
inv[etrns] = itrns
# inverse permutation of the output data
# print dataout.shape, inv
dataout = transpose(dataouttrns,inv)
outcoords = []
# inverse permuation of the output coordinates
for iinv,einv in enumerate(inv):
outcoords.append(outcoordstrns[einv])
return dataout,outcoords
|
hendrikwout/sciproc
|
sciproc/scimulti.py
|
Python
|
gpl-3.0
| 9,619
|
[
"NetCDF"
] |
d987d959c49caa3572907ae3b50c6f313c2c427d2dfd259f92921436b84e93f9
|
from askbot.conf.settings_wrapper import settings
from askbot.conf.super_groups import LOGIN_USERS_COMMUNICATION
from askbot.deps import livesettings
from django.utils.translation import ugettext_lazy as _
EMAIL_TEXT = livesettings.ConfigurationGroup(
'EMAIL_TEXT',
_('Email template phrases'),
super_group=LOGIN_USERS_COMMUNICATION
)
settings.register(
livesettings.StringValue(
EMAIL_TEXT,
'EMAIL_TEXT_SHORT_WELCOME',
description = _('Short welcome message, for subject line'),
default = _('Welcome to {{ SITE_NAME }}!'),
help_text = _(
'<b>NOTE: All email text settings allow placeholders: {{ USER_NAME }}, {{ SITE_NAME }} and {{ SITE_LINK }}.</b>'
)
)
)
settings.register(
livesettings.LongStringValue(
EMAIL_TEXT,
'EMAIL_TEXT_LONG_WELCOME',
description = _('Longer welcome message, for email body'),
default = _('<p>Please visit {{ SITE_NAME }} at {{ SITE_LINK }}, we look forward to your posts.</p>'),
)
)
settings.register(
livesettings.LongStringValue(
EMAIL_TEXT,
'EMAIL_TEXT_FOOTER',
description=_('Email footer'),
default=_('<p>Sincerely,<br>{{ SITE_NAME }} Administrator</p>')
)
)
settings.register(
livesettings.LongStringValue(
EMAIL_TEXT,
'EMAIL_TEXT_BATCH_ALERT_HEADER',
description=_('Header for the batch email alerts'),
default=_("""<p>Dear {{ USER_NAME }},</p>
<p>{{ SITE_NAME }} has these updates, please have a look:</p>""")
)
)
|
knowledgepoint-devs/askbot-devel
|
askbot/conf/email_text.py
|
Python
|
gpl-3.0
| 1,588
|
[
"VisIt"
] |
41ab31ff780fc43ea9c4fc75947fa3945934100ae2713c4c0ccaa6c756a1e5b4
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from recipe_engine.types import freeze
DEPS = [
'build/adb',
'build/chromium',
'build/chromium_android',
'build/chromium_tests',
'build/test_utils',
'depot_tools/bot_update',
'depot_tools/gclient',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/step',
]
REPO_URL = 'https://chromium.googlesource.com/chromium/src.git'
def _CreateTestSpec(name, perf_id, required_apks, num_device_shards=1,
num_host_shards=1, target_bits=64,
browser_name=None, remove_system_webview=False,
enable_platform_mode=False):
def _CreateShardTestSpec(name, perf_id, required_apks, num_device_shards,
num_host_shards, shard_index, target_bits,
browser_name, remove_system_webview,
enable_platform_mode):
spec = {
'perf_id': perf_id,
'required_apks': required_apks,
'bucket': 'chrome-perf',
'num_device_shards': num_device_shards,
'num_host_shards': num_host_shards,
'shard_index': shard_index,
'test_spec_file': 'chromium.perf.json',
'max_battery_temp': 350,
'known_devices_file': '.known_devices',
'browser_name': browser_name,
'remove_system_webview': remove_system_webview,
'enable_platform_mode': enable_platform_mode,
}
if target_bits == 32:
builder_name = 'Android Builder'
elif target_bits == 64:
builder_name = 'Android arm64 Builder'
spec['recipe_config'] = 'tests_arm64'
spec['path'] = lambda api: '%s/full-build-linux_%s.zip' % (
builder_name, api.properties['parent_revision'])
return spec
tester_spec = {}
for shard_index in xrange(num_host_shards):
builder_name = '%s (%d)' % (name, shard_index + 1)
tester_spec[builder_name] = _CreateShardTestSpec(
name, perf_id, required_apks, num_device_shards, num_host_shards,
shard_index, target_bits, browser_name, remove_system_webview,
enable_platform_mode)
return tester_spec
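# Illustrative note (added comment, not part of the original recipe): a call
# such as
#     _CreateTestSpec('Android Nexus5 Perf', 'android-nexus5',
#                     required_apks=['ChromePublic.apk'], num_device_shards=7,
#                     num_host_shards=3, target_bits=32)
# returns a dict keyed 'Android Nexus5 Perf (1)' .. 'Android Nexus5 Perf (3)',
# one entry per host shard, with shard_index 0..2 and a download path under
# the 32-bit 'Android Builder' archive.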
def _ChromiumPerfTesters():
testers = [
_CreateTestSpec('Android Galaxy S5 Perf', 'android-galaxy-s5',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, target_bits=32, enable_platform_mode=True),
_CreateTestSpec('Android Nexus5 Perf', 'android-nexus5',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, target_bits=32, enable_platform_mode=True),
_CreateTestSpec('Android Nexus5X Perf', 'android-nexus5X',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, enable_platform_mode=True),
_CreateTestSpec('Android Nexus6 Perf', 'android-nexus6',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, target_bits=32),
_CreateTestSpec('Android Nexus7v2 Perf', 'android-nexus7v2',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, target_bits=32, enable_platform_mode=True),
_CreateTestSpec('Android Nexus9 Perf', 'android-nexus9',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3),
_CreateTestSpec('Android One Perf', 'android-one',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=3, target_bits=32),
_CreateTestSpec('Android Nexus5X WebView Perf', 'android-webview-nexus5X',
required_apks=['SystemWebView.apk', 'SystemWebViewShell.apk'],
num_device_shards=7, num_host_shards=3, target_bits=64,
browser_name='android-webview', remove_system_webview=True),
_CreateTestSpec('Android Nexus6 WebView Perf', 'android-webview-nexus6',
required_apks=['SystemWebView.apk', 'SystemWebViewShell.apk'],
num_device_shards=6, num_host_shards=3, target_bits=32,
browser_name='android-webview', remove_system_webview=True),
]
master_spec = {}
for spec in testers:
master_spec.update(spec)
return master_spec
def _ChromiumPerfFyiTesters():
testers = [
_CreateTestSpec('Android Power Nexus 5X Perf', 'android-power-nexus-5x',
required_apks=['ChromePublic.apk'], num_device_shards=7,
num_host_shards=1),
]
master_spec = {}
for spec in testers:
master_spec.update(spec)
return master_spec
BUILDERS = freeze({
'chromium.perf': _ChromiumPerfTesters(),
'chromium.perf.fyi': _ChromiumPerfFyiTesters(),
})
def RunSteps(api):
mastername = api.properties['mastername']
buildername = api.properties['buildername']
# TODO(akuegel): Move the configs in builders.py in chromium_tests to this
# recipe, and get rid of duplications.
builder = dict(BUILDERS[mastername][buildername])
builder_config = builder.get('recipe_config', 'base_config')
kwargs = {
'REPO_NAME':'src',
'REPO_URL':REPO_URL,
'INTERNAL':False,
'BUILD_CONFIG':'Release',
'TARGET_PLATFORM':'android',
}
api.chromium_android.set_config(builder_config, **kwargs)
api.chromium.set_config(builder_config, **kwargs)
api.gclient.set_config('perf')
api.gclient.apply_config('android')
bot_update_step = api.bot_update.ensure_checkout()
test_spec_file = builder.get('test_spec_file')
test_spec = {}
if test_spec_file:
test_spec = api.chromium_tests.read_test_spec(api, test_spec_file)
scripts_compile_targets = \
api.chromium_tests.get_compile_targets_for_scripts()
builder['tests'] = api.chromium_tests.generate_tests_from_test_spec(
api, test_spec, builder, buildername, mastername, False, None,
scripts_compile_targets, [api.chromium_tests.steps.generate_script],
bot_update_step)
api.path['checkout'] = api.path['slave_build'].join('src')
api.chromium_android.clean_local_files()
# TODO(jbudorick): Remove this after resolving
# https://github.com/catapult-project/catapult/issues/2901
devil_path = api.path['checkout'].join('third_party', 'catapult', 'devil')
api.python.inline(
'initialize devil',
"""
import sys
sys.path.append(sys.argv[1])
from devil import devil_env
devil_env.config.Initialize()
devil_env.config.PrefetchPaths(dependencies=['adb'])
""",
args=[devil_path])
api.adb.set_adb_path(
devil_path.join('bin', 'deps', 'linux2', 'x86_64', 'bin', 'adb'))
api.chromium_android.download_build(bucket=builder['bucket'],
path=builder['path'](api))
api.chromium_android.common_tests_setup_steps(
perf_setup=True,
remove_system_webview=builder.get('remove_system_webview', False))
required_apks = builder.get('required_apks', [])
for apk in required_apks:
api.chromium_android.adb_install_apk(apk)
api.chromium_android.host_info(
args=api.chromium_tests.get_common_args_for_scripts())
test_runner = api.chromium_tests.create_test_runner(
api, builder.get('tests', []))
try:
failures = []
if test_runner:
try:
test_runner()
except api.step.StepFailure as f:
failures.append(f)
dynamic_perf_tests = api.chromium_tests.steps.DynamicPerfTests(
builder['perf_id'], 'android', None,
max_battery_temp=builder.get('max_battery_temp'),
num_device_shards=builder['num_device_shards'],
num_host_shards=builder.get('num_host_shards', 1),
shard_index=builder.get('shard_index', 0),
override_browser_name=builder.get('browser_name'),
enable_platform_mode=builder.get('enable_platform_mode'))
dynamic_perf_tests.run(api, None)
if failures:
raise api.step.StepFailure('src-side perf tests failed %s' % failures)
finally:
api.chromium_android.common_tests_final_steps(
logcat_gs_bucket='chromium-android')
def _sanitize_nonalpha(text):
return ''.join(c if c.isalnum() else '_' for c in text)
def GenTests(api):
for mastername, builders in BUILDERS.iteritems():
for buildername in builders:
yield (
api.test('full_%s_%s' % (_sanitize_nonalpha(mastername),
_sanitize_nonalpha(buildername))) +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername=mastername,
buildername=buildername,
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release'))
yield (api.test('provision_devices') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release')
+ api.step_data('provision_devices', retcode=1))
yield (api.test('get_perf_test_list_old_data') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release') +
api.override_step_data(
'get perf test list',
api.json.output(['perf_test.foo', 'page_cycler.foo'])))
yield (api.test('src_side_script_fails') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release') +
api.override_step_data(
'read test spec (chromium.perf.json)',
api.json.output({
"Android Nexus5 Perf (1)": {
"scripts": [
{
"name": "host_info",
"script": "host_info.py"
}]}})) +
api.step_data('host_info', retcode=1))
yield (api.test('test_failure') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release') +
api.override_step_data(
'perf_test.foo', retcode=1))
yield (api.test('missing_device') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release') +
api.override_step_data(
'perf_test.foo', retcode=87))
yield (api.test('host_info_failure') +
api.properties.generic(
path_config='kitchen',
repo_name='src',
repo_url=REPO_URL,
mastername='chromium.perf',
buildername='Android Nexus5 Perf (1)',
parent_buildername='parent_buildername',
parent_buildnumber='1729',
parent_revision='deadbeef',
revision='deadbeef',
slavename='slavename',
target='Release') +
api.override_step_data(
'Host Info',
api.json.output({
'valid': True,
'failures': ['Failure A', 'Failure B'],
'_host_info': {
'os_system': 'os_system',
'os_release': 'os_release',
'processor': 'processor',
'num_cpus': 'num_cpus',
'free_disk_space': 'free_disk_space',
'python_version': 'python_version',
'python_path': 'python_path',
'devices': [{
"usb_status": True,
"blacklisted": None,
"ro.build.fingerprint": "fingerprint",
"battery": {
"status": "5",
"scale": "100",
"temperature": "240",
"level": "100",
"technology": "Li-ion",
"AC powered": "false",
"health": "2",
"voltage": "4302",
"Wireless powered": "false",
"USB powered": "true",
"Max charging current": "500000",
"present": "true"
},
"adb_status": "device",
"imei_slice": "",
"ro.build.product": "bullhead",
"ro.build.id": "MDB08Q",
"serial": "00d0d567893340f4",
"wifi_ip": ""
}]
}}),
retcode=87))
|
geminy/aidear
|
oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/infra/recipes/android/perf.py
|
Python
|
gpl-3.0
| 14,074
|
[
"Galaxy"
] |
343abefa40aea95c2336c0373c797e1a94e90e17951b75f57ee39c24a87c5f32
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import os
import MDAnalysisTests
class TestRelativeImports(object):
"""Relative imports are banned in unit testing modules (Issue #189), so run tests to enforce this policy."""
path_to_testing_modules = MDAnalysisTests.__path__[0]
# Exclusion path relative to MDAnalysisTests
exclusions = ['/plugins']
@classmethod
def is_excluded(cls, path):
leaf = path[len(cls.path_to_testing_modules):]
return leaf in cls.exclusions
@staticmethod
def _run_test_relative_import(testing_module):
with open(testing_module, 'r') as test_module_file_object:
for lineno, line in enumerate(test_module_file_object, start=1):
if 'from .' in line and 'import' in line \
and not 'test_imports' in testing_module:
raise AssertionError(
"A relative import statement was found in "
"module {testing_module} at linenumber {lineno}.".format(**vars()))
def test_relative_imports(self):
for dirpath, dirnames, files in os.walk(self.path_to_testing_modules):
if self.is_excluded(dirpath):
continue
for f in filter(lambda x: x.endswith('.py'), files):
fpath = os.path.join(dirpath, f)
if self.is_excluded(fpath):
continue
yield self._run_test_relative_import, fpath
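# Illustrative note (added comment, not part of the original test module): a
# test file containing a line such as
#     from .datafiles import PSF
# would be flagged by _run_test_relative_import above, whereas the absolute
# form
#     from MDAnalysisTests.datafiles import PSF
# passes the check.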
|
alejob/mdanalysis
|
testsuite/MDAnalysisTests/test_imports.py
|
Python
|
gpl-2.0
| 2,473
|
[
"MDAnalysis"
] |
8dfa2201985a147ddddd253dfc823d158db564fab169db0d2a34bc7a23197243
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
import sys
from functools import partial
from PyQt5.Qt import (
QMainWindow, Qt, QApplication, pyqtSignal, QLabel, QIcon, QFormLayout, QSize,
QDialog, QSpinBox, QCheckBox, QDialogButtonBox, QToolButton, QMenu, QInputDialog)
from calibre.gui2 import error_dialog
from calibre.gui2.tweak_book import actions, tprefs, editors
from calibre.gui2.tweak_book.editor.canvas import Canvas
class ResizeDialog(QDialog): # {{{
def __init__(self, width, height, parent=None):
QDialog.__init__(self, parent)
self.l = l = QFormLayout(self)
self.setLayout(l)
self.aspect_ratio = width / float(height)
l.addRow(QLabel(_('Choose the new width and height')))
self._width = w = QSpinBox(self)
w.setMinimum(1)
w.setMaximum(10 * width)
w.setValue(width)
w.setSuffix(' px')
l.addRow(_('&Width:'), w)
self._height = h = QSpinBox(self)
h.setMinimum(1)
h.setMaximum(10 * height)
h.setValue(height)
h.setSuffix(' px')
l.addRow(_('&Height:'), h)
w.valueChanged.connect(partial(self.keep_ar, 'width'))
h.valueChanged.connect(partial(self.keep_ar, 'height'))
self.ar = ar = QCheckBox(_('Keep &aspect ratio'))
ar.setChecked(True)
l.addRow(ar)
self.resize(self.sizeHint())
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addRow(bb)
def keep_ar(self, which):
if self.ar.isChecked():
val = getattr(self, which)
oval = val / self.aspect_ratio if which == 'width' else val * self.aspect_ratio
other = getattr(self, '_height' if which == 'width' else '_width')
other.blockSignals(True)
other.setValue(oval)
other.blockSignals(False)
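    # Illustrative note (added comment, not part of the original file): for a
    # 200x100 image (aspect_ratio == 2.0) with "Keep aspect ratio" checked,
    # setting the width box to 300 updates the height box to 150, while
    # setting the height box to 50 updates the width box to 100.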
@dynamic_property
def width(self):
def fget(self):
return self._width.value()
def fset(self, val):
self._width.setValue(val)
return property(fget=fget, fset=fset)
@dynamic_property
def height(self):
def fget(self):
return self._height.value()
def fset(self, val):
self._height.setValue(val)
return property(fget=fget, fset=fset)
# }}}
class Editor(QMainWindow):
has_line_numbers = False
modification_state_changed = pyqtSignal(object)
undo_redo_state_changed = pyqtSignal(object, object)
data_changed = pyqtSignal(object)
cursor_position_changed = pyqtSignal() # dummy
copy_available_state_changed = pyqtSignal(object)
def __init__(self, syntax, parent=None):
QMainWindow.__init__(self, parent)
if parent is None:
self.setWindowFlags(Qt.Widget)
self.is_synced_to_container = False
self.syntax = syntax
self._is_modified = False
self.copy_available = self.cut_available = False
self.quality = 90
self.canvas = Canvas(self)
self.setCentralWidget(self.canvas)
self.create_toolbars()
self.canvas.image_changed.connect(self.image_changed)
self.canvas.undo_redo_state_changed.connect(self.undo_redo_state_changed)
self.canvas.selection_state_changed.connect(self.update_clipboard_actions)
@dynamic_property
def is_modified(self):
def fget(self):
return self._is_modified
def fset(self, val):
self._is_modified = val
self.modification_state_changed.emit(val)
return property(fget=fget, fset=fset)
@dynamic_property
def current_editing_state(self):
def fget(self):
return {}
def fset(self, val):
pass
return property(fget=fget, fset=fset)
@property
def undo_available(self):
return self.canvas.undo_action.isEnabled()
@property
def redo_available(self):
return self.canvas.redo_action.isEnabled()
@dynamic_property
def current_line(self):
def fget(self):
return 0
def fset(self, val):
pass
return property(fget=fget, fset=fset)
@property
def number_of_lines(self):
return 0
def pretty_print(self, name):
return False
def change_document_name(self, newname):
pass
def get_raw_data(self):
return self.canvas.get_image_data(quality=self.quality)
@dynamic_property
def data(self):
def fget(self):
return self.get_raw_data()
def fset(self, val):
self.canvas.load_image(val)
return property(fget=fget, fset=fset)
def replace_data(self, raw, only_if_different=True):
# We ignore only_if_different as it is useless in our case, and
# there is no easy way to check two images for equality
self.data = raw
def apply_settings(self, prefs=None, dictionaries_changed=False):
pass
def go_to_line(self, *args, **kwargs):
pass
def save_state(self):
for bar in self.bars:
if bar.isFloating():
return
tprefs['image-editor-state'] = bytearray(self.saveState())
def restore_state(self):
state = tprefs.get('image-editor-state', None)
if state is not None:
self.restoreState(state)
def set_focus(self):
self.canvas.setFocus(Qt.OtherFocusReason)
def undo(self):
self.canvas.undo_action.trigger()
def redo(self):
self.canvas.redo_action.trigger()
def copy(self):
self.canvas.copy()
def cut(self):
return error_dialog(self, _('Not allowed'), _(
'Cutting of images is not allowed. If you want to delete the image, use'
' the files browser to do it.'), show=True)
def paste(self):
self.canvas.paste()
# Search and replace {{{
def mark_selected_text(self, *args, **kwargs):
pass
def find(self, *args, **kwargs):
return False
def replace(self, *args, **kwargs):
return False
def all_in_marked(self, *args, **kwargs):
return 0
@property
def selected_text(self):
return ''
# }}}
def image_changed(self, new_image):
self.is_synced_to_container = False
self._is_modified = True
self.copy_available = self.canvas.is_valid
self.copy_available_state_changed.emit(self.copy_available)
self.data_changed.emit(self)
self.modification_state_changed.emit(True)
self.fmt_label.setText(' ' + (self.canvas.original_image_format or '').upper())
im = self.canvas.current_image
self.size_label.setText('{0} x {1}{2}'.format(im.width(), im.height(), ' px'))
def break_cycles(self):
self.canvas.break_cycles()
self.canvas.image_changed.disconnect()
self.canvas.undo_redo_state_changed.disconnect()
self.canvas.selection_state_changed.disconnect()
self.modification_state_changed.disconnect()
self.undo_redo_state_changed.disconnect()
self.data_changed.disconnect()
self.cursor_position_changed.disconnect()
self.copy_available_state_changed.disconnect()
def contextMenuEvent(self, ev):
ev.ignore()
def create_toolbars(self):
self.action_bar = b = self.addToolBar(_('File actions tool bar'))
b.setObjectName('action_bar') # Needed for saveState
for x in ('undo', 'redo'):
b.addAction(getattr(self.canvas, '%s_action' % x))
self.edit_bar = b = self.addToolBar(_('Edit actions tool bar'))
b.setObjectName('edit-actions-bar')
for x in ('copy', 'paste'):
ac = actions['editor-%s' % x]
setattr(self, 'action_' + x, b.addAction(ac.icon(), x, getattr(self, x)))
self.update_clipboard_actions()
b.addSeparator()
self.action_trim = ac = b.addAction(QIcon(I('trim.png')), _('Trim image'), self.canvas.trim_image)
self.action_rotate = ac = b.addAction(QIcon(I('rotate-right.png')), _('Rotate image'), self.canvas.rotate_image)
self.action_resize = ac = b.addAction(QIcon(I('resize.png')), _('Resize image'), self.resize_image)
b.addSeparator()
self.action_filters = ac = b.addAction(QIcon(I('filter.png')), _('Image filters'))
b.widgetForAction(ac).setPopupMode(QToolButton.InstantPopup)
self.filters_menu = m = QMenu()
ac.setMenu(m)
m.addAction(_('Auto-trim image'), self.canvas.autotrim_image)
m.addAction(_('Sharpen image'), self.sharpen_image)
m.addAction(_('Blur image'), self.blur_image)
m.addAction(_('De-speckle image'), self.canvas.despeckle_image)
self.info_bar = b = self.addToolBar(_('Image information bar'))
b.setObjectName('image_info_bar')
self.fmt_label = QLabel('')
b.addWidget(self.fmt_label)
b.addSeparator()
self.size_label = QLabel('')
b.addWidget(self.size_label)
self.bars = [self.action_bar, self.edit_bar, self.info_bar]
for x in self.bars:
x.setFloatable(False)
x.topLevelChanged.connect(self.toolbar_floated)
x.setIconSize(QSize(tprefs['toolbar_icon_size'], tprefs['toolbar_icon_size']))
self.restore_state()
def toolbar_floated(self, floating):
if not floating:
self.save_state()
for ed in editors.itervalues():
if ed is not self:
ed.restore_state()
def update_clipboard_actions(self, *args):
if self.canvas.has_selection:
self.action_copy.setText(_('Copy selected region'))
self.action_paste.setText(_('Paste into selected region'))
else:
self.action_copy.setText(_('Copy image'))
self.action_paste.setText(_('Paste image'))
def resize_image(self):
im = self.canvas.current_image
d = ResizeDialog(im.width(), im.height(), self)
if d.exec_() == d.Accepted:
self.canvas.resize_image(d.width, d.height)
def sharpen_image(self):
val, ok = QInputDialog.getInt(self, _('Sharpen image'), _(
'The standard deviation for the Gaussian sharpen operation (higher means more sharpening)'), value=3, min=1, max=20)
if ok:
self.canvas.sharpen_image(sigma=val)
def blur_image(self):
val, ok = QInputDialog.getInt(self, _('Blur image'), _(
'The standard deviation for the Gaussian blur operation (higher means more blurring)'), value=3, min=1, max=20)
if ok:
self.canvas.blur_image(sigma=val)
def launch_editor(path_to_edit, path_is_raw=False):
app = QApplication([])
if path_is_raw:
raw = path_to_edit
else:
with open(path_to_edit, 'rb') as f:
raw = f.read()
t = Editor('raster_image')
t.data = raw
t.show()
app.exec_()
if __name__ == '__main__':
launch_editor(sys.argv[-1])
|
jeanlinux/calibre
|
src/calibre/gui2/tweak_book/editor/image.py
|
Python
|
gpl-3.0
| 11,317
|
[
"Gaussian"
] |
13df68ea54bc5a84fe1ddd0a18b1d136b980a8c2f4d8b4959598443039edb923
|
#
# mainTab
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'experiment'
tab.settings['Output file name'] = 'fpsq.exp'
#
# Handle the special case of the first scenario
#
self.notebook.switchScenario(0,scenarioType="Single crystal")
#
#
#
tab = self.notebook.mainTab
tab.settings['Program'] = 'experiment'
tab.settings['Output file name'] = 'fpsq.exp'
tab.settings['Excel file name'] = ''
tab.settings['Script file name'] = 'script.py'
tab.settings['QM program'] = ''
tab.settings['Hessian symmetrisation'] = 'symm'
#
#
tab = self.notebook.settingsTab
tab.settings['Eckart flag'] = True
tab.settings['Neutral Born charges'] = False
tab.settings['Sigma value'] = 5
tab.settings['Mass definition'] = 'average'
tab.settings['Optical permittivity edited'] = False
tab.sigmas_cm1 = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5]
#
#
tab = self.notebook.scenarios[0]
tab.settings['Legend'] = 'c | y theta = 30'
tab.settings['Scenario type'] = 'Single crystal'
tab.settings['Unique direction - h'] = 1
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Azimuthal angle'] = 0.0
tab.settings['Angle of incidence'] = 30.0
tab.settings['Superstrate dielectric'] = 1.0
tab.settings['Substrate dielectric'] = 1.0
tab.settings['Superstrate depth'] = 999.0
tab.settings['Substrate depth'] = 999.0
tab.settings['Film thickness'] = 100.0
tab.settings['Mode'] = 'Thick slab'
tab.settings['Frequency units'] = 'wavenumber'
#
#
self.notebook.addScenario(scenarioType="Single crystal")
tab = self.notebook.scenarios[1]
tab.settings['Legend'] = 'c | x theta = 30'
tab.settings['Scenario type'] = 'Single crystal'
tab.settings['Unique direction - h'] = 1
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Azimuthal angle'] = 90.0
tab.settings['Angle of incidence'] = 30.0
tab.settings['Superstrate dielectric'] = 1.0
tab.settings['Substrate dielectric'] = 1.0
tab.settings['Superstrate depth'] = 999.0
tab.settings['Substrate depth'] = 999.0
tab.settings['Film thickness'] = 100.0
tab.settings['Mode'] = 'Thick slab'
tab.settings['Frequency units'] = 'wavenumber'
#
#
self.notebook.addScenario(scenarioType="Single crystal")
tab = self.notebook.scenarios[2]
tab.settings['Legend'] = 'c | y theta = 60'
tab.settings['Scenario type'] = 'Single crystal'
tab.settings['Unique direction - h'] = 1
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Azimuthal angle'] = 0.0
tab.settings['Angle of incidence'] = 60.0
tab.settings['Superstrate dielectric'] = 1.0
tab.settings['Substrate dielectric'] = 1.0
tab.settings['Superstrate depth'] = 999.0
tab.settings['Substrate depth'] = 999.0
tab.settings['Film thickness'] = 100.0
tab.settings['Mode'] = 'Thick slab'
tab.settings['Frequency units'] = 'wavenumber'
#
# scenarios[3]
#
self.notebook.addScenario(scenarioType="Single crystal")
tab = self.notebook.scenarios[3]
tab.settings['Legend'] = 'c | x theta = 60'
tab.settings['Scenario type'] = 'Single crystal'
tab.settings['Unique direction - h'] = 1
tab.settings['Unique direction - k'] = 0
tab.settings['Unique direction - l'] = 0
tab.settings['Azimuthal angle'] = 90.0
tab.settings['Angle of incidence'] = 60.0
tab.settings['Superstrate dielectric'] = 1.0
tab.settings['Substrate dielectric'] = 1.0
tab.settings['Superstrate depth'] = 999.0
tab.settings['Substrate depth'] = 999.0
tab.settings['Film thickness'] = 100.0
tab.settings['Mode'] = 'Thick slab'
tab.settings['Frequency units'] = 'wavenumber'
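#
# Editor's note (illustrative only, not part of the original script): the four
# scenario blocks above differ only in legend, azimuthal angle and angle of
# incidence, so an equivalent script could generate them in a loop, e.g.:
#
#   for index, (legend, azimuthal, incidence) in enumerate([
#           ('c | y theta = 30',  0.0, 30.0),
#           ('c | x theta = 30', 90.0, 30.0),
#           ('c | y theta = 60',  0.0, 60.0),
#           ('c | x theta = 60', 90.0, 60.0)]):
#       if index > 0:
#           self.notebook.addScenario(scenarioType="Single crystal")
#       tab = self.notebook.scenarios[index]
#       tab.settings['Legend'] = legend
#       tab.settings['Azimuthal angle'] = azimuthal
#       tab.settings['Angle of incidence'] = incidence
#       # ... the remaining settings are identical for every scenario
#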
#
# analysisTab
#
tab = self.notebook.analysisTab
tab.settings['Minimum frequency'] = -1
tab.settings['Maximum frequency'] = 400
tab.settings['title'] = 'Analysis'
tab.settings['Covalent radius scaling'] = 1.1
tab.settings['Bonding tolerance'] = 0.1
tab.settings['Bar width'] = 0.5
#
# viewerTab
#
tab = self.notebook.viewerTab
tab.settings['Atom scaling'] = 0.5
tab.settings['Maximum displacement'] = 1.0
tab.settings['Bond colour'] = [80, 80, 80, 255]
tab.settings['Bond radius'] = 0.1
tab.settings['Cell colour'] = [255, 0, 0, 255]
tab.settings['Cell radius'] = 0.1
tab.settings['Background colour'] = [120, 120, 120, 255]
tab.settings['Arrow colour'] = [0, 255, 0, 255]
tab.settings['Arrow radius'] = 0.07
tab.settings['Number of phase steps'] = 41
tab.settings['Super Cell'] = [1, 1, 1]
#
# fitterTab
#
tab = self.notebook.fitterTab
tab.settings['Excel file name'] = ''
tab.settings['Plot title'] = 'Experimental and Calculated Spectral Comparison'
tab.settings['Fitting type'] = 'Minimise x-correlation'
tab.settings['Number of iterations'] = 20
tab.settings['Frequency scaling factor'] = 1.0
tab.settings['Optimise frequency scaling'] = False
tab.settings['Spectrum scaling'] = False
tab.settings['Spectrum scaling factor'] = 1.0
tab.settings['Independent y-axes'] = True
tab.settings['Spectral difference threshold'] = 0.05
tab.settings['HPFilter lambda'] = 7.0
tab.settings['Baseline removal'] = False
tab.settings['Scenario index'] = 0
#
# plottingTab
#
tab = self.notebook.plottingTab
tab.settings['Minimum frequency'] = 350
tab.settings['Maximum frequency'] = 1400
tab.settings['Frequency increment'] = 0.2
tab.settings['Molar definition'] = 'Unit cells'
tab.settings['Number of atoms'] = 1
tab.settings['Plot type'] = 'Crystal Reflectance (P polarisation)'
tab.settings['Plot title'] = ''
tab.settings['concentration'] = 13.743950329674377
|
JohnKendrick/PDielec
|
Examples/Experiment/fpsq/script.py
|
Python
|
mit
| 5,416
|
[
"CRYSTAL"
] |
045f233d626fecfff365a807285e886154b41df4cc71f674c5dab1c27df1ea1a
|
# -*- coding: utf-8 -*-
#
# conncomp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Create two populations of pyramidal cells and two populations of interneurons
-----------------------------------------------------------------------------
Create two populations of pyramidal cells and two populations of interneurons
on a 30x30 grid. Connect with two projections, one pyr->pyr, one pyr->in, and
visualize.
BCCN Tutorial @ CNS*09
Hans Ekkehard Plesser, UMB
"""
import nest
import matplotlib.pyplot as plt
import numpy as np
nest.ResetKernel()
nest.set_verbosity('M_WARNING')
nest.CopyModel('iaf_psc_alpha', 'pyr')
nest.CopyModel('iaf_psc_alpha', 'in')
#########################################################################
# same positions for all populations
pos = nest.spatial.grid(shape=[30, 30], extent=[3., 3.])
a_pyr = nest.Create('pyr', positions=pos)
a_in = nest.Create('in', positions=pos)
b_pyr = nest.Create('pyr', positions=pos)
b_in = nest.Create('in', positions=pos)
nest.Connect(a_pyr, b_pyr, {'rule': 'pairwise_bernoulli',
'p': 0.5,
'mask': {'circular': {'radius': 0.5}}})
nest.Connect(a_pyr, b_in, {'rule': 'pairwise_bernoulli',
'p': 0.2,
'mask': {'circular': {'radius': 1.}}})
plt.clf()
######################################################
# plot targets of neurons in different grid locations
# obtain node id for center: pick first node of composite
ctr_index = 30 * 15 + 15
ctr_id = a_pyr[ctr_index:ctr_index + 1]
# get all projection targets of center neuron
conn = nest.GetConnections(ctr_id)
tgts = conn.get('target')
tpyr = nest.GetTargetPositions(ctr_id, b_pyr)[0]
tin = nest.GetTargetPositions(ctr_id, b_in)[0]
tpyr_x = np.array([x for x, y in tpyr])
tpyr_y = np.array([y for x, y in tpyr])
tin_x = np.array([x for x, y in tin])
tin_y = np.array([y for x, y in tin])
# scatter-plot
plt.scatter(tpyr_x - 0.02, tpyr_y - 0.02, 20, 'b', zorder=10)
plt.scatter(tin_x + 0.02, tin_y + 0.02, 20, 'r', zorder=10)
# mark locations with background grey circle
plt.plot(tpyr_x, tpyr_y, 'o', markerfacecolor=(0.7, 0.7, 0.7),
markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_')
plt.plot(tin_x, tin_y, 'o', markerfacecolor=(0.7, 0.7, 0.7),
markersize=10, markeredgewidth=0, zorder=1, label='_nolegend_')
# mark sender position with transparent red circle
ctrpos = nest.GetPosition(ctr_id)
plt.gca().add_patch(plt.Circle(ctrpos, radius=0.15, zorder=99,
fc='r', alpha=0.4, ec='none'))
# mark mask positions with open red/blue circles
plt.gca().add_patch(plt.Circle(ctrpos, radius=0.5, zorder=2,
fc='none', ec='b', lw=3))
plt.gca().add_patch(plt.Circle(ctrpos, radius=1.0, zorder=2,
fc='none', ec='r', lw=3))
# mark layer edge
plt.gca().add_patch(plt.Rectangle((-1.5, -1.5), 3.0, 3.0, zorder=1,
fc='none', ec='k', lw=3))
# beautify
plt.axes().set_xticks(np.arange(-1.5, 1.55, 0.5))
plt.axes().set_yticks(np.arange(-1.5, 1.55, 0.5))
plt.grid(True)
plt.axis([-1.6, 1.6, -1.6, 1.6])
plt.axes().set_aspect('equal', 'box')
plt.show()
|
sdiazpier/nest-simulator
|
pynest/examples/spatial/conncomp.py
|
Python
|
gpl-2.0
| 3,879
|
[
"NEURON"
] |
d67bc5b0c0260e2b4290211f5e904b065854cee1278a95ea6fafca8b93b203d4
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class P4est(AutotoolsPackage):
"""Dynamic management of a collection (a forest) of adaptive octrees in
parallel"""
homepage = "http://www.p4est.org"
url = "http://p4est.github.io/release/p4est-1.1.tar.gz"
maintainers = ['davydden']
version('2.0', 'c522c5b69896aab39aa5a81399372a19a6b03fc6200d2d5d677d9a22fe31029a')
version('1.1', '37ba7f4410958cfb38a2140339dbf64f')
# build dependencies
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool@2.4.2:', type='build')
# other dependencies
depends_on('mpi')
depends_on('zlib')
def configure_args(self):
return [
'--enable-mpi',
'--enable-shared',
'--disable-vtk-binary',
'--without-blas',
'CPPFLAGS=-DSC_LOG_PRIORITY=SC_LP_ESSENTIAL',
'CFLAGS=-O2',
'CC=%s' % self.spec['mpi'].mpicc,
'CXX=%s' % self.spec['mpi'].mpicxx,
'FC=%s' % self.spec['mpi'].mpifc,
'F77=%s' % self.spec['mpi'].mpif77
]
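# Editor's illustrative note (not part of the recipe): with this package file in
# a Spack repository, a typical install command would be
#
#     spack install p4est ^openmpi
#
# where the MPI provider named after '^' (here openmpi) is the user's choice.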
|
skosukhin/spack
|
var/spack/repos/builtin/packages/p4est/package.py
|
Python
|
lgpl-2.1
| 2,340
|
[
"VTK"
] |
49c8a49006ad3bff43f555fd32608e11fdbf410cfa4af62e52e82581d0fda895
|
# Copyright (c) 2013-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2013-2014 Google, Inc.
# Copyright (c) 2014-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2014 Vlad Temian <vladtemian@gmail.com>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Cezar <celnazli@bitdefender.com>
# Copyright (c) 2015 Chris Rebert <code@rebertia.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Jared Garst <cultofjared@gmail.com>
# Copyright (c) 2017 Renat Galimov <renat2017@gmail.com>
# Copyright (c) 2017 Martin <MartinBasti@users.noreply.github.com>
# Copyright (c) 2017 Christopher Zurcher <zurcher@users.noreply.github.com>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 Banjamin Freeman <befreeman@users.noreply.github.com>
# Copyright (c) 2018 Ioana Tagirta <ioana.tagirta@gmail.com>
# Copyright (c) 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2019 Julien Palard <julien@palard.fr>
# Copyright (c) 2019 laike9m <laike9m@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Robert Schweizer <robert_schweizer@gmx.de>
# Copyright (c) 2019 fadedDexofan <fadedDexofan@gmail.com>
# Copyright (c) 2020 Sorin Sbarnea <ssbarnea@redhat.com>
# Copyright (c) 2020 Federico Bond <federicobond@gmail.com>
# Copyright (c) 2020 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2020 谭九鼎 <109224573@qq.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2021 Daniël van Noord <13665637+DanielNoord@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Yilei "Dolee" Yang <yileiyang@google.com>
# Copyright (c) 2021 Matus Valo <matusvalo@users.noreply.github.com>
# Copyright (c) 2021 victor <16359131+jiajunsu@users.noreply.github.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""Checkers for various standard library functions."""
import sys
from collections.abc import Iterable
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set
import astroid
from astroid import nodes
from pylint import interfaces
from pylint.checkers import BaseChecker, DeprecatedMixin, utils
from pylint.interfaces import IAstroidChecker
if TYPE_CHECKING:
from pylint.lint import PyLinter
OPEN_FILES_MODE = ("open", "file")
OPEN_FILES_ENCODING = ("open", "read_text", "write_text")
UNITTEST_CASE = "unittest.case"
THREADING_THREAD = "threading.Thread"
COPY_COPY = "copy.copy"
OS_ENVIRON = "os._Environ"
ENV_GETTERS = ("os.getenv",)
SUBPROCESS_POPEN = "subprocess.Popen"
SUBPROCESS_RUN = "subprocess.run"
OPEN_MODULE = {"_io", "pathlib"}
DEBUG_BREAKPOINTS = ("builtins.breakpoint", "sys.breakpointhook", "pdb.set_trace")
LRU_CACHE = {
"functools.lru_cache", # Inferred for @lru_cache
"functools._lru_cache_wrapper.wrapper", # Inferred for @lru_cache() on >= Python 3.8
"functools.lru_cache.decorating_function", # Inferred for @lru_cache() on <= Python 3.7
}
NON_INSTANCE_METHODS = {"builtins.staticmethod", "builtins.classmethod"}
DEPRECATED_MODULES = {
(0, 0, 0): {"tkinter.tix", "fpectl"},
(3, 2, 0): {"optparse"},
(3, 3, 0): {"xml.etree.cElementTree"},
(3, 4, 0): {"imp"},
(3, 5, 0): {"formatter"},
(3, 6, 0): {"asynchat", "asyncore"},
(3, 7, 0): {"macpath"},
(3, 9, 0): {"lib2to3", "parser", "symbol", "binhex"},
(3, 10, 0): {"distutils"},
}
DEPRECATED_ARGUMENTS = {
(0, 0, 0): {
"int": ((None, "x"),),
"bool": ((None, "x"),),
"float": ((None, "x"),),
},
(3, 8, 0): {
"asyncio.tasks.sleep": ((None, "loop"),),
"asyncio.tasks.gather": ((None, "loop"),),
"asyncio.tasks.shield": ((None, "loop"),),
"asyncio.tasks.wait_for": ((None, "loop"),),
"asyncio.tasks.wait": ((None, "loop"),),
"asyncio.tasks.as_completed": ((None, "loop"),),
"asyncio.subprocess.create_subprocess_exec": ((None, "loop"),),
"asyncio.subprocess.create_subprocess_shell": ((4, "loop"),),
"gettext.translation": ((5, "codeset"),),
"gettext.install": ((2, "codeset"),),
"functools.partialmethod": ((None, "func"),),
"weakref.finalize": ((None, "func"), (None, "obj")),
"profile.Profile.runcall": ((None, "func"),),
"cProfile.Profile.runcall": ((None, "func"),),
"bdb.Bdb.runcall": ((None, "func"),),
"trace.Trace.runfunc": ((None, "func"),),
"curses.wrapper": ((None, "func"),),
"unittest.case.TestCase.addCleanup": ((None, "function"),),
"concurrent.futures.thread.ThreadPoolExecutor.submit": ((None, "fn"),),
"concurrent.futures.process.ProcessPoolExecutor.submit": ((None, "fn"),),
"contextlib._BaseExitStack.callback": ((None, "callback"),),
"contextlib.AsyncExitStack.push_async_callback": ((None, "callback"),),
"multiprocessing.managers.Server.create": ((None, "c"), (None, "typeid")),
"multiprocessing.managers.SharedMemoryServer.create": (
(None, "c"),
(None, "typeid"),
),
},
(3, 9, 0): {"random.Random.shuffle": ((1, "random"),)},
}
DEPRECATED_DECORATORS = {
(3, 8, 0): {"asyncio.coroutine"},
(3, 3, 0): {
"abc.abstractclassmethod",
"abc.abstractstaticmethod",
"abc.abstractproperty",
},
(3, 4, 0): {"importlib.util.module_for_loader"},
}
DEPRECATED_METHODS: Dict = {
0: {
"cgi.parse_qs",
"cgi.parse_qsl",
"ctypes.c_buffer",
"distutils.command.register.register.check_metadata",
"distutils.command.sdist.sdist.check_metadata",
"tkinter.Misc.tk_menuBar",
"tkinter.Menu.tk_bindForTraversal",
},
2: {
(2, 6, 0): {
"commands.getstatus",
"os.popen2",
"os.popen3",
"os.popen4",
"macostools.touched",
},
(2, 7, 0): {
"unittest.case.TestCase.assertEquals",
"unittest.case.TestCase.assertNotEquals",
"unittest.case.TestCase.assertAlmostEquals",
"unittest.case.TestCase.assertNotAlmostEquals",
"unittest.case.TestCase.assert_",
"xml.etree.ElementTree.Element.getchildren",
"xml.etree.ElementTree.Element.getiterator",
"xml.etree.ElementTree.XMLParser.getiterator",
"xml.etree.ElementTree.XMLParser.doctype",
},
},
3: {
(3, 0, 0): {
"inspect.getargspec",
"failUnlessEqual",
"assertEquals",
"failIfEqual",
"assertNotEquals",
"failUnlessAlmostEqual",
"assertAlmostEquals",
"failIfAlmostEqual",
"assertNotAlmostEquals",
"failUnless",
"assert_",
"failUnlessRaises",
"failIf",
"assertRaisesRegexp",
"assertRegexpMatches",
"assertNotRegexpMatches",
},
(3, 1, 0): {
"base64.encodestring",
"base64.decodestring",
"ntpath.splitunc",
"os.path.splitunc",
"os.stat_float_times",
},
(3, 2, 0): {
"cgi.escape",
"configparser.RawConfigParser.readfp",
"xml.etree.ElementTree.Element.getchildren",
"xml.etree.ElementTree.Element.getiterator",
"xml.etree.ElementTree.XMLParser.getiterator",
"xml.etree.ElementTree.XMLParser.doctype",
},
(3, 3, 0): {
"inspect.getmoduleinfo",
"logging.warn",
"logging.Logger.warn",
"logging.LoggerAdapter.warn",
"nntplib._NNTPBase.xpath",
"platform.popen",
"sqlite3.OptimizedUnicode",
"time.clock",
},
(3, 4, 0): {
"importlib.find_loader",
"importlib.abc.Loader.load_module",
"importlib.abc.Loader.module_repr",
"importlib.abc.PathEntryFinder.find_loader",
"importlib.abc.PathEntryFinder.find_module",
"plistlib.readPlist",
"plistlib.writePlist",
"plistlib.readPlistFromBytes",
"plistlib.writePlistToBytes",
},
(3, 4, 4): {"asyncio.tasks.async"},
(3, 5, 0): {
"fractions.gcd",
"inspect.formatargspec",
"inspect.getcallargs",
"platform.linux_distribution",
"platform.dist",
},
(3, 6, 0): {
"importlib._bootstrap_external.FileLoader.load_module",
"_ssl.RAND_pseudo_bytes",
},
(3, 7, 0): {
"sys.set_coroutine_wrapper",
"sys.get_coroutine_wrapper",
"aifc.openfp",
"threading.Thread.isAlive",
"asyncio.Task.current_task",
"asyncio.Task.all_task",
"locale.format",
"ssl.wrap_socket",
"ssl.match_hostname",
"sunau.openfp",
"wave.openfp",
},
(3, 8, 0): {
"gettext.lgettext",
"gettext.ldgettext",
"gettext.lngettext",
"gettext.ldngettext",
"gettext.bind_textdomain_codeset",
"gettext.NullTranslations.output_charset",
"gettext.NullTranslations.set_output_charset",
"threading.Thread.isAlive",
},
(3, 9, 0): {
"binascii.b2a_hqx",
"binascii.a2b_hqx",
"binascii.rlecode_hqx",
"binascii.rledecode_hqx",
},
(3, 10, 0): {
"_sqlite3.enable_shared_cache",
"importlib.abc.Finder.find_module",
"pathlib.Path.link_to",
"zipimport.zipimporter.load_module",
"zipimport.zipimporter.find_module",
"zipimport.zipimporter.find_loader",
"threading.currentThread",
"threading.activeCount",
"threading.Condition.notifyAll",
"threading.Event.isSet",
"threading.Thread.setName",
"threading.Thread.getName",
"threading.Thread.isDaemon",
"threading.Thread.setDaemon",
"cgi.log",
},
},
}
DEPRECATED_CLASSES = {
(3, 3, 0): {
"importlib.abc": {
"Finder",
},
"pkgutil": {
"ImpImporter",
"ImpLoader",
},
"collections": {
"Awaitable",
"Coroutine",
"AsyncIterable",
"AsyncIterator",
"AsyncGenerator",
"Hashable",
"Iterable",
"Iterator",
"Generator",
"Reversible",
"Sized",
"Container",
"Callable",
"Collection",
"Set",
"MutableSet",
"Mapping",
"MutableMapping",
"MappingView",
"KeysView",
"ItemsView",
"ValuesView",
"Sequence",
"MutableSequence",
"ByteString",
},
},
(3, 9, 0): {
"smtpd": {
"MailmanProxy",
}
},
}
def _check_mode_str(mode):
# check type
if not isinstance(mode, str):
return False
# check syntax
modes = set(mode)
_mode = "rwatb+Ux"
creating = "x" in modes
if modes - set(_mode) or len(mode) > len(modes):
return False
# check logic
reading = "r" in modes
writing = "w" in modes
appending = "a" in modes
text = "t" in modes
binary = "b" in modes
if "U" in modes:
if writing or appending or creating:
return False
reading = True
if text and binary:
return False
total = reading + writing + appending + creating
if total > 1:
return False
if not (reading or writing or appending or creating):
return False
return True
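# Editor's illustrative examples for the helper above (not part of pylint):
#
#   _check_mode_str("rb")  -> True   (read, binary)
#   _check_mode_str("w+")  -> True   (write, updating)
#   _check_mode_str("rw")  -> False  (only one of r/w/a/x may be given)
#   _check_mode_str("Uw")  -> False  ("U" combines only with reading)
#   _check_mode_str("rbt") -> False  (text and binary are mutually exclusive)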
class StdlibChecker(DeprecatedMixin, BaseChecker):
__implements__ = (IAstroidChecker,)
name = "stdlib"
msgs = {
"W1501": (
'"%s" is not a valid mode for open.',
"bad-open-mode",
"Python supports: r, w, a[, x] modes with b, +, "
"and U (only with r) options. "
"See https://docs.python.org/2/library/functions.html#open",
),
"W1502": (
"Using datetime.time in a boolean context.",
"boolean-datetime",
"Using datetime.time in a boolean context can hide "
"subtle bugs when the time they represent matches "
"midnight UTC. This behaviour was fixed in Python 3.5. "
"See https://bugs.python.org/issue13936 for reference.",
{"maxversion": (3, 5)},
),
"W1503": (
"Redundant use of %s with constant value %r",
"redundant-unittest-assert",
"The first argument of assertTrue and assertFalse is "
"a condition. If a constant is passed as parameter, that "
"condition will be always true. In this case a warning "
"should be emitted.",
),
"W1505": (
"Using deprecated method %s()",
"deprecated-method",
"The method is marked as deprecated and will be removed in "
"a future version of Python. Consider looking for an "
"alternative in the documentation.",
),
"W1506": (
"threading.Thread needs the target function",
"bad-thread-instantiation",
"The warning is emitted when a threading.Thread class "
"is instantiated without the target function being passed. "
"By default, the first parameter is the group param, not the target param.",
),
"W1507": (
"Using copy.copy(os.environ). Use os.environ.copy() instead. ",
"shallow-copy-environ",
"os.environ is not a dict object but proxy object, so "
"shallow copy has still effects on original object. "
"See https://bugs.python.org/issue15373 for reference.",
),
"E1507": (
"%s does not support %s type argument",
"invalid-envvar-value",
"Env manipulation functions support only string type arguments. "
"See https://docs.python.org/3/library/os.html#os.getenv.",
),
"W1508": (
"%s default type is %s. Expected str or None.",
"invalid-envvar-default",
"Env manipulation functions return None or str values. "
"Supplying anything different as a default may cause bugs. "
"See https://docs.python.org/3/library/os.html#os.getenv.",
),
"W1509": (
"Using preexec_fn keyword which may be unsafe in the presence "
"of threads",
"subprocess-popen-preexec-fn",
"The preexec_fn parameter is not safe to use in the presence "
"of threads in your application. The child process could "
"deadlock before exec is called. If you must use it, keep it "
"trivial! Minimize the number of libraries you call into."
"https://docs.python.org/3/library/subprocess.html#popen-constructor",
),
"W1510": (
"Using subprocess.run without explicitly set `check` is not recommended.",
"subprocess-run-check",
"The check parameter should always be used with explicitly set "
"`check` keyword to make clear what the error-handling behavior is."
"https://docs.python.org/3/library/subprocess.html#subprocess.run",
),
"W1511": (
"Using deprecated argument %s of method %s()",
"deprecated-argument",
"The argument is marked as deprecated and will be removed in the future.",
),
"W1512": (
"Using deprecated class %s of module %s",
"deprecated-class",
"The class is marked as deprecated and will be removed in the future.",
),
"W1513": (
"Using deprecated decorator %s()",
"deprecated-decorator",
"The decorator is marked as deprecated and will be removed in the future.",
),
"W1514": (
"Using open without explicitly specifying an encoding",
"unspecified-encoding",
"It is better to specify an encoding when opening documents. "
"Using the system default implicitly can create problems on other operating systems. "
"See https://www.python.org/dev/peps/pep-0597/",
),
"W1515": (
"Leaving functions creating breakpoints in production code is not recommended",
"forgotten-debug-statement",
"Calls to breakpoint(), sys.breakpointhook() and pdb.set_trace() should be removed "
"from code that is not actively being debugged.",
),
"W1516": (
"'lru_cache' without 'maxsize' will keep all method args alive indefinitely, including 'self'",
"lru-cache-decorating-method",
"By decorating a method with lru_cache the 'self' argument will be linked to "
"the lru_cache function and therefore never garbage collected. Unless your instance "
"will never need to be garbage collected (singleton) it is recommended to refactor "
"code to avoid this pattern or add a maxsize to the cache.",
),
}
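# Editor's illustrative snippets (not part of pylint) of code that would
# trigger some of the messages declared above:
#
#   open("data.txt", "rwx")          -> W1501 bad-open-mode
#   open("data.txt")                 -> W1514 unspecified-encoding
#   subprocess.run(["ls"])           -> W1510 subprocess-run-check
#   threading.Thread(lambda: None)   -> W1506 bad-thread-instantiation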
def __init__(self, linter: Optional["PyLinter"] = None) -> None:
BaseChecker.__init__(self, linter)
self._deprecated_methods: Set[Any] = set()
self._deprecated_methods.update(DEPRECATED_METHODS[0])
for since_vers, func_list in DEPRECATED_METHODS[sys.version_info[0]].items():
if since_vers <= sys.version_info:
self._deprecated_methods.update(func_list)
self._deprecated_attributes = {}
for since_vers, func_list in DEPRECATED_ARGUMENTS.items():
if since_vers <= sys.version_info:
self._deprecated_attributes.update(func_list)
self._deprecated_classes = {}
for since_vers, class_list in DEPRECATED_CLASSES.items():
if since_vers <= sys.version_info:
self._deprecated_classes.update(class_list)
self._deprecated_modules = set()
for since_vers, mod_list in DEPRECATED_MODULES.items():
if since_vers <= sys.version_info:
self._deprecated_modules.update(mod_list)
self._deprecated_decorators = set()
for since_vers, decorator_list in DEPRECATED_DECORATORS.items():
if since_vers <= sys.version_info:
self._deprecated_decorators.update(decorator_list)
def _check_bad_thread_instantiation(self, node):
if not node.kwargs and not node.keywords and len(node.args) <= 1:
self.add_message("bad-thread-instantiation", node=node)
def _check_for_preexec_fn_in_popen(self, node):
if node.keywords:
for keyword in node.keywords:
if keyword.arg == "preexec_fn":
self.add_message("subprocess-popen-preexec-fn", node=node)
def _check_for_check_kw_in_run(self, node):
kwargs = {keyword.arg for keyword in (node.keywords or ())}
if "check" not in kwargs:
self.add_message("subprocess-run-check", node=node)
def _check_shallow_copy_environ(self, node: nodes.Call) -> None:
arg = utils.get_argument_from_call(node, position=0)
try:
inferred_args = arg.inferred()
except astroid.InferenceError:
return
for inferred in inferred_args:
if inferred.qname() == OS_ENVIRON:
self.add_message("shallow-copy-environ", node=node)
break
@utils.check_messages(
"bad-open-mode",
"redundant-unittest-assert",
"deprecated-method",
"deprecated-argument",
"bad-thread-instantiation",
"shallow-copy-environ",
"invalid-envvar-value",
"invalid-envvar-default",
"subprocess-popen-preexec-fn",
"subprocess-run-check",
"deprecated-class",
"unspecified-encoding",
"forgotten-debug-statement",
)
def visit_call(self, node: nodes.Call) -> None:
"""Visit a Call node."""
self.check_deprecated_class_in_call(node)
for inferred in utils.infer_all(node.func):
if inferred is astroid.Uninferable:
continue
if inferred.root().name in OPEN_MODULE:
if (
isinstance(node.func, nodes.Name)
and node.func.name in OPEN_FILES_MODE
):
self._check_open_mode(node)
if (
isinstance(node.func, nodes.Name)
and node.func.name in OPEN_FILES_ENCODING
or isinstance(node.func, nodes.Attribute)
and node.func.attrname in OPEN_FILES_ENCODING
):
self._check_open_encoded(node, inferred.root().name)
elif inferred.root().name == UNITTEST_CASE:
self._check_redundant_assert(node, inferred)
elif isinstance(inferred, nodes.ClassDef):
if inferred.qname() == THREADING_THREAD:
self._check_bad_thread_instantiation(node)
elif inferred.qname() == SUBPROCESS_POPEN:
self._check_for_preexec_fn_in_popen(node)
elif isinstance(inferred, nodes.FunctionDef):
name = inferred.qname()
if name == COPY_COPY:
self._check_shallow_copy_environ(node)
elif name in ENV_GETTERS:
self._check_env_function(node, inferred)
elif name == SUBPROCESS_RUN:
self._check_for_check_kw_in_run(node)
elif name in DEBUG_BREAKPOINTS:
self.add_message("forgotten-debug-statement", node=node)
self.check_deprecated_method(node, inferred)
@utils.check_messages("boolean-datetime")
def visit_unaryop(self, node: nodes.UnaryOp) -> None:
if node.op == "not":
self._check_datetime(node.operand)
@utils.check_messages("boolean-datetime")
def visit_if(self, node: nodes.If) -> None:
self._check_datetime(node.test)
@utils.check_messages("boolean-datetime")
def visit_ifexp(self, node: nodes.IfExp) -> None:
self._check_datetime(node.test)
@utils.check_messages("boolean-datetime")
def visit_boolop(self, node: nodes.BoolOp) -> None:
for value in node.values:
self._check_datetime(value)
@utils.check_messages("lru-cache-decorating-method")
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
if node.decorators and isinstance(node.parent, nodes.ClassDef):
self._check_lru_cache_decorators(node.decorators)
def _check_lru_cache_decorators(self, decorators: nodes.Decorators) -> None:
"""Check if instance methods are decorated with functools.lru_cache."""
lru_cache_nodes: List[nodes.NodeNG] = []
for d_node in decorators.nodes:
try:
for infered_node in d_node.infer():
q_name = infered_node.qname()
if q_name in NON_INSTANCE_METHODS:
return
if q_name not in LRU_CACHE:
return
# Check if there is a maxsize argument to the call
if isinstance(d_node, nodes.Call):
try:
utils.get_argument_from_call(
d_node, position=0, keyword="maxsize"
)
return
except utils.NoSuchArgumentError:
pass
lru_cache_nodes.append(d_node)
break
except astroid.InferenceError:
pass
for lru_cache_node in lru_cache_nodes:
self.add_message(
"lru-cache-decorating-method",
node=lru_cache_node,
confidence=interfaces.INFERENCE,
)
def _check_redundant_assert(self, node, infer):
if (
isinstance(infer, astroid.BoundMethod)
and node.args
and isinstance(node.args[0], nodes.Const)
and infer.name in {"assertTrue", "assertFalse"}
):
self.add_message(
"redundant-unittest-assert",
args=(infer.name, node.args[0].value),
node=node,
)
def _check_datetime(self, node):
"""Check that a datetime was inferred.
If so, emit boolean-datetime warning.
"""
try:
inferred = next(node.infer())
except astroid.InferenceError:
return
if (
isinstance(inferred, astroid.Instance)
and inferred.qname() == "datetime.time"
):
self.add_message("boolean-datetime", node=node)
def _check_open_mode(self, node):
"""Check that the mode argument of an open or file call is valid."""
try:
mode_arg = utils.get_argument_from_call(node, position=1, keyword="mode")
except utils.NoSuchArgumentError:
return
if mode_arg:
mode_arg = utils.safe_infer(mode_arg)
if isinstance(mode_arg, nodes.Const) and not _check_mode_str(
mode_arg.value
):
self.add_message(
"bad-open-mode",
node=node,
args=mode_arg.value or str(mode_arg.value),
)
def _check_open_encoded(self, node: nodes.Call, open_module: str) -> None:
"""Check that the encoded argument of an open call is valid."""
mode_arg = None
try:
if open_module == "_io":
mode_arg = utils.get_argument_from_call(
node, position=1, keyword="mode"
)
elif open_module == "pathlib":
mode_arg = utils.get_argument_from_call(
node, position=0, keyword="mode"
)
except utils.NoSuchArgumentError:
pass
if mode_arg:
mode_arg = utils.safe_infer(mode_arg)
if (
not mode_arg
or isinstance(mode_arg, nodes.Const)
and (not mode_arg.value or "b" not in mode_arg.value)
):
encoding_arg = None
try:
if open_module == "pathlib":
if node.func.attrname == "read_text":
encoding_arg = utils.get_argument_from_call(
node, position=0, keyword="encoding"
)
elif node.func.attrname == "write_text":
encoding_arg = utils.get_argument_from_call(
node, position=1, keyword="encoding"
)
else:
encoding_arg = utils.get_argument_from_call(
node, position=2, keyword="encoding"
)
else:
encoding_arg = utils.get_argument_from_call(
node, position=3, keyword="encoding"
)
except utils.NoSuchArgumentError:
self.add_message("unspecified-encoding", node=node)
if encoding_arg:
encoding_arg = utils.safe_infer(encoding_arg)
if isinstance(encoding_arg, nodes.Const) and encoding_arg.value is None:
self.add_message("unspecified-encoding", node=node)
def _check_env_function(self, node, infer):
env_name_kwarg = "key"
env_value_kwarg = "default"
if node.keywords:
kwargs = {keyword.arg: keyword.value for keyword in node.keywords}
else:
kwargs = None
if node.args:
env_name_arg = node.args[0]
elif kwargs and env_name_kwarg in kwargs:
env_name_arg = kwargs[env_name_kwarg]
else:
env_name_arg = None
if env_name_arg:
self._check_invalid_envvar_value(
node=node,
message="invalid-envvar-value",
call_arg=utils.safe_infer(env_name_arg),
infer=infer,
allow_none=False,
)
if len(node.args) == 2:
env_value_arg = node.args[1]
elif kwargs and env_value_kwarg in kwargs:
env_value_arg = kwargs[env_value_kwarg]
else:
env_value_arg = None
if env_value_arg:
self._check_invalid_envvar_value(
node=node,
infer=infer,
message="invalid-envvar-default",
call_arg=utils.safe_infer(env_value_arg),
allow_none=True,
)
def _check_invalid_envvar_value(self, node, infer, message, call_arg, allow_none):
if call_arg in (astroid.Uninferable, None):
return
name = infer.qname()
if isinstance(call_arg, nodes.Const):
emit = False
if call_arg.value is None:
emit = not allow_none
elif not isinstance(call_arg.value, str):
emit = True
if emit:
self.add_message(message, node=node, args=(name, call_arg.pytype()))
else:
self.add_message(message, node=node, args=(name, call_arg.pytype()))
def deprecated_modules(self):
"""Callback returning the deprecated modules."""
return self._deprecated_modules
def deprecated_methods(self):
return self._deprecated_methods
def deprecated_arguments(self, method: str):
return self._deprecated_attributes.get(method, ())
def deprecated_classes(self, module: str):
return self._deprecated_classes.get(module, ())
def deprecated_decorators(self) -> Iterable:
return self._deprecated_decorators
def register(linter: "PyLinter") -> None:
linter.register_checker(StdlibChecker(linter))
|
PyCQA/pylint
|
pylint/checkers/stdlib.py
|
Python
|
gpl-2.0
| 30,910
|
[
"VisIt"
] |
12ac907bc8bbc2c4ab418fb104a92a734de8e3938c8cffd88f501c353791bae0
|
""" :mod: DMSRequestOperationsBase
====================
Just a collector of common functions
"""
__RCSID__ = "$Id $"
from DIRAC import S_OK, S_ERROR
from DIRAC.RequestManagementSystem.Client.Operation import Operation
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols
from DIRAC.RequestManagementSystem.Client.File import File
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
class DMSRequestOperationsBase( OperationHandlerBase ):
def __init__( self, operation = None, csPath = None ):
OperationHandlerBase.__init__( self, operation, csPath )
self.registrationProtocols = getRegistrationProtocols()
def checkSEsRSS( self, checkSEs = None, access = 'WriteAccess' ):
""" check SEs.
By default, we check the SEs for WriteAccess, but it is configurable
"""
if not checkSEs:
checkSEs = self.operation.targetSEList
elif type( checkSEs ) == str:
checkSEs = [checkSEs]
if access == 'ReadAccess':
seType = 'sourceSE'
else:
seType = 'targetSE'
bannedSEs = []
for checkSE in checkSEs:
seStatus = self.rssSEStatus( checkSE, access, retries = 5 )
if not seStatus["OK"]:
self.log.error( 'Failed to get SE status', seStatus["Message"] )
error = "unknown %s: %s" % ( seType, checkSE )
for opFile in self.operation:
opFile.Error = error
self.operation.Error = error
return S_ERROR( error )
if not seStatus["Value"]:
self.log.info( "%s %s is banned for %s right now" % ( seType.capitalize(), checkSE, access ) )
bannedSEs.append( checkSE )
self.operation.Error = "banned %s: %s;" % ( seType, checkSE )
return S_OK( bannedSEs )
def getRegisterOperation( self, opFile, targetSE, type = 'RegisterFile', catalog = None ):
""" add RegisterReplica operation for file
:param File opFile: operation file
:param str targetSE: target SE
"""
# # add RegisterReplica operation
registerOperation = Operation()
registerOperation.Type = type
registerOperation.TargetSE = targetSE
if catalog:
registerOperation.Catalog = catalog
registerFile = File()
registerFile.LFN = opFile.LFN
registerFile.PFN = StorageElement( targetSE ).getURL( opFile.LFN, protocol = self.registrationProtocols ).get( 'Value', {} ).get( 'Successful', {} ).get( opFile.LFN )
registerFile.GUID = opFile.GUID
registerFile.Checksum = opFile.Checksum
registerFile.ChecksumType = opFile.ChecksumType
registerFile.Size = opFile.Size
registerOperation.addFile( registerFile )
return registerOperation
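# Editor's illustrative sketch (hypothetical subclass, not part of DIRAC): a
# concrete handler built on this base class would typically check the target
# SEs first and then schedule registration, roughly:
#
#   class MyReplicationHandler( DMSRequestOperationsBase ):
#     def __call__( self ):
#       bannedTargets = self.checkSEsRSS()
#       if not bannedTargets['OK'] or bannedTargets['Value']:
#         return S_OK( "waiting for the target SEs to become available" )
#       for opFile in self.operation:
#         registerOperation = self.getRegisterOperation( opFile, 'SOME-TARGET-SE',
#                                                        type = 'RegisterReplica' )
#         # how the new operation is attached to the parent request depends on
#         # the RequestManagementSystem API in use and is not shown here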
|
fibbo/DIRAC
|
DataManagementSystem/Agent/RequestOperations/DMSRequestOperationsBase.py
|
Python
|
gpl-3.0
| 2,815
|
[
"DIRAC"
] |
134759014fd88e28b0defea24b045d45e16403ab82971a0ec7ec128b97c01289
|
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from pypln.backend.workers import Tokenizer
from utils import TaskTest
class TestTokenizerWorker(TaskTest):
def test_tokenizer_should_receive_text_and_return_tokens(self):
doc = {'text': 'The sky is blue, the sun is yellow. This is another sentence.'}
expected_tokens = ['The', 'sky', 'is', 'blue', ',', 'the', 'sun', 'is',
'yellow', '.', 'This', 'is', 'another', 'sentence', '.']
expected_sentences = [['The', 'sky', 'is', 'blue', ',', 'the', 'sun',
'is', 'yellow', '.'], ['This', 'is', 'another', 'sentence', '.']]
doc_id = self.collection.insert(doc, w=1)
Tokenizer().delay(doc_id)
refreshed_document = self.collection.find_one({'_id': doc_id})
tokens = refreshed_document['tokens']
sentences = refreshed_document['sentences']
self.assertEqual(tokens, expected_tokens)
self.assertEqual(sentences, expected_sentences)
|
NAMD/pypln.backend
|
tests/test_worker_tokenizer.py
|
Python
|
gpl-3.0
| 1,683
|
[
"NAMD"
] |
13136765f43f86c2ecd2fbf3ae8f01c1219f98195a2b094b66ae43daae3ed11a
|
"""Testing the IQ stanzas.
IQ stanzas are described at http://xmpp.org/rfcs/rfc6120.html#stanzas-semantics-iq
type attribute must be one of get, set, result, or error.
id attribute is mandatory.
The exciting parts are in the <query> sub element.
Example:
<iq from="alice@wonderland.lit/pda"
id="ld823itz"
to="alice@wonderland.lit"
type="get">
<query xmlns="jabber:iq:roster"/>
</iq>
iq = Iq(from="alice@wonderland.lit/pda", to="alice@wonderland.lit",
type="get", query="jabber:iq:roster")
FIXME: that is likely wrong.
IQ is covered in section 6, Exchanging IQ Stanzas, in RFC 6121.
"""
import chupycabra
from chupycabra import NS_AGENT, NS_AGENTS, NS_AUTH, NS_OOB, NS_REGISTER, NS_XROSTER, \
NS_TIME, NS_VERSION, NS_PASS, NS_BROWSE, NS_LAST, NS_PRIVACY, NS_ROSTER
import pytest
iq_namespaces = [NS_AGENT, NS_AGENTS, NS_AUTH, NS_OOB, NS_REGISTER, NS_XROSTER,
NS_TIME, NS_VERSION, NS_PASS, NS_BROWSE, NS_LAST, NS_PRIVACY]
query_payload = """<query xmlns='jabber:iq:roster'>
<item jid='juliet@example.com'
name='Juliet'
subscription='both'>
<group>Friends</group>
</item>
<item jid='benvolio@example.org'
name='Benvolio'
subscription='to'/>
<item jid='mercutio@example.org'
name='Mercutio'
subscription='from'/>
</query>
"""
def test_create_iq():
iq = chupycabra.Iq()
assert iq.__str__() == '<iq />'
@pytest.mark.parametrize('iq_namespace', iq_namespaces)
def test_iq_create_query_namepace(iq_namespace):
"""This just returns the namespace text."""
iq = chupycabra.Iq(type='get', query=iq_namespace)
assert iq.getQuery() == iq_namespace
def test_iq_create_queryPayload():
"""I think setQueryPayload is broken. It's always making a new query element, not modifying it.
The example client doesn't modify it, so I think it's always been broken.
>>> otherquery = chupycabra.Iq(type='result')
>>> otherquery
<iq type='result' />
>>> otherquery.setQueryPayload(query_payload)
>>> otherquery
<iq type='result'><query><query xmlns = 'jabber:iq:roster' >
<item jid='juliet@example.com' name='Juliet' subscription='both'>
<group>Friends</group>
</item>
<item jid='benvolio@example.org' name='Benvolio' subscription='to' />
<item jid='mercutio@example.org' name='Mercutio' subscription='from' />
</query></query></iq>
"""
iq = chupycabra.Iq(type='result')
iq.setQuery(NS_ROSTER)
iq.setQueryPayload(query_payload)
def test_iq_create_queryNode():
pass
def test_iq_create_roster_result():
"""I'd think this would work, but there are some bugs to fix. I need to
look more in the xmlstream code.
>>> q = chupycabra.Iq(to='romeo@example.net/orchard', type='result')
>>> q.setQuery(chupycabra.NS_ROSTER)
<xmlstream.Node instance at 0x7f99e0268368>
>>> q.setQueryPayload(query_payload)
>>> q
<iq to='romeo@example.net/orchard' type='result'><query xmlns = 'jabber:iq:roster' ><query>
<item jid='juliet@example.com' name='Juliet' subscription='both'>
<group>Friends</group>
</item>
<item jid='benvolio@example.org' name='Benvolio' subscription='to' />
<item jid='mercutio@example.org' name='Mercutio' subscription='from' />
</query></query></iq>
"""
pass
|
andrewdied/chupycabra
|
tests/test_iq.py
|
Python
|
lgpl-3.0
| 3,602
|
[
"exciting"
] |
9947c702a98453fb13d04a09295a6e13a6b722ed2ab6cbd2c50773caa86d296f
|
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (gregory_r_warnes@groton.pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Client.py,v 1.16 2004/02/18 04:05:59 warnes Exp $'
from version import __version__
#import xml.sax
import urllib
from types import *
import re
import base64
# SOAPpy modules
from Errors import *
from Config import Config
from Parser import parseSOAPRPC
from SOAPBuilder import buildSOAP
from Utilities import *
from Types import faultType, simplify
################################################################################
# Client
################################################################################
def SOAPUserAgent():
return "SOAPpy " + __version__ + " (pywebsvcs.sf.net)"
class SOAPAddress:
def __init__(self, url, config = Config):
proto, uri = urllib.splittype(url)
# apply some defaults
if uri[0:2] != '//':
if proto != None:
uri = proto + ':' + uri
uri = '//' + uri
proto = 'http'
host, path = urllib.splithost(uri)
try:
int(host)
host = 'localhost:' + host
except:
pass
if not path:
path = '/'
if proto not in ('http', 'https', 'httpg'):
raise IOError, "unsupported SOAP protocol"
if proto == 'httpg' and not config.GSIclient:
raise AttributeError, \
"GSI client not supported by this Python installation"
if proto == 'https' and not config.SSLclient:
raise AttributeError, \
"SSL client not supported by this Python installation"
self.user,host = urllib.splituser(host)
self.proto = proto
self.host = host
self.path = path
def __str__(self):
return "%(proto)s://%(host)s%(path)s" % self.__dict__
__repr__ = __str__
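# Editor's illustrative example (not part of SOAPpy) of how SOAPAddress parses
# an endpoint URL:
#
#   addr = SOAPAddress("http://example.org:8080/soap/endpoint")
#   addr.proto  -> 'http'
#   addr.host   -> 'example.org:8080'
#   addr.path   -> '/soap/endpoint'
#   str(addr)   -> 'http://example.org:8080/soap/endpoint'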
class HTTPTransport:
def getNS(self, original_namespace, data):
"""Extract the (possibly extended) namespace from the returned
SOAP message."""
if type(original_namespace) == StringType:
pattern="xmlns:\w+=['\"](" + original_namespace + "[^'\"]*)['\"]"
match = re.search(pattern, data)
if match:
return match.group(1)
else:
return original_namespace
else:
return original_namespace
# Need a Timeout someday?
def call(self, addr, data, namespace, soapaction = None, encoding = None,
http_proxy = None, config = Config):
import httplib
if not isinstance(addr, SOAPAddress):
addr = SOAPAddress(addr, config)
# Build a request
if http_proxy:
real_addr = http_proxy
real_path = addr.proto + "://" + addr.host + addr.path
else:
real_addr = addr.host
real_path = addr.path
if addr.proto == 'httpg':
from pyGlobus.io import CHANNEL_MODE, DELEGATION_MODE, GSIHTTP
CHANNEL_MODE = 1
DELEGATION_MODE = 1
r = GSIHTTP(real_addr)
elif addr.proto == 'https':
r = httplib.HTTPS(real_addr)
else:
r = httplib.HTTP(real_addr)
r.putrequest("POST", real_path)
r.putheader("Host", addr.host)
r.putheader("User-agent", SOAPUserAgent())
t = 'text/xml';
if encoding != None:
t += '; charset="%s"' % encoding
r.putheader("Content-type", t)
r.putheader("Content-length", str(len(data)))
# if user is not a user:passwd format
# we'll receive a failure from the server. . .I guess (??)
if addr.user != None:
val = base64.encodestring(addr.user)
r.putheader('Authorization','Basic ' + val.replace('\012',''))
# This fixes sending either "" or "None"
if soapaction == None or len(soapaction) == 0:
r.putheader("SOAPAction", "")
else:
r.putheader("SOAPAction", '"%s"' % soapaction)
if config.dumpHeadersOut:
s = 'Outgoing HTTP headers'
debugHeader(s)
print "POST %s %s" % (real_path, r._http_vsn_str)
print "Host:", addr.host
print "User-agent: SOAPpy " + __version__ + " (http://pywebsvcs.sf.net)"
print "Content-type:", t
print "Content-length:", len(data)
print 'SOAPAction: "%s"' % soapaction
debugFooter(s)
r.endheaders()
if config.dumpSOAPOut:
s = 'Outgoing SOAP'
debugHeader(s)
print data,
if data[-1] != '\n':
print
debugFooter(s)
# send the payload
r.send(data)
# read response line
code, msg, headers = r.getreply()
content_type = headers.get("content-type","text/xml")
content_length = headers.get("Content-length")
if content_length == None:
# No Content-Length provided; just read the whole socket
# This won't work with HTTP/1.1 chunked encoding
data = r.getfile().read()
message_len = len(data)
else:
message_len = int(content_length)
data = r.getfile().read(message_len)
if(config.debug):
print "code=",code
print "msg=", msg
print "headers=", headers
print "content-type=", content_type
print "data=", data
if config.dumpHeadersIn:
s = 'Incoming HTTP headers'
debugHeader(s)
if headers.headers:
print "HTTP/1.? %d %s" % (code, msg)
print "\n".join(map (lambda x: x.strip(), headers.headers))
else:
print "HTTP/0.9 %d %s" % (code, msg)
debugFooter(s)
def startswith(string, val):
return string[0:len(val)] == val
if code == 500 and not \
( startswith(content_type, "text/xml") and message_len > 0 ):
raise HTTPError(code, msg)
if config.dumpSOAPIn:
s = 'Incoming SOAP'
debugHeader(s)
print data,
if (len(data)>0) and (data[-1] != '\n'):
print
debugFooter(s)
if code not in (200, 500):
raise HTTPError(code, msg)
# get the new namespace
if namespace is None:
new_ns = None
else:
new_ns = self.getNS(namespace, data)
# return response payload
return data, new_ns
################################################################################
# SOAP Proxy
################################################################################
class SOAPProxy:
def __init__(self, proxy, namespace = None, soapaction = None,
header = None, methodattrs = None, transport = HTTPTransport,
encoding = 'UTF-8', throw_faults = 1, unwrap_results = None,
http_proxy=None, config = Config, noroot = 0,
simplify_objects=None):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
# get default values for unwrap_results and simplify_objects
# from config
if unwrap_results is None:
self.unwrap_results=config.unwrap_results
else:
self.unwrap_results=unwrap_results
if simplify_objects is None:
self.simplify_objects=config.simplify_objects
else:
self.simplify_objects=simplify_objects
self.proxy = SOAPAddress(proxy, config)
self.namespace = namespace
self.soapaction = soapaction
self.header = header
self.methodattrs = methodattrs
self.transport = transport()
self.encoding = encoding
self.throw_faults = throw_faults
self.http_proxy = http_proxy
self.config = config
self.noroot = noroot
# GSI Additions
if hasattr(config, "channel_mode") and \
hasattr(config, "delegation_mode"):
self.channel_mode = config.channel_mode
self.delegation_mode = config.delegation_mode
#end GSI Additions
def invoke(self, method, args):
return self.__call(method, args, {})
def __call(self, name, args, kw, ns = None, sa = None, hd = None,
ma = None):
ns = ns or self.namespace
ma = ma or self.methodattrs
if sa: # Get soapaction
if type(sa) == TupleType:
sa = sa[0]
else:
if self.soapaction:
sa = self.soapaction
else:
sa = name
if hd: # Get header
if type(hd) == TupleType:
hd = hd[0]
else:
hd = self.header
hd = hd or self.header
if ma: # Get methodattrs
if type(ma) == TupleType: ma = ma[0]
else:
ma = self.methodattrs
ma = ma or self.methodattrs
m = buildSOAP(args = args, kw = kw, method = name, namespace = ns,
header = hd, methodattrs = ma, encoding = self.encoding,
config = self.config, noroot = self.noroot)
r, self.namespace = self.transport.call(self.proxy, m, ns, sa,
encoding = self.encoding,
http_proxy = self.http_proxy,
config = self.config)
p, attrs = parseSOAPRPC(r, attrs = 1)
try:
throw_struct = self.throw_faults and \
isinstance (p, faultType)
except:
throw_struct = 0
if throw_struct:
print p
raise p
# If unwrap_results=1 and there is only element in the struct,
# SOAPProxy will assume that this element is the result
# and return it rather than the struct containing it.
# Otherwise SOAPproxy will return the struct with all the
# elements as attributes.
if self.unwrap_results:
try:
count = 0
for i in p.__dict__.keys():
if i[0] != "_": # don't count the private stuff
count += 1
t = getattr(p, i)
if count == 1: # Only one piece of data, bubble it up
p = t
except:
pass
# Automatically simplify SOAP complex types into the
# corresponding python types. (structType --> dict,
# arrayType --> array, etc.)
if self.simplify_objects:
p = simplify(p)
if self.config.returnAllAttrs:
return p, attrs
return p
def _callWithBody(self, body):
return self.__call(None, body, {})
def __getattr__(self, name): # hook to catch method calls
if name == '__del__':
raise AttributeError, name
return self.__Method(self.__call, name, config = self.config)
# To handle attribute weirdness
class __Method:
# Some magic to bind a SOAP method to an RPC server.
# Supports "nested" methods (e.g. examples.getStateName) -- concept
# borrowed from xmlrpc/soaplib -- www.pythonware.com
# Altered (improved?) to let you inline namespaces on a per call
# basis ala SOAP::LITE -- www.soaplite.com
def __init__(self, call, name, ns = None, sa = None, hd = None,
ma = None, config = Config):
self.__call = call
self.__name = name
self.__ns = ns
self.__sa = sa
self.__hd = hd
self.__ma = ma
self.__config = config
return
def __call__(self, *args, **kw):
if self.__name[0] == "_":
if self.__name in ["__repr__","__str__"]:
return self.__repr__()
else:
return self.__f_call(*args, **kw)
else:
return self.__r_call(*args, **kw)
def __getattr__(self, name):
if name == '__del__':
raise AttributeError, name
if self.__name[0] == "_":
# Don't nest method if it is a directive
return self.__class__(self.__call, name, self.__ns,
self.__sa, self.__hd, self.__ma)
return self.__class__(self.__call, "%s.%s" % (self.__name, name),
self.__ns, self.__sa, self.__hd, self.__ma)
def __f_call(self, *args, **kw):
if self.__name == "_ns": self.__ns = args
elif self.__name == "_sa": self.__sa = args
elif self.__name == "_hd": self.__hd = args
elif self.__name == "_ma": self.__ma = args
return self
def __r_call(self, *args, **kw):
return self.__call(self.__name, args, kw, self.__ns, self.__sa,
self.__hd, self.__ma)
def __repr__(self):
return "<%s at %d>" % (self.__class__, id(self))
|
intip/da-apps
|
plugins/da_centrallogin/modules/soappy/SOAPpy/Client.py
|
Python
|
gpl-2.0
| 15,379
|
[
"Brian"
] |
ec0d4f3d91ff2d04cd817fd62d9774ea2365f97423382847f066544985eebf8b
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from rdkit.Chem import rdMolDescriptors
from rdkit import DataStructs
__license__ = "X11"
METADATA = {
"id": "method_rdkit_hashap_1024_tanimoto",
"representation": "hashap_1024",
"similarity": "tanimoto"
}
def _compute_fingerprint(molecule):
return rdMolDescriptors.GetHashedAtomPairFingerprintAsBitVect(
molecule, nBits=1024)
def _compute_similarity(left, right):
return DataStructs.TanimotoSimilarity(left, right)
def create_model(train_ligands, train_decoys):
model = []
for molecule in train_ligands:
model.append({
"name": molecule.GetProp("_Name"),
"fingerprint": _compute_fingerprint(molecule)
})
model_information = {}
return model, model_information
def compute_score(model, molecule):
fingerprint = _compute_fingerprint(molecule)
similarities = [_compute_similarity(fingerprint, item["fingerprint"])
for item in model]
max_score = max(similarities)
index_of_max_score = similarities.index(max_score)
closest_molecule = model[index_of_max_score]
return {
"value": max_score,
"info": {
"closest": closest_molecule["name"]
}
}
def compute_similarity(left, right):
return _compute_similarity(_compute_fingerprint(left),
_compute_fingerprint(right))
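if __name__ == "__main__":
    # Editor's minimal usage sketch, not part of the original module. The SMILES
    # strings below are arbitrary illustrative molecules, not benchmark data.
    from rdkit import Chem

    def _named(smiles, name):
        molecule = Chem.MolFromSmiles(smiles)
        molecule.SetProp("_Name", name)
        return molecule

    ligands = [_named("CCO", "ethanol"), _named("CCN", "ethylamine")]
    model, _info = create_model(ligands, train_decoys=[])
    query = _named("CCOC", "methoxyethane")
    # Report the best Tanimoto similarity against the model ligands (with the
    # name of the closest one), then a direct pairwise similarity.
    print(compute_score(model, query))
    print(compute_similarity(ligands[0], query))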
|
skodapetr/lbvs-environment
|
methods/ap/hashap_1024_tanimoto.py
|
Python
|
mit
| 1,417
|
[
"RDKit"
] |
19a9266d3b37c4cac829ef60bfb14a5b5f071e7e2f745c617d153a9b2d1136fc
|
from __future__ import print_function, division
from os.path import join
import tempfile
import shutil
import io
from io import BytesIO
try:
from subprocess import STDOUT, CalledProcessError
from sympy.core.compatibility import check_output
except ImportError:
pass
from sympy.core.compatibility import u
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from .latex import latex
from sympy.utilities.decorator import doctest_depends_on
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
filename=None, outputbuffer=None, preamble=None, dvioptions=None,
outputTexFile=None, **latex_settings):
r"""
View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.
If the expr argument is an expression, it will be exported to LaTeX and
then compiled using the available TeX distribution. The first argument,
'expr', may also be a LaTeX string. The function will then run the
appropriate viewer for the given output format or use the user defined
one. By default png output is generated.
By default pretty Euler fonts are used for typesetting (they were used to
typeset the well known "Concrete Mathematics" book). For that to work, you
need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
texlive-fonts-extra package). If you prefer default AMS fonts or your
system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
argument.
To use viewer auto-detection, lets say for 'png' output, issue
>>> from sympy import symbols, preview, Symbol
>>> x, y = symbols("x,y")
>>> preview(x + y, output='png')
This will choose 'pyglet' by default. To select a different one, do
>>> preview(x + y, output='png', viewer='gimp')
The 'png' format is considered special. For all other formats the rules
are slightly different. As an example we will take 'dvi' output format. If
you would run
>>> preview(x + y, output='dvi')
then 'view' will look for available 'dvi' viewers on your system
(predefined in the function, so it will try evince, first, then kdvi and
xdvi). If nothing is found you will need to set the viewer explicitly.
>>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')
This will skip auto-detection and will run user specified
'superior-dvi-viewer'. If 'view' fails to find it on your system it will
gracefully raise an exception.
You may also enter 'file' for the viewer argument. Doing so will cause
this function to return a file object in read-only mode, if 'filename'
is unset. However, if it was set, then 'preview' writes the generated
file to this filename instead.
There is also support for writing to a BytesIO like object, which needs
to be passed to the 'outputbuffer' argument.
>>> from io import BytesIO
>>> obj = BytesIO()
>>> preview(x + y, output='png', viewer='BytesIO',
... outputbuffer=obj)
The LaTeX preamble can be customized by setting the 'preamble' keyword
argument. This can be used, e.g., to set a different font size, use a
custom documentclass or import certain set of LaTeX packages.
>>> preamble = "\\documentclass[10pt]{article}\n" \
... "\\usepackage{amsmath,amsfonts}\\begin{document}"
>>> preview(x + y, output='png', preamble=preamble)
If the value of 'output' is different from 'dvi' then command line
options can be set ('dvioptions' argument) for the execution of the
'dvi'+output conversion tool. These options have to be in the form of a
list of strings (see subprocess.Popen).
Additional keyword args will be passed to the latex call, e.g., the
symbol_names flag.
>>> phidd = Symbol('phidd')
>>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})
For post-processing the generated TeX File can be written to a file by
passing the desired filename to the 'outputTexFile' keyword
argument. To write the TeX code to a file named
"sample.tex" and run the default png viewer to display the resulting
bitmap, do
>>> preview(x + y, outputTexFile="sample.tex")
"""
special = [ 'pyglet' ]
if viewer is None:
if output == "png":
viewer = "pyglet"
else:
# sorted in order from most pretty to most ugly
# very discussable, but indeed 'gv' looks awful :)
# TODO add candidates for windows to list
candidates = {
"dvi": [ "evince", "okular", "kdvi", "xdvi" ],
"ps": [ "evince", "okular", "gsview", "gv" ],
"pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
}
try:
for candidate in candidates[output]:
path = find_executable(candidate)
if path is not None:
viewer = path
break
else:
raise SystemError(
"No viewers found for '%s' output format." % output)
except KeyError:
raise SystemError("Invalid output format: %s" % output)
else:
if viewer == "file":
if filename is None:
SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
"specified filename", deprecated_since_version="0.7.3",
useinstead="viewer=\"file\" and filename=\"desiredname\"",
issue=7018).warn()
elif viewer == "StringIO":
SymPyDeprecationWarning(feature="The preview() viewer StringIO",
useinstead="BytesIO", deprecated_since_version="0.7.4",
issue=7083).warn()
viewer = "BytesIO"
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"StringIO\"")
elif viewer == "BytesIO":
if outputbuffer is None:
raise ValueError("outputbuffer has to be a BytesIO "
"compatible object if viewer=\"BytesIO\"")
elif viewer not in special and not find_executable(viewer):
raise SystemError("Unrecognized viewer: %s" % viewer)
if preamble is None:
actual_packages = packages + ("amsmath", "amsfonts")
if euler:
actual_packages += ("euler",)
package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
for p in actual_packages])
preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s
\begin{document}
""" % (package_includes)
else:
if len(packages) > 0:
raise ValueError("The \"packages\" keyword must not be set if a "
"custom LaTeX preamble was specified")
latex_main = preamble + '\n%s\n\n' + r"\end{document}"
if isinstance(expr, str):
latex_string = expr
else:
latex_string = latex(expr, mode='inline', **latex_settings)
try:
workdir = tempfile.mkdtemp()
with io.open(join(workdir, 'texput.tex'), 'w', encoding='utf-8') as fh:
rendered = latex_main % latex_string
fh.write(u(rendered.replace(r'\u', r'\\u')))
if outputTexFile is not None:
shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
if not find_executable('latex'):
raise RuntimeError("latex program is not installed")
try:
check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
'texput.tex'], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'latex' exited abnormally with the following output:\n%s" %
e.output)
if output != "dvi":
defaultoptions = {
"ps": [],
"pdf": [],
"png": ["-T", "tight", "-z", "9", "--truecolor"],
"svg": ["--no-fonts"],
}
commandend = {
"ps": ["-o", "texput.ps", "texput.dvi"],
"pdf": ["texput.dvi", "texput.pdf"],
"png": ["-o", "texput.png", "texput.dvi"],
"svg": ["-o", "texput.svg", "texput.dvi"],
}
if output == "svg":
cmd = ["dvisvgm"]
else:
cmd = ["dvi" + output]
if not find_executable(cmd[0]):
raise RuntimeError("%s is not installed" % cmd[0])
try:
if dvioptions is not None:
cmd.extend(dvioptions)
else:
cmd.extend(defaultoptions[output])
cmd.extend(commandend[output])
except KeyError:
raise SystemError("Invalid output format: %s" % output)
try:
check_output(cmd, cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s' exited abnormally with the following output:\n%s" %
(' '.join(cmd), e.output))
src = "texput.%s" % (output)
if viewer == "file":
if filename is None:
buffer = BytesIO()
with open(join(workdir, src), 'rb') as fh:
buffer.write(fh.read())
return buffer
else:
shutil.move(join(workdir,src), filename)
elif viewer == "BytesIO":
with open(join(workdir, src), 'rb') as fh:
outputbuffer.write(fh.read())
elif viewer == "pyglet":
try:
from pyglet import window, image, gl
from pyglet.window import key
except ImportError:
raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
if output == "png":
from pyglet.image.codecs.png import PNGImageDecoder
img = image.load(join(workdir, src), decoder=PNGImageDecoder())
else:
raise SystemError("pyglet preview works only for 'png' files.")
offset = 25
config = gl.Config(double_buffer=False)
win = window.Window(
width=img.width + 2*offset,
height=img.height + 2*offset,
caption="sympy",
resizable=False,
config=config
)
win.set_vsync(False)
try:
def on_close():
win.has_exit = True
win.on_close = on_close
def on_key_press(symbol, modifiers):
if symbol in [key.Q, key.ESCAPE]:
on_close()
win.on_key_press = on_key_press
def on_expose():
gl.glClearColor(1.0, 1.0, 1.0, 1.0)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
img.blit(
(win.width - img.width) / 2,
(win.height - img.height) / 2
)
win.on_expose = on_expose
while not win.has_exit:
win.dispatch_events()
win.flip()
except KeyboardInterrupt:
pass
win.close()
else:
try:
check_output([viewer, src], cwd=workdir, stderr=STDOUT)
except CalledProcessError as e:
raise RuntimeError(
"'%s %s' exited abnormally with the following output:\n%s" %
(viewer, src, e.output))
finally:
try:
shutil.rmtree(workdir) # delete directory
except OSError as e:
if e.errno != 2: # code 2 - no such file or directory
raise
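# --- Illustrative usage sketch (not part of the original module) ---
# The docstring above describes the special 'file' viewer: when no filename is
# given, preview() hands back the rendered image as a BytesIO buffer. The snippet
# below is a minimal sketch of that path; it assumes a working LaTeX + dvipng
# installation, and 'expr.png' is just a hypothetical output name.
if __name__ == '__main__':
    from sympy.abc import x, y
    png_buffer = preview(x**2 + y, output='png', viewer='file')  # BytesIO when filename is unset
    with open('expr.png', 'wb') as out:
        out.write(png_buffer.getvalue())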
|
atreyv/sympy
|
sympy/printing/preview.py
|
Python
|
bsd-3-clause
| 12,227
|
[
"VisIt"
] |
56e579ab0d6815ef0cf7d0f9f18ed0ab6a1bf96be4b4dce70e54b21964c78343
|
def build_models(process_only_these_targets=None, process_only_these_templates=None, verbose=False):
r'''Uses the build_model method to build homology models for a given set of
targets and templates.
MPI-enabled.
'''
import os
import Bio.SeqIO
import mpi4py.MPI
comm = mpi4py.MPI.COMM_WORLD
rank = comm.rank
size = comm.size
targets_dir = os.path.abspath('targets')
templates_dir = os.path.abspath('templates')
models_dir = os.path.abspath('models')
targets_fasta_filename = os.path.join(targets_dir, 'targets.fa')
templates_fasta_filename = os.path.join(templates_dir, 'templates.fa')
targets = list( Bio.SeqIO.parse(targets_fasta_filename, 'fasta') )
templates = list( Bio.SeqIO.parse(templates_fasta_filename, 'fasta') )
ntemplates = len(templates)
for target in targets:
if process_only_these_targets and target.id not in process_only_these_targets: continue
models_target_dir = os.path.join(models_dir, target.id)
if rank == 0:
print "========================================================================="
print "Working on target '%s'" % (target.id)
print "========================================================================="
if not os.path.exists(models_target_dir):
os.mkdir(models_target_dir)
for template_index in range(rank, ntemplates, size):
template = templates[template_index]
if process_only_these_templates and template.id not in process_only_these_templates: continue
build_model(target, template, verbose)
comm.Barrier()
if rank == 0:
print 'Done.'
def build_model(target, template, verbose=False):
r'''Uses Modeller to build a homology model for a given target and
template.
Will not run Modeller if the output files already exist.
Parameters
----------
target : BioPython SeqRecord
template : BioPython SeqRecord
Must be a corresponding .pdb template file with the same ID in the
templates/structures directory.
'''
# align target and template
import os, tempfile, shutil, gzip
import Bio.pairwise2
import Bio.SubsMat.MatrixInfo
templates_dir = os.path.abspath('templates')
models_dir = os.path.abspath('models')
models_target_dir = os.path.join(models_dir, target.id)
model_dir = os.path.join(models_target_dir, template.id)
aln_filename = os.path.join(model_dir, 'alignment.pir')
seqid_filename = os.path.join(model_dir, 'sequence-identity.txt')
model_pdbfilename = os.path.join(model_dir, 'model.pdb')
restraint_filename_gz = os.path.join(model_dir, 'restraints.rsr.gz')
current_dir = os.getcwd()
# Skip model-building if files already exist.
files_to_check = [model_dir, model_pdbfilename, seqid_filename, aln_filename, restraint_filename_gz]
files_are_present = [os.path.exists(filename) for filename in files_to_check]
if all(files_are_present):
if verbose: print "Output files already exist for target '%s' // template '%s'; files were not overwritten." % (target.id, template.id)
return
print "-------------------------------------------------------------------------"
print "Modelling '%s' => '%s'" % (target.id, template.id)
print "-------------------------------------------------------------------------"
# Conduct alignment
matrix = Bio.SubsMat.MatrixInfo.gonnet
gap_open = -10
gap_extend = -0.5
aln = Bio.pairwise2.align.globalds(str(target.seq), str(template.seq), matrix, gap_open, gap_extend)
# Create temp dir for modelling, and chdir
temp_dir = tempfile.mkdtemp()
try:
os.chdir(temp_dir)
# Write Modeller-format PIR alignment file
tmp_aln_filename = 'aligned.pir'
contents = "Target-template alignment by clustal omega\n"
contents += ">P1;%s\n" % target.id
contents += "sequence:%s:FIRST:@:LAST :@:::-1.00:-1.00\n" % target.id
contents += aln[0][0] + '*\n'
contents += ">P1;%s\n" % template.id
contents += "structureX:%s:FIRST:@:LAST : :undefined:undefined:-1.00:-1.00\n" % template.id
contents += aln[0][1] + '*\n'
outfile = open('aligned.pir', 'w')
outfile.write(contents)
outfile.close()
# Run Modeller
import modeller
import modeller.automodel
modeller.log.none()
env = modeller.environ()
env.io.atom_files_directory = [os.path.join(templates_dir, 'structures')]
a = modeller.automodel.allhmodel(env,
# file with template codes and target sequence
alnfile = tmp_aln_filename,
# PDB codes of the template
knowns = template.id,
# code of the target
sequence = target.id)
a.make() # do homology modeling
tmp_model_pdbfilename = a.outputs[0]['name']
target_model = modeller.model(env, file=tmp_model_pdbfilename)
# Create directory to place final models in
if not os.path.exists(model_dir):
os.mkdir(model_dir)
target_model.write(file=model_pdbfilename)
# Write sequence identity.
with open(seqid_filename, 'w') as seqid_file:
seqid_file.write('%.1f\n' % target_model.seq_id)
# Copy alignment
shutil.move(tmp_aln_filename, aln_filename)
# Copy restraints.
with open('%s.rsr' % target.id, 'r') as rsrfile:
with gzip.open(restraint_filename_gz, 'wb') as rsrgzfile:
rsrgzfile.write(rsrfile.read())
# Clean up temp dir
os.chdir(current_dir)
if os.path.getsize(model_pdbfilename) < 1:
raise Exception, 'Output PDB file is empty. Could be a filesystem error.'
text = "---------------------------------------------------------------------------------\n"
text += 'Successfully modeled target %s on template %s.\n' % (target.id, template.id)
text += "Sequence identity was %.1f%%.\n" % (target_model.seq_id)
return text
finally:
shutil.rmtree(temp_dir)
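# --- Illustrative usage sketch (not part of the original module) ---
# build_model() assumes the targets/templates/models directory layout created by
# the rest of this pipeline (models/<target.id>/ already exists, as build_models()
# ensures) and a licensed Modeller installation. The FASTA paths below are the
# same defaults used in build_models() above.
if __name__ == '__main__':
    import Bio.SeqIO
    example_targets = list(Bio.SeqIO.parse('targets/targets.fa', 'fasta'))
    example_templates = list(Bio.SeqIO.parse('templates/templates.fa', 'fasta'))
    if example_targets and example_templates:
        # Build a single homology model for the first target/template pair.
        build_model(example_targets[0], example_templates[0], verbose=True)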
def sort_by_sequence_identity(process_only_these_targets=None, verbose=False):
'''Compile sorted list of templates by sequence identity.
Runs serially.
'''
import os
import numpy
import Bio.SeqIO
import mpi4py.MPI
comm = mpi4py.MPI.COMM_WORLD
rank = comm.rank
if rank == 0:
targets_dir = os.path.abspath("targets")
templates_dir = os.path.abspath("templates")
models_dir = os.path.abspath("models")
targets_fasta_filename = os.path.join(targets_dir, 'targets.fa')
targets = list( Bio.SeqIO.parse(targets_fasta_filename, 'fasta') )
templates_fasta_filename = os.path.join(templates_dir, 'templates.fa')
templates = list( Bio.SeqIO.parse(templates_fasta_filename, 'fasta') )
# ========
# Compile sorted list by sequence identity
# ========
for target in targets:
# Process only specified targets if directed.
if process_only_these_targets and (target.id not in process_only_these_targets): continue
models_target_dir = os.path.join(models_dir, target.id)
if not os.path.exists(models_target_dir): continue
print "-------------------------------------------------------------------------"
print "Compiling template sequence identities for target %s" % (target.id)
print "-------------------------------------------------------------------------"
# ========
# Build a list of valid models
# ========
if verbose: print "Building list of valid models..."
valid_templates = list()
for template in templates:
model_filename = os.path.join(models_target_dir, template.id, 'model.pdb')
if os.path.exists(model_filename):
valid_templates.append(template)
nvalid = len(valid_templates)
if verbose: print "%d valid models found" % nvalid
# ========
# Sort by sequence identity
# ========
if verbose: print "Sorting models in order of decreasing sequence identity..."
seqids = numpy.zeros([nvalid], numpy.float32)
for (template_index, template) in enumerate(valid_templates):
model_seqid_filename = os.path.join(models_target_dir, template.id, 'sequence-identity.txt')
with open(model_seqid_filename, 'r') as model_seqid_file:
firstline = model_seqid_file.readline().strip()
seqid = float(firstline)
seqids[template_index] = seqid
sorted_seqids = numpy.argsort(-seqids)
# ========
# Write templates sorted by sequence identity
# ========
seq_ofilename = os.path.join(models_target_dir, 'sequence-identities.txt')
with open(seq_ofilename, 'w') as seq_ofile:
for index in sorted_seqids:
template = valid_templates[index]
identity = seqids[index]
seq_ofile.write('%-40s %6.1f\n' % (template.id, identity))
comm.Barrier()
if rank == 0:
print 'Done.'
def cluster_models(process_only_these_targets=None, verbose=False):
'''Cluster models based on RMSD, and filter out non-unique models as
determined by a given cutoff.
Runs serially.
'''
import os, glob
import Bio.SeqIO
import mdtraj
import mpi4py.MPI
comm = mpi4py.MPI.COMM_WORLD
rank = comm.rank
if rank == 0:
targets_dir = os.path.abspath("targets")
templates_dir = os.path.abspath("templates")
models_dir = os.path.abspath("models")
targets_fasta_filename = os.path.join(targets_dir, 'targets.fa')
targets = list( Bio.SeqIO.parse(targets_fasta_filename, 'fasta') )
templates_fasta_filename = os.path.join(templates_dir, 'templates.fa')
templates = list( Bio.SeqIO.parse(templates_fasta_filename, 'fasta') )
cutoff = 0.06 # Cutoff for RMSD clustering (nm)
for target in targets:
if process_only_these_targets and (target.id not in process_only_these_targets): continue
models_target_dir = os.path.join(models_dir, target.id)
if not os.path.exists(models_target_dir): continue
# =============================
# Construct a mdtraj trajectory containing all models
# =============================
print 'Building a list of valid models...'
model_pdbfilenames = []
valid_templateIDs = []
for t, template in enumerate(templates):
model_dir = os.path.join(models_target_dir, template.id)
model_pdbfilename = os.path.join(model_dir, 'model.pdb')
if not os.path.exists(model_pdbfilename):
continue
model_pdbfilenames.append(model_pdbfilename)
valid_templateIDs.append(template.id)
print 'Constructing a trajectory containing all valid models...'
traj = mdtraj.load(model_pdbfilenames)
# =============================
# Clustering
# =============================
print 'Conducting RMSD-based clustering...'
# Remove any existing unique_by_clustering files
for f in glob.glob( models_target_dir+'/*_PK_*/unique_by_clustering' ):
os.unlink(f)
            # Each template will be added to the list uniques if its minimum RMSD
            # to every previously processed model is at least the cutoff (0.06 nm).
uniques=[]
min_rmsd = []
for (t, templateID) in enumerate(valid_templateIDs):
model_dir = os.path.join(models_target_dir, templateID)
# Add the first template to the list of uniques
if t==0:
uniques.append(templateID)
with open( os.path.join(model_dir, 'unique_by_clustering'), 'w') as unique_file: pass
continue
# Cluster using CA atoms
CAatoms = [a.index for a in traj.topology.atoms if a.name == 'CA']
rmsds = mdtraj.rmsd(traj[0:t], traj[t], atom_indices=CAatoms, parallel=False)
min_rmsd.append( min(rmsds) )
if min_rmsd[-1] < cutoff:
continue
else:
uniques.append( templateID )
# Create a blank file to say this template was found to be unique
# by clustering
with open( os.path.join(model_dir, 'unique_by_clustering'), 'w') as unique_file: pass
with open( os.path.join(models_target_dir, 'unique-models.txt'), 'w') as uniques_file:
for u in uniques:
uniques_file.write(u+'\n')
print '%d unique models (from original set of %d) using cutoff of %.3f nm' % (len(uniques), len(valid_templateIDs), cutoff)
comm.Barrier()
if rank == 0:
print 'Done.'
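# --- Minimal sketch (not part of the original module) of the greedy filter above ---
# cluster_models() keeps a model only when its smallest RMSD to *all* previously
# processed models is at least the cutoff; model 0 is always kept. The helper below
# reproduces that rule on a plain matrix of precomputed RMSD values (in nm), so the
# logic can be read and tested without mdtraj or any PDB files.
def _greedy_unique_sketch(pairwise_rmsd, cutoff=0.06):
    kept = [0]
    for i in range(1, len(pairwise_rmsd)):
        # compare against every earlier model (kept or not), mirroring cluster_models()
        if min(pairwise_rmsd[i][j] for j in range(i)) >= cutoff:
            kept.append(i)
    return kept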
|
choderalab/Ensembler2
|
MSMSeeder/attic/modelling.py
|
Python
|
gpl-2.0
| 13,563
|
[
"Biopython",
"MDTraj"
] |
fdf02f6ed81f87849b50cd8a279e03285556600772b726e2f95544f667cd3047
|
# symcompartment.py ---
#
# Filename: symcompartment.py
# Description:
# Author:
# Maintainer:
# Created: Thu Jun 20 17:47:10 2013 (+0530)
# Version:
# Last-Updated: Fri Jul 12 12:10:52 2013 (+0530)
# By: subha
# Update #: 71
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import numpy as np
import pylab
import moose
simdt = 1e-6
simtime = 100e-3
def test_symcompartment():
model = moose.Neutral('model')
soma = moose.SymCompartment('%s/soma' % (model.path))
soma.Em = -60e-3
soma.Rm = 1000402560
soma.Cm = 2.375043912e-11
soma.Ra = 233957.7812
d1 = moose.SymCompartment('%s/d1' % (model.path))
d1.Rm = 397887392
d1.Cm = 2.261946489e-12
d1.Ra = 24867960
d2 = moose.SymCompartment('%s/d2' % (model.path))
d2.Rm = 2.877870285e+10
d2.Cm = 8.256105218e-13
d2.Ra = 20906072
moose.connect(d1, 'proximal', soma, 'distal')
moose.connect(d2, 'proximal', soma, 'distal')
moose.connect(d1, 'sibling', d2, 'sibling')
pg = moose.PulseGen('/model/pulse')
pg.delay[0] = 10e-3
pg.width[0] = 20e-3
pg.level[0] = 1e-6
pg.delay[1] = 1e9
moose.connect(pg, 'output', d1, 'injectMsg')
data = moose.Neutral('/data')
tab_soma = moose.Table('%s/soma_Vm' % (data.path))
tab_d1 = moose.Table('%s/d1_Vm' % (data.path))
tab_d2 = moose.Table('%s/d2_Vm' % (data.path))
moose.connect(tab_soma, 'requestOut', soma, 'getVm')
moose.connect(tab_d1, 'requestOut', d1, 'getVm')
moose.connect(tab_d2, 'requestOut', d2, 'getVm')
moose.setClock(0, simdt)
moose.setClock(1, simdt)
moose.setClock(2, simdt)
moose.useClock(0, '/model/##[ISA=Compartment]', 'init') # This is allowed because SymCompartment is a subclass of Compartment
moose.useClock(1, '/model/##', 'process')
moose.useClock(2, '/data/##[ISA=Table]', 'process')
moose.reinit()
moose.start(simtime)
t = np.linspace(0, simtime, len(tab_soma.vector))
data_matrix = np.vstack((t, tab_soma.vector, tab_d1.vector, tab_d2.vector))
np.savetxt('symcompartment.txt', data_matrix.transpose())
pylab.plot(t, tab_soma.vector, label='Vm_soma')
pylab.plot(t, tab_d1.vector, label='Vm_d1')
pylab.plot(t, tab_d2.vector, label='Vm_d2')
pylab.show()
if __name__ == '__main__':
test_symcompartment()
#
# symcompartment.py ends here
|
dilawar/moose-full
|
moose-examples/snippets/symcompartment.py
|
Python
|
gpl-2.0
| 3,137
|
[
"MOOSE"
] |
3e38383d3e7561473d89e7505a56371b1bdb17c0c0be055059841f93475861e6
|
#
# Copyright (c) 2014-2015, Continuum Analytics, Inc. and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of Continuum Analytics nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# 2015-04-22: Modified to cache results of previously computed tasks
from __future__ import absolute_import, division, print_function
from operator import add
from itertools import chain
def inc(x):
return x + 1
def ishashable(x):
""" Is x hashable?
Example
-------
>>> ishashable(1)
True
>>> ishashable([1])
False
"""
try:
hash(x)
return True
except TypeError:
return False
def istask(x):
""" Is x a runnable task?
A task is a tuple with a callable first argument
Example
-------
>>> inc = lambda x: x + 1
>>> istask((inc, 1))
True
>>> istask(1)
False
"""
return isinstance(x, tuple) and x and callable(x[0])
def preorder_traversal(task):
"""A generator to preorder-traverse a task."""
for item in task:
if istask(item):
for i in preorder_traversal(item):
yield i
elif isinstance(item, list):
yield list
for i in preorder_traversal(item):
yield i
else:
yield item
cache = {}
def _get_task(d, task, maxdepth=1000):
# non-recursive. DAG property is checked upon reaching maxdepth.
_iter = lambda *args: iter(args)
    # We construct a nested hierarchy of tuples to mimic the execution stack
# of frames that Python would maintain for a recursive implementation.
# A frame is associated with a single task from a Dask.
# A frame tuple has three elements:
# 1) The function for the task.
# 2) The arguments for the task (typically keys in the Dask).
# Arguments are stored in reverse order, and elements are popped
# as they are evaluated.
# 3) The calculated results of the arguments from (2).
global cache
stack = [(task[0], list(task[:0:-1]), [])]
while True:
func, args, results = stack[-1]
if not args:
try:
val = cache[(func, tuple(results))]
except KeyError:
val = func(*results)
cache[(func, tuple(results))] = val
if len(stack) == 1:
return val
stack.pop()
stack[-1][2].append(val)
continue
elif maxdepth and len(stack) > maxdepth:
cycle = getcycle(d, list(task[1:]))
if cycle:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
maxdepth = None
key = args.pop()
if isinstance(key, list):
# v = (get(d, k, concrete=False) for k in key) # recursive
# Fake being lazy
stack.append((_iter, key[::-1], []))
continue
elif ishashable(key) and key in d:
v = d[key]
else:
v = key
if istask(v):
stack.append((v[0], list(v[:0:-1]), []))
else:
results.append(v)
def get(d, key, get=None, concrete=True, **kwargs):
""" Get value from Dask
Example
-------
>>> inc = lambda x: x + 1
>>> d = {'x': 1, 'y': (inc, 'x')}
>>> get(d, 'x')
1
>>> get(d, 'y')
2
See Also
--------
set
"""
get = get or _get
if isinstance(key, list):
v = (get(d, k, get=get, concrete=concrete) for k in key)
if concrete:
v = list(v)
elif ishashable(key) and key in d:
v = d[key]
elif istask(key):
v = key
else:
return key
if istask(v):
if get is _get:
# use non-recursive method by default
return _get_task(d, v)
func, args = v[0], v[1:]
args2 = [get(d, arg, get=get, concrete=False) for arg in args]
return func(*[get(d, arg, get=get) for arg in args2])
else:
return v
_get = get
def _deps(dsk, arg):
""" Get dependencies from keys or tasks
Helper function for get_dependencies.
>>> dsk = {'x': 1, 'y': 2}
>>> _deps(dsk, 'x')
['x']
>>> _deps(dsk, (add, 'x', 1))
['x']
>>> _deps(dsk, (add, 'x', (inc, 'y'))) # doctest: +SKIP
['x', 'y']
"""
if istask(arg):
result = []
for a in arg[1:]:
result.extend(_deps(dsk, a))
return result
try:
if arg not in dsk:
return []
except TypeError: # not hashable
return []
return [arg]
def get_dependencies(dsk, task, as_list=False):
""" Get the immediate tasks on which this task depends
>>> dsk = {'x': 1,
... 'y': (inc, 'x'),
... 'z': (add, 'x', 'y'),
... 'w': (inc, 'z'),
... 'a': (add, (inc, 'x'), 1)}
>>> get_dependencies(dsk, 'x')
set([])
>>> get_dependencies(dsk, 'y')
set(['x'])
>>> get_dependencies(dsk, 'z') # doctest: +SKIP
set(['x', 'y'])
>>> get_dependencies(dsk, 'w') # Only direct dependencies
set(['z'])
>>> get_dependencies(dsk, 'a') # Ignore non-keys
set(['x'])
"""
args = [dsk[task]]
result = []
while args:
arg = args.pop()
if istask(arg):
args.extend(arg[1:])
elif isinstance(arg, list):
args.extend(arg)
else:
result.append(arg)
if not result:
return [] if as_list else set()
rv = []
for x in result:
rv.extend(_deps(dsk, x))
return rv if as_list else set(rv)
def flatten(seq):
"""
>>> list(flatten([1]))
[1]
>>> list(flatten([[1, 2], [1, 2]]))
[1, 2, 1, 2]
>>> list(flatten([[[1], [2]], [[1], [2]]]))
[1, 2, 1, 2]
>>> list(flatten(((1, 2), (1, 2)))) # Don't flatten tuples
[(1, 2), (1, 2)]
>>> list(flatten((1, 2, [3, 4]))) # support heterogeneous
[1, 2, 3, 4]
"""
for item in seq:
if isinstance(item, list):
for item2 in flatten(item):
yield item2
else:
yield item
def reverse_dict(d):
"""
>>> a, b, c = 'abc'
>>> d = {a: [b, c], b: [c]}
>>> reverse_dict(d) # doctest: +SKIP
{'a': set([]), 'b': set(['a']}, 'c': set(['a', 'b'])}
"""
terms = list(d.keys()) + list(chain.from_iterable(d.values()))
result = dict((t, set()) for t in terms)
for k, vals in d.items():
for val in vals:
result[val].add(k)
return result
def subs(task, key, val):
""" Perform a substitution on a task
Example
-------
>>> subs((inc, 'x'), 'x', 1) # doctest: +SKIP
(inc, 1)
"""
if not istask(task):
if task == key:
return val
elif isinstance(task, list):
return [subs(x, key, val) for x in task]
else:
return task
newargs = []
for arg in task[1:]:
if istask(arg):
arg = subs(arg, key, val)
elif isinstance(arg, list):
arg = [subs(x, key, val) for x in arg]
elif type(arg) is type(key) and arg == key:
arg = val
newargs.append(arg)
return task[:1] + tuple(newargs)
def _toposort(dsk, keys=None, returncycle=False):
# Stack-based depth-first search traversal. This is based on Tarjan's
# method for topological sorting (see wikipedia for pseudocode)
if keys is None:
keys = dsk
elif not isinstance(keys, list):
keys = [keys]
if not returncycle:
ordered = []
    # Nodes whose descendants have been completely explored.
# These nodes are guaranteed to not be part of a cycle.
completed = set()
# All nodes that have been visited in the current traversal. Because
# we are doing depth-first search, going "deeper" should never result
# in visiting a node that has already been seen. The `seen` and
# `completed` sets are mutually exclusive; it is okay to visit a node
# that has already been added to `completed`.
seen = set()
for key in keys:
if key in completed:
continue
nodes = [key]
while nodes:
# Keep current node on the stack until all descendants are visited
cur = nodes[-1]
if cur in completed:
# Already fully traversed descendants of cur
nodes.pop()
continue
seen.add(cur)
# Add direct descendants of cur to nodes stack
next_nodes = []
for nxt in get_dependencies(dsk, cur):
if nxt not in completed:
if nxt in seen:
# Cycle detected!
cycle = [nxt]
while nodes[-1] != nxt:
cycle.append(nodes.pop())
cycle.append(nodes.pop())
cycle.reverse()
if returncycle:
return cycle
else:
cycle = '->'.join(cycle)
raise RuntimeError('Cycle detected in Dask: %s' % cycle)
next_nodes.append(nxt)
if next_nodes:
nodes.extend(next_nodes)
else:
# cur has no more descendants to explore, so we're done with it
if not returncycle:
ordered.append(cur)
completed.add(cur)
seen.remove(cur)
nodes.pop()
if returncycle:
return []
return ordered
def toposort(dsk):
""" Return a list of keys of dask sorted in topological order."""
return _toposort(dsk)
def getcycle(d, keys):
""" Return a list of nodes that form a cycle if Dask is not a DAG.
Returns an empty list if no cycle is found.
``keys`` may be a single key or list of keys.
Example
-------
>>> d = {'x': (inc, 'z'), 'y': (inc, 'x'), 'z': (inc, 'y')}
>>> getcycle(d, 'x')
['x', 'z', 'y', 'x']
See Also
--------
isdag
"""
return _toposort(d, keys=keys, returncycle=True)
def isdag(d, keys):
""" Does Dask form a directed acyclic graph when calculating keys?
``keys`` may be a single key or list of keys.
Example
-------
>>> inc = lambda x: x + 1
>>> isdag({'x': 0, 'y': (inc, 'x')}, 'y')
True
>>> isdag({'x': (inc, 'y'), 'y': (inc, 'x')}, 'y')
False
See Also
--------
getcycle
"""
return not getcycle(d, keys)
def cull(dsk, keys):
""" Return new dask with only the tasks required to calculate keys.
In other words, remove unnecessary tasks from dask.
``keys`` may be a single key or list of keys.
Example
-------
>>> d = {'x': 1, 'y': (inc, 'x'), 'out': (add, 'x', 10)}
>>> cull(d, 'out') # doctest: +SKIP
{'x': 1, 'out': (add, 'x', 10)}
"""
if not isinstance(keys, list):
keys = [keys]
nxt = set(flatten(keys))
seen = nxt
while nxt:
cur = nxt
nxt = set()
for item in cur:
for dep in get_dependencies(dsk, item):
if dep not in seen:
nxt.add(dep)
seen.update(nxt)
return dict((k, v) for k, v in dsk.items() if k in seen)
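# --- Minimal usage sketch (not part of the original module) ---
# A tiny task graph exercising the helpers defined above; only names from this
# file (inc, add, get, get_dependencies, cull, toposort) are used.
if __name__ == '__main__':
    dsk = {'x': 1,
           'y': (inc, 'x'),
           'z': (add, 'x', 'y'),
           'out': (inc, 'z')}
    assert get(dsk, 'out') == 4                                  # 1 -> 2 -> 3 -> 4
    assert get_dependencies(dsk, 'z') == set(['x', 'y'])
    assert cull(dsk, 'y') == {'x': 1, 'y': (inc, 'x')}
    assert toposort({'a': (inc, 'b'), 'b': 1}) == ['b', 'a']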
|
ibab/datapipe
|
datapipe/dask.py
|
Python
|
mit
| 12,777
|
[
"VisIt"
] |
d4c465cdc19b5e17178512a219a249c5462935f53ad548554c6f06c2e29c1eec
|
"""
Created on Thu Feb 9 21:30:03 2012
Test script to play around with the single pulse single qubit Clifford gates achieved via phase-ramping.
"""
from __future__ import division
import numpy as np
from scipy.constants import pi
from scipy.linalg import expm
import matplotlib.pyplot as plt
from PySim.SystemParams import SystemParams
from PySim.PulseSequence import PulseSequence
from PySim.Simulation import simulate_sequence_stack
from PySim.QuantumSystems import SCQubit, Hamiltonian
from copy import deepcopy
#Setup the system
systemParams = SystemParams()
qubit = SCQubit(2, 0e9, delta=200e6, name='Q1', T1=1e-6)
systemParams.add_sub_system(qubit)
systemParams.add_control_ham(inphase = Hamiltonian(0.5*(qubit.loweringOp + qubit.raisingOp)), quadrature = Hamiltonian(0.5*(-1j*qubit.loweringOp + 1j*qubit.raisingOp)))
systemParams.add_control_ham(inphase = Hamiltonian(0.5*(qubit.loweringOp + qubit.raisingOp)), quadrature = Hamiltonian(0.5*(-1j*qubit.loweringOp + 1j*qubit.raisingOp)))
systemParams.measurement = qubit.pauliZ
systemParams.create_full_Ham()
#Define the initial state as the ground state
rhoIn = qubit.levelProjector(0)
#First the basic sequence
basePulseSeq = PulseSequence()
basePulseSeq.add_control_line(freq=0e9, phase=0)
basePulseSeq.add_control_line(freq=0e9, phase=pi/2)
basePulseSeq.H_int = None
#Some parameters for the pulse
timeStep = 1.0/1.2e9
#How many discrete timesteps to break it up into
stepsArray = np.arange(12,61)
# stepsArray = np.arange(24,121)
'''
Test a square Hadamard pulse.
'''
#Setup the pulseSequences
#pulseSeqs = []
#
#for numSteps in stepsArray:
# tmpPulseSeq = deepcopy(basePulseSeq)
# timePts = np.arange(0, numSteps*timeStep, timeStep)
# calScale = 0.5/np.sqrt(2)/np.sum(timeStep*np.ones(numSteps))
# #First order Trotter
## phaseStep = pi/np.sqrt(2)/numSteps
## phaseRamp = phaseStep*(np.arange(numSteps))
# #Second order Trotter
# phaseStep = pi/np.sqrt(2)/numSteps/2
# phaseRamp = phaseStep*(2*np.arange(numSteps)+1)
# complexPulse = calScale*np.exp(-1j*phaseRamp)
# tmpPulseSeq.controlAmps = np.vstack((complexPulse.real, complexPulse.imag))
# tmpPulseSeq.timeSteps = timeStep*np.ones(numSteps)
#
# pulseSeqs.append(tmpPulseSeq)
'''
Test a Gaussian amplitude profile (with potential DRAG correction).
'''
pulseSeqs = []
phaseCorrArr = []
for numSteps in stepsArray:
tmpPulseSeq = deepcopy(basePulseSeq)
xPts = np.linspace(-2,2,numSteps)
gaussPulse = np.exp(-0.5*(xPts**2)) - np.exp(-0.5*2**2)
DRAGPulse = -0*(1/2/2/pi/qubit.delta)*(4.0/(numSteps*timeStep))*(-xPts*np.exp(-0.5*(xPts**2)))
timePts = np.arange(0, numSteps*timeStep, timeStep)
calScale = 0.5/np.sqrt(2)/np.sum(timeStep*gaussPulse)
phaseSteps = 2*pi*calScale*timeStep*gaussPulse
#Optional Z DRAG correction
# phaseSteps += 1*-0.5*(1/2/pi/qubit.delta)*timeStep*(2*pi*calScale*gaussPulse)**2
phaseRamp = np.cumsum(phaseSteps) - phaseSteps/2
phaseCorrArr.append(np.sum(phaseSteps))
complexPulse = calScale*(gaussPulse+1j*DRAGPulse)*np.exp(-1j*phaseRamp)
tmpPulseSeq.controlAmps = np.vstack((complexPulse.real, complexPulse.imag))
tmpPulseSeq.timeSteps = timeStep*np.ones(numSteps)
pulseSeqs.append(tmpPulseSeq)
results = simulate_sequence_stack(pulseSeqs, systemParams, rhoIn, simType='unitary')
#Single qubit paulis
X = np.array([[0, 1],[1, 0]])
Y = np.array([[0, -1j],[1j, 0]])
Z = np.array([[1, 0],[0, -1]]);
I = np.eye(2)
UgoalQ = expm(-1j*(pi/2)*(1/np.sqrt(2))*(X+Z))
Ugoal = np.zeros((qubit.dim,qubit.dim), dtype=np.complex128)
Ugoal[0:2,0:2] = UgoalQ
#Ugoal = qubit.pauliX
#phaseCorr = pi/np.sqrt(2)
#fidelity = [np.abs(np.trace(np.dot(Ugoal.conj().transpose(), np.dot(expm(-1j*(phaseCorr/2)*qubit.pauliZ),tmpU))))**2/4 for tmpU in results[1]]
fidelity = [np.abs(np.trace(np.dot(Ugoal.conj().transpose(), np.dot(expm(-1j*(phaseCorr/2)*qubit.pauliZ),tmpU))))**2/4 for (tmpU,phaseCorr) in zip(results[1],phaseCorrArr)]
print(1-fidelity[-1])
plt.figure()
plt.semilogy(stepsArray*timeStep,1-np.array(fidelity))
plt.xlabel('Length of Pulse')
plt.ylabel('Gate Error (after frame-correction)')
#plt.savefig('/home/cryan/Desktop/junk.pdf')
plt.show()
plt.figure()
# NOTE: the fidelities_* arrays below are not computed in this script; they are
# assumed to have been collected from earlier runs with different DRAG/pulse
# settings, so this comparison block will fail if executed standalone.
plt.semilogy(timeStep*stepsArray, 1-fidelities_noDrag)
plt.semilogy(timeStep*stepsArray, 1-fidelities_ZDrag)
plt.semilogy(0.5*timeStep*np.arange(24,121), 1-fidelities_ZDragDDR)
plt.semilogy(timeStep*stepsArray, 1-fidelities_ZDragX180)
plt.legend(('Hadamard, No DRAG', 'Hadamard, Z DRAG', 'Hadamard, Z DRAG, DDR', 'X180, Z DRAG'))
plt.xlabel('Pulse Length (s)', fontsize=14)
plt.ylabel('Gate Error', fontsize=14)
plt.title('Hadamard Atomic Clifford Gate Fidelities', fontsize=16)
plt.savefig('AtomicHadamard.svg')
plt.show()
|
BBN-Q/PySimulator
|
scripts/AtomicCliffords.py
|
Python
|
apache-2.0
| 4,760
|
[
"Gaussian"
] |
f198d11e223e2e2dcd8e94d42e54b144220b74223bd46a13051eeb2dc21b47b1
|
import json
import time
import logging
import msgpack
import traceback
import collections
import synapse.link as s_link
import synapse.async as s_async
import synapse.daemon as s_daemon
import synapse.aspects as s_aspects
import synapse.dyndeps as s_dyndeps
import synapse.eventbus as s_eventbus
import synapse.telepath as s_telepath
import synapse.datamodel as s_datamodel
import synapse.lib.sched as s_sched
import synapse.lib.cache as s_cache
import synapse.lib.socket as s_socket
from synapse.common import *
from synapse.lib.threads import firethread
import synapse.impulse as s_impulse
from synapse.eventbus import EventBus
logger = logging.getLogger(__name__)
# FIXME maybe should go in neuron link protocol / relay?
class NeuSock(s_socket.Socket):
    def __init__(self, neur, dest, dsid=None):
        s_socket.Socket.__init__(self, None)
        self.neur = neur
        self.dest = dest
        self.dsid = dsid
def tx(self, mesg):
self.neur.route( self.dest, mesg, dsid=self.dsid)
class Neuron(s_daemon.Daemon):
'''
Neurons implement a peer-to-peer network mesh using any
available synapse link protocol. A neuron mesh provides service
discovery and RMI transport to allow clients to reach services
and APIs via the application layer mesh.
'''
def __init__(self, core=None, pool=None):
s_daemon.Daemon.__init__(self, core=core, pool=pool)
self.sched = s_sched.Sched()
self.model = self.core.genDataModel()
self.model.addTufoForm('neuron')
self.model.addTufoProp('neuron','name')
self.model.addTufoProp('neuron','super',ptype='int',defval=0)
self.model.addTufoProp('neuron','usepki', ptype='bool', defval=0)
self.neur = self.core.formTufoByProp('neuron','self')
self.iden = self.neur[0]
self.mesh = {}
self.peers = {}
self.mesh['certs'] = {}
self.mesh['links'] = {}
self.mesh['peers'] = { self.iden:self.neur }
self.sockbyfrom = s_cache.Cache(maxtime=120)
self.sockbyfrom.setOnMiss( self._getFakeSock )
self.links = collections.defaultdict(set)
self.linkpaths = s_cache.Cache(maxtime=30)
self.linkpaths.setOnMiss( self._getLinkPath )
self.setMesgFunc('peer:syn', self._onPeerSynMesg )
self.setMesgFunc('peer:synack', self._onPeerSynAckMesg )
self.setMesgFunc('peer:link:init', self._onPeerLinkInitMesg )
#self.setMesgFunc('neu:peer:chal', self._onNeuPeerChal )
#self.setMesgFunc('neu:peer:resp', self._onNeuPeerResp )
self.setMesgFunc('neu:data', self._onNeuDataMesg )
self.setMesgFunc('neu:storm', self._onNeuStormMesg )
self.on('neu:link:init', self._onNeuLinkInit)
self.on('neu:link:fini', self._onNeuLinkFini)
self.share('neuron',self)
self.hasopt = {}
# fire any persistent neuron listeners
for url in self.core.getTufoList(self.neur, 'listen'):
try:
self.listen(url)
self.hasopt[ ('listen',url) ] = True
except Exception as e:
logger.error('neu listen: %s', e)
# spin up any persistent neuron connections
for url in self.core.getTufoList(self.neur, 'connect'):
try:
self.connect(url)
self.hasopt[ ('connect',url) ] = True
except Exception as e:
logger.error('neu connect: %s', e)
# load any persistent shared objects
for jsval in self.core.getTufoList(self.neur, 'shared'):
try:
                info = json.loads(jsval)
name = info.get('name')
task = info.get('task')
tags = info.get('tags',())
item = s_dyndeps.runDynTask(task)
self.share(name,item,tags=tags)
except Exception as e:
logger.error('neu share: %s', e)
def setNeuProp(self, prop, valu):
'''
Set a property for this neuron.
Example:
neu.setNeuProp('name','woot0')
'''
self.core.setTufoProp(self.neur,prop,valu)
# FIXME AUTH / PERMS
# FIXME send peer update
def getNeuProp(self, prop):
'''
Return a property for this neuron.
'''
return self.neur[1].get(prop)
#def addShareTask(self, name, task, tags=()):
#'''
#Add a 'task' tufo to initialize during Neuron startup.
#'''
def addNeuShare(self, name, task, tags=()):
'''
Add a shared object to the neuron
Example:
task = ('synapse.cortex.openurl', ('ram:///',), {})
neu.addNeuShare('hehe', task, tags=('foo.bar.baz',))
'''
item = s_dyndeps.runDynTask(task)
self.share(name,item,tags=tags)
jsinfo = dict(name=name, task=task, tags=tags)
self.core.addTufoList(self.neur,'shared', json.dumps(jsinfo) )
def addNeuListen(self, url):
'''
Add a link listen url to the neuron
Example:
neu.addNeuListen('tcp://0.0.0.0:9999')
'''
if self.hasopt.get(('listen',url)):
raise DupOpt('listen: %s' % url)
self.listen(url)
self.core.addTufoList(self.neur,'listen',url)
def addNeuConnect(self, url):
'''
Add a link connect url to the neuron
Example:
neu.addNeuConnect('tcp://mesh.kenshoto.com:9999')
'''
if self.hasopt.get(('connect',url)):
raise DupOpt('connect: %s' % url)
self.connect(url)
self.core.addTufoList(self.neur,'connect',url)
def _getFakeSock(self, idensid):
iden,sid = idensid
return NeuSock(self,iden,sid)
def _sendPeerSyn(self, sock):
# only do this once per socket..
if sock.get('peer:chal'):
return
# generate and save a challenge blob for the peer
chal = os.urandom(16)
sock.set('peer:chal', chal)
# transmit our peer:syn message with challenge blob
sock.tx( tufo('peer:syn', chal=chal) )
def _onPeerSynMesg(self, sock, mesg):
'''
Handle peer:syn message to become a neuron peer.
'''
sign = None
chal = mesg[1].get('chal')
if chal != None:
sign = self.pki.genByteSign(self.iden, chal)
cert = self.pki.getTokenCert(self.iden)
sock.tx(tufo('peer:synack', iden=self.iden, mesh=self.mesh, cert=cert, sign=sign))
# this will not send if we've already done so...
self._sendPeerSyn(sock)
def _onPeerSynAckMesg(self, sock, mesg):
'''
Handle a peer:synack hello from a peer we connected to.
'''
chal = sock.get('peer:chal')
if chal == None:
# we didn't send a chal... newp!
return
iden = mesg[1].get('iden')
mesh = mesg[1].get('mesh')
sign = mesg[1].get('sign')
cert = mesg[1].get('cert')
if cert != None:
self.pki.loadCertToken(cert, save=True)
if self.neur[1].get('neuron:usepki'):
if not self.pki.isValidSign(iden, sign, chal):
return
self.mesh['certs'][iden] = cert
# Inform the requestor that we have accepted them as a peer
self._syncMeshDict(mesh)
self._setPeerSock(iden,sock)
# this must be after mesh dict updates!
self._genMeshLinkStorm(sock)
def _onPeerLinkInitMesg(self, sock, mesg):
# peer:syn->peer:synack->peer:link:init
# the peer:link message confirms that the sender
# has accepted us as a peer so we may begin sending
# storm/data messages.
sock.set('peer:link',True)
self._genMeshLinkStorm(sock)
def _syncMeshDict(self, mesh):
'''
Ingest the mesh dict from a peer
'''
for iden,cert in mesh.get('certs',{}).items():
if cert != None:
self.pki.loadCertToken(cert, save=False)
for iden,peer in mesh.get('peers',{}).items():
if self.mesh['peers'].get(iden) == None:
self.mesh['peers'][iden] = peer
for (iden1,iden2),info in mesh.get('links',{}).items():
self.addPathLink(iden1,iden2,**info)
def _genMeshLinkStorm(self, sock):
# check if the sock has been mutually peer'd yet
# and possibly make the storm() announce if so.
# do we think he's a peer yet?
iden = sock.get('peer:peer')
if iden == None:
return False
# does he think we're a peer yet?
if not sock.get('peer:link'):
return False
mesh = self.getMeshDict()
cert = self.pki.getTokenCert(self.iden)
self.storm('neu:link:init', link=(self.iden,iden), cert=cert, mesh=mesh)
def _setPeerSock(self, iden, sock):
'''
Record that a peer is on the other end of sock.
'''
# we can use this to ensure messages are from peer socks
self.peers[iden] = sock
sock.set('peer:peer',iden)
def onfini():
# FIXME peer lock and check if it's our sock?
self.peers.pop(iden,None)
self.storm('neu:link:fini', link=(self.iden,iden))
sock.onfini(onfini)
sock.tx( tufo('peer:link:init') )
#self.storm('neu:link:init', link=(self.iden,iden))
def _onNeuDataMesg(self, sock, mesg):
'''
Handle neu:data message ( most likely routing )
'''
# This message is only valid from peer socks
if not sock.get('peer:peer'):
return
off = mesg[1].get('off') + 1
path = mesg[1].get('path')
# If we're not there yet, route it along...
if len(path) > off:
dest = path[off]
mesg[1]['off'] = off
peer = self.peers.get(dest)
if peer == None:
# FIXME SEND ERROR BACK
return
peer.tx( mesg )
return
# we are the final hop
byts = mesg[1].get('byts')
newm = msgunpack(byts)
jid = mesg[1].get('jid')
ssid = mesg[1].get('ssid')
dsid = mesg[1].get('dsid')
if dsid != None:
sess = self.cura.getSessBySid(dsid)
if sess == None: # the session is gone/expired...
# FIXME SEND ERROR BACK
return
sess.dist(newm)
return
ssid = mesg[1].get('ssid')
sock = self.sockbyfrom.get( (path[0],ssid) )
self._runLinkSockMesg( tufo('link:sock:mesg', sock=sock, mesg=newm) )
def getPeerTufo(self):
'''
Return the "peer tufo" for this neuron.
Example:
peer = neu.getPeerTufo()
'''
return self.neur
def getMeshDict(self):
'''
Return this neurons knowledge of the state of the mesh.
Example:
mesh = neu.getMeshDict()
'''
return self.mesh
def getModelDict(self):
'''
Return the DataModel() dict for this neuron's cortex.
Example:
moddef = neu.getModelDict()
'''
return self.model.getModelDict()
def ping(self):
'''
Retrieve vital stats for a neuron ( and gauge RTT ).
'''
return {'iden':self.iden}
def getPathTrees(self):
'''
Return a list of trees for shortest path broadcast.
'''
done = set([self.iden])
trees = [ (i,[]) for i in self.links[self.iden] ]
todo = list(trees)
while todo:
node = todo.pop()
for iden in list(self.links[ node[0] ]):
if iden in done:
continue
done.add(iden)
newn = (iden,[])
todo.append(newn)
node[1].append(newn)
return trees
def getPathLinks(self):
'''
Return a list of (id1,id2) tuples for each known link.
'''
links = []
for iden,peers in self.links.items():
links.extend( [ (iden,p) for p in peers ] )
return links
def addPathLink(self, iden1, iden2, **info):
'''
Add a link to the known paths.
'''
self.links[iden1].add(iden2)
self.mesh['links'][ (iden1,iden2) ] = info
def delPathLink(self, iden1, iden2):
'''
Delete a known mesh path link.
'''
self.links[iden1].discard(iden2)
self.mesh['links'].pop( (iden1,iden2), None )
def addPathLinks(self, links):
'''
Add multiple (id1,id2) link tuples.
'''
[ self.addPathLink(iden1,iden2) for (iden1,iden2) in links ]
def getLinkPath(self, iden1, iden2):
'''
Find the shortest path from iden1 to iden2
'''
return self.linkpaths.get( (iden1,iden2) )
def _getLinkPath(self, identup ):
iden1, iden2 = identup
todo = [ [iden1] ]
done = set()
while todo:
path = todo.pop()
for iden in self.links[ path[-1] ]:
if iden in done:
continue
done.add(iden)
if iden == iden2:
path.append(iden)
return path
todo.append( path + [ iden ] )
def connect(self, url, **opts):
'''
Connect to a peer neuron
This will attempt to bring up a permanent connection
and reconnect if it is torn down.
'''
if self.isfini:
return
link = s_link.chopLinkUrl(url)
link[1].update(opts)
relay = s_link.getLinkRelay(link)
sock = relay.connect()
if sock == None:
self.sched.insec( 1, self.connect, url, **opts )
return None
self.runPlexSock(sock)
sock.tx( tufo('peer:syn', iden=self.iden, mesh=self.mesh) )
def runPlexSock(self, sock):
'''
Begin handling the given socket using the Plex().
'''
sock.on('link:sock:mesg', self._onLinkSockMesg )
self.plex.addPlexSock(sock)
self.fire('link:sock:init', sock=sock)
def storm(self, evt, **evtinfo):
'''
Send an event to all the boxes in the mesh.
'''
mesg = (evt,evtinfo)
self.dist(mesg)
byts = msgenpack(mesg)
sign = None
if self.neur[1].get('neuron:usepki'):
sign = self.pki.genByteSign(self.iden, byts)
cert = self.pki.getTokenCert(self.iden)
trees = self.getPathTrees()
for tree in trees:
self.relay(tree[0], tufo('neu:storm', tree=tree, byts=byts, sign=sign, iden=self.iden, cert=cert))
return trees
def relay(self, dest, mesg):
'''
Send a message to an adjacent neuron peer.
'''
sock = self.peers.get(dest)
if sock == None:
return False
sock.tx(mesg)
return True
def route(self, iden, mesg, dsid=None):
'''
Route the given message to the dest neuron.
Optionally specify dsid to deliver to a session
connected to the remote neuron...
'''
ssid = None
sess = s_session.current()
if sess != None:
ssid = sess.sid
path = self.getLinkPath(self.iden, iden)
if path == None:
raise NoSuchPeer(iden)
byts = msgenpack( mesg )
nhop = path[1]
        data = tufo('neu:data', path=path, off=1, byts=byts, ssid=ssid, dsid=dsid)
self.relay(nhop,data)
def _onNeuLinkInit(self, event):
iden0,iden1 = event[1].get('link')
self.addPathLink(iden0,iden1)
self.addPathLink(iden1,iden0)
cert = event[1].get('cert')
if cert != None:
self.pki.loadCertToken(cert, save=True)
mesh = event[1].get('mesh')
if mesh != None:
self._syncMeshDict(mesh)
def _onNeuLinkFini(self, event):
iden0,iden1 = event[1].get('link')
self.delPathLink(iden0,iden1)
self.delPathLink(iden1,iden0)
def _onNeuStormMesg(self, sock, mesg):
# This message is only valid from peer socks
if not sock.get('peer:peer'):
return
# if they embedded a cert, check/load it
cert = mesg[1].get('cert')
if cert != None:
print("STORM CERT: %r" % (self.pki.loadCertToken(cert),))
tree = mesg[1].get('tree')
byts = mesg[1].get('byts')
sign = mesg[1].get('sign')
iden = mesg[1].get('iden')
if self.neur[1].get('neuron:usepki'):
if not self.pki.isValidSign(iden, sign, byts):
print('NEWP2 %s %r' % (iden,sign))
print('NEWP2 TOKN %r' % (self.pki.getTokenTufo(iden),))
print('NEWP2 MESG: %r' % (msgunpack(byts),) )
return
self.dist( msgunpack(byts) )
for newt in tree[1]:
self.relay(newt[0], tufo('neu:storm', tree=newt, byts=byts, iden=iden, sign=sign))
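# --- Standalone sketch (not part of the original module) ---
# Illustration of the iterative link-path search that Neuron._getLinkPath() runs
# over self.links, using a plain adjacency dict of hypothetical peer idens instead
# of a live mesh. It mirrors the stack-based expansion above, nothing more.
if __name__ == '__main__':
    toy_links = {'a': {'b'}, 'b': {'a', 'c'}, 'c': {'b', 'd'}, 'd': {'c'}}
    def _find_path(links, src, dst):
        todo, done = [[src]], set()
        while todo:
            path = todo.pop()
            for iden in links.get(path[-1], ()):
                if iden in done:
                    continue
                done.add(iden)
                if iden == dst:
                    return path + [iden]
                todo.append(path + [iden])
    print(_find_path(toy_links, 'a', 'd'))  # ['a', 'b', 'c', 'd']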
|
imjonsnooow/synapse
|
synapse/neuron.py
|
Python
|
apache-2.0
| 17,207
|
[
"NEURON"
] |
9969585a2633bd8ab31361e2c716b9bd6c4dd383409e23a25b4bf8c1b76afcf5
|
# The test code for Le Troter's sulcus pruning algorithm based on path probability
import cPickle
import pyvtk
def load_fundi(Filename):
'''Load fundi from a VTK file
Returns
==========
Fundi : list of 2-tuples of integers
Each element is a line segment in fundi, could belong to any component
Vertexes: list of 3-tuples of floats
Each element is a vertex's coordinates
'''
VTKReader = pyvtk.VtkData(Filename)
Fundi = VTKReader.structure.lines
Vertexes = VTKReader.structure.points
return Fundi, Vertexes
def load_component(Filename):
'''Load vertex components of a hemisphere
Returns
==========
VrtxCmpnts : list of lists of integers
each element of VrtxCmpnt is a list of vertexes that are in the same sulcal component
'''
Fp = open(Filename, 'r')
VrtxCmpnt = cPickle.load(Fp)
Fp.close()
return VrtxCmpnt
def fundus_per_component(VrtxCmpnts, Fundi):
'''Return fundus vertexes within each component
Parameters
===========
VrtxCmpnts : list of lists of integers
each element of VrtxCmpnt is a list of vertexes that are in the same sulcal component
Fundi : list of 2-tuples of integers
Each element is a line segment in fundi, could belong to any component
Returns
=========
Fundus_per_cmp : list of lists of 2-tuples of integers
Each element is a list of fundus line segments within the same component.
Le Troter's algorithm runs on each of this.
'''
Fundus_per_cmp = [[] for i in xrange(0, len(VrtxCmpnts)) ]
for Fundus in Fundi:
[V1, V2] = Fundus # V1 and V2 must belong to the same component
for CmpntID, Cmpnt in enumerate(VrtxCmpnts):
if V1 in Cmpnt:
Fundus_per_cmp[CmpntID].append(Fundus)
return Fundus_per_cmp
def other(Tuple, Element):
'''Given a 2-tuple and one element, return the other element.
'''
if Element == Tuple[0]:
return Tuple[1]
else:
return Tuple[0]
def init_graph(Fundus_per_cmp):
'''Initialize the graph for running le troter's algorithm
Parameters
===========
Fundus_per_cmp : list of lists of 2-tuples of integers
Each element is a list of fundus line segments within the same component.
Degree : dictionary of integer keys and integer values
Keys are nodes in initial graph and values are their degrees
Merged : Boolean
        True if a short segment is merged with another short segment
Nbr : a dictionary
key is node ID and value is IDs of neighbors of the node
Graph : a 4-tuple of 2 lists and 2 dictionaries
The 1st list includes all nodes in the initial graph,
while the 2nd list includes all edges in the initial graph.
The 1st list is a list of integers and the 2nd is a list of 2-tuples of integers.
The 3rd element is a *Degree* dictionary
The 4th element is an *Nbr* dictionary
Edge_Comp : list of 2-tuples of tuples of integers
Each element is a 2-tuple. The first is a 2-tuple containing two vertexes on the original mesh.
During the running of this function, those two vertexes are terminal nodes of an edge under expansion.
After running this function, those two vertexes are nodes on initial graph.
The second is a list of integers representing a path on original mesh forming the expanding edge specified by the first tuple element.
Returns
=========
Graphs : list of Graph
Each element corresponds to one Graph of a sulcus patch/component
Notes
=========
In the initial graph, there should be no node of the degree 2.
How do we build edges for initial graph? We start with original segments on the mesh.
For any two connected segments, if their joint has a degree of 2, replace them by a new edge spanning over their non-joint vertexes.
E.g., given A-B and B-C, replace them by A-C.
'''
def find_Comp(Edge_Comp, Edge):
'''The nested function that returns the path forming edge (N1, N2)
'''
for Comp in Edge_Comp:
if Comp[0] == Edge:
return Comp[1]
print "cannot find such Edge in Edge_Comp", Edge, Edge_Comp
return -1
Graphs = []
for Fundus in Fundus_per_cmp:
if len(Graphs) ==5:
print "Check the creation of pgraph 6"
Nodes, Edges = [], []
Degree, Nbr = {}, {}
Edge_Comp = []
# Get all nodes in initial graph
for Segment in Fundus:
Nodes += Segment
Nodes = list(set(Nodes))
# End of Get all nodes in initial graph
# Get all edges in initial graph
# 1. Find all nodes of degree 1 and greater than 2.
for Node in Nodes:
Degree[Node] = 0
for Segment in Fundus:
[V1, V2] = Segment
Degree[V1] += 1
Degree[V2] += 1
# 2. extend small segments into edges
for Segment in Fundus:
[V1, V2] = Segment
Merged = False
if Degree[V1] == 2: # find another edge who has V1.
for EdgeID, Edge in enumerate(Edges):
if V1 in Edge:
Edges[EdgeID] = [other(Edge, V1), V2]
Merged = True
Old_Path = find_Comp(Edge_Comp, Edge)
# print Old_Path
if V1 == Old_Path[0]:
New_Path = [V2] + Old_Path
elif V1 == Old_Path[-1]:
New_Path = Old_Path + [V2]
else:
print "Error"
exit()
New_Edge = [other(Edge, V1), V2]
Edge_Comp.remove([Edge, Old_Path])
Edge_Comp.append([New_Edge, New_Path])
break
elif Degree[V2] == 2: # find another edge who has V1.
for EdgeID, Edge in enumerate(Edges):
if V2 in Edge:
Edges[EdgeID] = [V1, other(Edge, V2)]
Merged = True
Old_Path = find_Comp(Edge_Comp, Edge)
if V2 == Old_Path[0]:
New_Path = [V1] + Old_Path
elif V2 == Old_Path[-1]:
New_Path = Old_Path + [V1]
else:
print "Error"
exit()
New_Edge = [V1, other(Edge, V2)]
Edge_Comp.remove([Edge, Old_Path])
Edge_Comp.append([New_Edge, New_Path])
break
if not Merged: # there is not edge currently in Edges that shares terminal vertexes with this Segment
Edges.append(Segment)
Edge_Comp.append([Segment, Segment])
# End of Get all edges in initial graph
# 3. Get nodes and Degrees for initial graph
NewNodes = []
for Edge in Edges:
NewNodes += Edge
NewNodes = list(set(NewNodes))
NewDegree = {}
for Node in NewNodes:
NewDegree[Node] = 0
for Edge in Edges:
[V1, V2] = Edge
NewDegree[V1] += 1
NewDegree[V2] += 1
# 4. generate neighbor list for initial graph
for Node in NewNodes:
Nbr[Node] = []
for Edge in Edges:
# print Edge
[V1, V2] = Edge
Nbr[V1].append(V2)
Nbr[V2].append(V1)
# End of generate neighbor list for initial graph
Graph = [NewNodes, Edges, NewDegree, Nbr, Edge_Comp]
Graphs.append(Graph)
return Graphs
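# --- Toy demonstration (not part of the original module) ---
# Shows the degree-2 merging described in the Notes of init_graph(): segments
# 1-2 and 2-3 share joint vertex 2 (degree 2), so they collapse into the single
# initial-graph edge 1-3, while the fork at vertex 3 is preserved.
if __name__ == '__main__':
    toy_fundus_per_cmp = [[[1, 2], [2, 3], [3, 4], [3, 5]]]
    toy_graph = init_graph(toy_fundus_per_cmp)[0]
    print toy_graph[1] # edges of the initial graph; expected [[1, 3], [3, 4], [3, 5]]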
def path_prob2(Graphs):
'''Compute path probability using Dijkstra algorithm
'''
pass
def path_prob(Graphs):
'''Compute path probability using my own algorithm
Parameters
===========
Graphs : list of Graph
Each element corresponds to one *Graph* of a sulcus patch/component
See definition of the same variable in function init_graph
UnAssigned : queue of integers
A queue of nodes who has not been assigned path probability (=0)
Degree_1 : integer
number of nodes of degree 1 in a graph.
Returns
========
Path_Prob : dictionary
keys are node ID and values are their path probability
Notes
=======
For temporary visualization purpose, we assign edge probability to all fundus vertexes on the edge.
'''
Path_prob = {}
for Graph in Graphs:
import collections
UnAssigned = collections.deque([])
Path_prob_local = {} # Path_prob for nodes in this graph
[Nodes, Edges, Degree, Nbr, Edge_Comp] = Graph
Counter = 0 # number of iteration has been run on this graph
# Degree_1= 0
# for Node in Nodes:
# if Degree[Node] == 1:
# Degree_1 += 1
# 1. initialize Path_prob
for Node in Nodes:
if Degree[Node] == 1:
                Path_prob_local[Node] = 1  # it is debatable whether this should be len(Nodes), Degree_1, or just 1.
else:
Path_prob_local[Node] = 0
UnAssigned.append(Node)
# 2. assigned Path_prob to nodes of degree greater than 3
while len(UnAssigned) >0:
Candidate = UnAssigned.popleft() # pick a node from the list of unassigned nodes
Counter += 1
Some_UnAssigned = False # a flag denoting whether some lower-degree neighbors of Candidate are not assigned
for Node in Nbr[Candidate]:
if Node in list(UnAssigned) and Degree[Node] < Degree[Candidate]:
Some_UnAssigned = True
break
# UnAssigned.remove(Candidate)
if not Some_UnAssigned:
for Node in Nbr[Candidate]:
Path_prob_local[Candidate] += Path_prob_local[Node]
else: # put Candidate to the back of the UnAssigned list
UnAssigned.append(Candidate)
# print UnAssigned
if Counter > len(Nodes)*3:
print "Too many iterations "
break
# 3. Assign path probability to vertexes between any pair of nodes (i.e., edges)
# This step is only for visualization.
for Comp in Edge_Comp:
[N1, N2] = Comp[0]
Path = Comp[1]
# print Path
Avg_Prob = (Path_prob_local[N1] + Path_prob_local[N2])/2.0
for i in xrange(1,len(Path)-1):
Path_prob_local[Path[i]] = Avg_Prob
Path_prob.update(Path_prob_local)
return Path_prob
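## ---------------------------------------------------------------------------
## A minimal illustrative sketch (added; not part of the original module): a
## hand-built Graph in the [Nodes, Edges, Degree, Nbr, Edge_Comp] layout used
## by init_graph, showing what path_prob returns for a simple Y-shaped fundus.
## The function is only defined, never called, so module behaviour is unchanged.
def _example_path_prob():
    # Y-shaped graph: three leaves (0, 1, 2) meeting at a fork (3);
    # every edge is its own component path (no intermediate mesh vertexes).
    Nodes = [0, 1, 2, 3]
    Edges = [[0, 3], [1, 3], [2, 3]]
    Degree = {0: 1, 1: 1, 2: 1, 3: 3}
    Nbr = {0: [3], 1: [3], 2: [3], 3: [0, 1, 2]}
    Edge_Comp = [[[0, 3], [0, 3]], [[1, 3], [1, 3]], [[2, 3], [2, 3]]]
    Graph = [Nodes, Edges, Degree, Nbr, Edge_Comp]
    # Leaves get probability 1 and the fork accumulates the sum of its
    # neighbors, so the expected result is {0: 1, 1: 1, 2: 1, 3: 3}.
    return path_prob([Graph])
## ---------------------------------------------------------------------------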
def Prob2File(Path_prob, Fundi, Vertexes):
    '''Visualize path probability and other debugging information on fundi
'''
Structure = pyvtk.PolyData(points=Vertexes, lines=Fundi)
# convert dictionary-type Path_prob into a list for pyvtk to use
Path_prob_list = [0 for i in xrange(len(Vertexes))]
for Key,Value in Path_prob.iteritems():
Path_prob_list[Key] = Value
Pointdata = pyvtk.PointData(pyvtk.Scalars(Path_prob_list,name='Path_Prob'))
# pyvtk.Scalars(Parts,name='hierarchy'))
pyvtk.VtkData(Structure, Pointdata).tofile('path_prob_test.vtk','ascii')
def prune(Graphs, Path_prob):
'''Prune fundi according to path probability
    We prune branches (one end of which has degree 1) in ascending order of path probability,
    until there are no branching nodes left on the fundi.
Parameters
============
Graphs : list of Graph
Each element corresponds to one Graph of a sulcus patch/component
Check the output from function init_graph
Path_prob : dictionary
keys are node ID and values are their path probability
Return
==========
NewGraphs : list of Graph
Same structure as Graphs, but some branches are removed.
Notes
=======
2012-05-12 Currently we may not prune enough.
'''
    NewGraphs = list(Graphs) # shallow copy of the outer list; branches are removed via this (the inner Graph lists are shared with Graphs)
for GraphID, Graph in enumerate(Graphs):
if GraphID == 6:
print "debug point"
[Nodes, Edges, Degree, Nbr, Edge_Comp] = Graph
if Nodes == []: # this graph/component has no fundus
continue
# 1. form a dictionary of Path_prob for nodes on initial graph only.
Path_prob_local = {}
for Node in Nodes:
Path_prob_local[Node] = Path_prob[Node]
# 2. sort the local Path_prob dict and form a stack of Nodes,
# where the node of high path prob are in the bottom.
from operator import itemgetter
Sorted_Path_prob_list = sorted(Path_prob_local.items(),key=itemgetter(1))
from collections import deque
Exam = deque([])
for Pair in Sorted_Path_prob_list:
if Degree[Pair[0]] ==1:
Exam.append(Pair[0])
# 3. Form a list of nodes whose degree is greater than 2.
Forks = []
for Node in Nodes:
if Degree[Node] > 2:
Forks.append(Node)
if len(Forks) == 1:
continue
# 4. remove nodes until this graph has no node of degree greater than 2
Counter = 0
while len(Forks) > 0:
Candidate = Exam.popleft()
for Edge in Edges:
if Candidate in Edge:
NewGraphs[GraphID][1].remove(Edge) # 1 is Edges
Fork = other(Edge, Candidate)
NewGraphs[GraphID][2][Fork] -= 1 # 2 is Degree
if NewGraphs[GraphID][2][Fork] <3: # should be < 3
Forks.remove(Fork)
if NewGraphs[GraphID][2][Fork] ==1:
Exam.append(Fork)
break
Counter += 1
if Counter > len(Nodes)+200:
print "Something may go wrong in pruning"
break
return NewGraphs
def getComp(Edge_Comp, Edge):
'''Return components of an edge
Parameters
============
Edge_Comp : list of 2-tuples of tuples of integers
Each element is a 2-tuple. The first is a 2-tuple containing two vertexes on the original mesh.
        During the construction of the initial graph (see init_graph), those two vertexes are the terminal nodes of an edge under expansion.
        After the initial graph is built, those two vertexes are nodes of the initial graph.
The second is a list of integers representing a path on original mesh forming the expanding edge specified by the first tuple element.
Edge : list of 2 integers
Returns
===========
Comp : list of integers
'''
for [Edge_s, Comp] in Edge_Comp:
if Edge_s == Edge:
return Comp
return -1
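## Illustrative example (added; not from the original module): with
## Edge_Comp = [[[3, 7], [3, 15, 22, 7]]], getComp(Edge_Comp, [3, 7]) returns
## the mesh path [3, 15, 22, 7]; an edge that is not present returns -1.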
def graph2File(Graphs,Vertexes):
    '''Write Graphs to a file for visualizing pre- or post-pruning fundi.
'''
Lines = []
for Graph in Graphs:
[Nodes, Edges, Degree, Nbr, Edge_Comp] = Graph
for Edge in Edges:
Comp = getComp(Edge_Comp, Edge)
for i in xrange(len(Comp)-1):
Lines.append([Comp[i], Comp[i+1]])
Structure = pyvtk.PolyData(points=Vertexes, lines=Lines)
# convert dictionary-type Path_prob into a list for pyvtk to use
pyvtk.VtkData(Structure).tofile('new_fundi.vtk','ascii')
def test_run(CmpntFile, FundiFile):
    '''Run the letroter algorithm on a test hemisphere
'''
Fundi, Vertexes = load_fundi(FundiFile)
VrtxCmpnts = load_component(CmpntFile)
Fundus_per_cmp = fundus_per_component(VrtxCmpnts, Fundi)
Graphs = init_graph(Fundus_per_cmp)
Path_prob = path_prob(Graphs)
# Prob2File(Path_prob, Fundi, Vertexes)
NewGraphs = prune(Graphs, Path_prob)
graph2File(Graphs, Vertexes)
return Path_prob, Graphs, NewGraphs
import sys
Path_prob, Graphs, NewGraphs = test_run(sys.argv[1], sys.argv[2])
|
binarybottle/mindboggle_sidelined
|
fundi_from_pits/letroter.py
|
Python
|
apache-2.0
| 17,309
|
[
"VTK"
] |
53ce04785b9bb0a48634f1be259dfd7495232bab9e2ea4c76c1623cc96732e36
|
######################################################################################
# fitn2dgauss.py
# V.1.0.2 19 May 2015 - Typo fixed on residuals
# V.1.0.1 - 13 March 2015 - Emsellem
# Changed a few lines to simplify the functions and variables
# V.1.0.0 - 12 May 2014 - Emsellem
# Adding some options including the PSF convolution and PA following the original Lyon lib C pgm.
# V.0.9.8 - 12 August 2013 - Emsellem / Ginsburg
# Adding a printing function suggested by Adam Ginsburg
# Removed epsfcn in lmfit
# V.0.9.7 - 23 July 2013 - Emsellem
# Changing the residual function using the standard normalisation
# V.0.9.6 - 8 August 2012 - Emsellem
# Adding a function to filter Gaussians which are not relevant
# V.0.9.5 - 3 August 2012 - Emsellem
# Integrated all methods (mpfit, lmfit + NNLS or BVLS) in a single file
# V.0.9.2 - 27 June 2012 - Bois + Emsellem
# A few fixes from Maxime Bois, and small changes from Emsellem
# V.0.9.1 - 22 January 2012 - Emsellem
# Included np.log10 instead of log10
# V.0.9.0 - 22 January 2012 - Emsellem
# V.0.1.0 - 2010 - Emsellem
# Reshuffling and packaging from old python MGE routines
#
# Largely inspired by gaussfitter.py
# written by Adam Ginsburg (adam.ginsburg@colorado.edu or keflavich@gmail.com) 3/17/08)
# latest version available at http://code.google.com/p/agpy/source/browse/trunk/agpy/gaussfitter.py
#
# Specific development of an N 2D Gaussian fitter # Eric Emsellem (eric.emsellem@eso.org)
# Basis for the Multi-Gaussian Expansion method as described in Emsellem et al. 1994
# and subsequent papers on the MGE technique
# Decoupling the linear and non-linear parts, following the
# algo fitting approach proposed by Michele Cappellari (see 2002 paper and available idl/python routines)
######################################################################################
import numpy as np
from numpy import exp, cos, sin, arctan, sqrt, pi, newaxis
from numpy import float64 as floatFit
## Use mpfit for the non-linear least-squares
from pygme.fitting.mpfit import mpfit
## Trying to import lmfit
try :
# If it works, can use lmfit version
# for the non-linear least squares
import lmfit
from lmfit import minimize, Parameters, Minimizer
Exist_LMFIT = True
except ImportError :
Exist_LMFIT = False
print "WARNING: Only mpfit is available as an optimiser"
print "WARNING: you may want to install lmfit"
from pygme.mge_miscfunctions import convert_xy_to_polar
#
# # If Openopt is there we can import it and allow the use to use BVLS
# # Otherwise we just use nnls which is faster anyway
# try :
# # If it works, can use openopt for
# # bound-constrained linear least squares (wrapper of BVLS)
# import openopt
# from openopt import LLSP
# Exist_OpenOpt = True
# except ImportError :
# Exist_OpenOpt = False
# print "WARNING: Only nnls is available as a linear method"
## The openopt import above is commented out, so flag BVLS as unavailable
Exist_OpenOpt = False
# Use scipy nnls to solve the linear part
from scipy.optimize import nnls
"""
Note (Ginsburg) about mpfit/leastsq:
I switched everything over to the Markwardt mpfit routine for a few reasons,
but foremost being the ability to set limits on parameters, not just force them
to be fixed. As far as I can tell, leastsq does not have that capability.
The version of mpfit I use can be found here:
http://code.google.com/p/agpy/source/browse/trunk/mpfit
This is included in the present distribution
Note (Emsellem): this is now also solved using lmfit which is based on leastsq from scipy
So two methods are implemented
"""
## Useful constants
twopi = 2.0 * np.pi
_default_parPSF = np.array([1.,0.])
## # ==========================================================================
## ## Extract the Gaussian parameters from the input pars
## # ==========================================================================
## def _extract_PSFParam(pars) :
## """
## Extract the PSF parameters from the original formatting
## Internal routine for fitn2gauss.py
## """
## pars = pars.ravel()
## if len(pars) % 2 == 0:
## ## Normalising the energy of the total PSF to 1
## ## Warning: Each Intensity is Imax for each PSF Gaussian
## I = pars[::2] / np.sum(pars[::2])
## sigma = pars[1::2]
## else:
## raise ValueError("Wrong array lengths!")
## return [1.], [0.]
## return I, sigma
##
# ==========================================================================
## Normalise the PSF given by Imax and sigmas
# ==========================================================================
def norm_PSFParam(pars) :
"""
Normalise the PSF parameters from the original formatting
Calculate the fraction (energy) of each Gaussian and normalise
by the sum of all PSF Gaussians
    Internal routine for fitn2dgauss.py
"""
pars = pars.ravel()
if len(pars) % 2 == 0:
## Normalising the energy of the total PSF to 1
## Warning: Each Intensity initially given is Imax for each PSF Gaussian
## So now we calculate the fraction of the energy taken by each Gaussian
## Hence the sum of intensities should be 1 after the normalisation
if np.sum(pars[::2] * pars[1::2]**2) == 0 :
pars[::2] = 1.
pars[1::2] = 0.
else :
pars[::2] = pars[::2] * pars[1::2]**2 / np.sum(pars[::2] * pars[1::2]**2)
else:
raise ValueError("Wrong array lengths!")
return np.array([1., 0.])
return pars
# ==========================================================================
## Returns a function which does the calculation, given a set of parameters,
## of the sum of Gaussians, the I being the maximum intensities (not normed)
# ==========================================================================
def n_centred_twodgaussian_Imax(pars, parPSF=_default_parPSF, I=None, q=None, sigma=None, pa=None):
"""
Returns a function that provides the sum
over N 2D gaussians, where N is the length of
I,q,sigma,pa *OR* N = len(pars) / 4
The background level is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - an array with len(pars) = 4n, assuming I, sigma, q, pa repeated n times
parPSF - an array with the parameters from the PSF (formatted as pars)
I, sigma, q, pa - alternative amplitudes, line widths, axis ratios, PA
if pars is not provided (PA in degrees).
"""
if pars is None :
if not(len(I) == len(q) == len(sigma) == len(pa)):
raise ValueError("Wrong array lengths! q: %i sigma: %i pa: %i" \
% (len(q), len(sigma), len(pa)))
else :
pars = pars.ravel()
if len(pars) % 4 == 0:
I = pars[::4]
sigma = pars[1::4]
q = pars[2::4]
pa = pars[3::4]
## Convolution of the sigmas with the PSF
ImaxPSF, sigmaPSF = parPSF[::2], parPSF[1::2]
nPSF = len(ImaxPSF)
sigmaX = np.sqrt((sigma**2)[:,newaxis] + sigmaPSF**2)
sigmaY = np.sqrt(((q * sigma)**2)[:,newaxis] + sigmaPSF**2)
Itot = (I * sigma**2 * q)[:,newaxis] * ImaxPSF[newaxis,:] / (sigmaX * sigmaY)
parad = np.radians(pa)
def g2d(r, theta):
v = np.zeros_like(r)
for i in xrange(len(q)):
angle = - parad[i] + theta + pi / 2.
v += np.sum([Itot[i,j] * exp( - 0.5 * r**2 * ((cos(angle)/sigmaX[i,j])**2 + (sin(angle) / sigmaY[i,j])**2)) for j in range(nPSF)], axis=0)
return v
return g2d
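## ---------------------------------------------------------------------------
## Illustrative sketch (added; not part of the original module): evaluating the
## returned callable for a single Gaussian (Imax=1, sigma=2, q=0.8, PA=30 deg)
## at a few polar positions; the numbers are arbitrary. Never called.
def _example_sum_of_gaussians_Imax():
    pars = np.array([1.0, 2.0, 0.8, 30.])
    g2d = n_centred_twodgaussian_Imax(pars=pars)
    r = np.array([0., 1., 2.])
    theta = np.array([0., 0.5 * np.pi, np.pi])
    ## model surface brightness at each (r, theta) sample
    return g2d(r, theta)
## ---------------------------------------------------------------------------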
# ==========================================================================
## Returns a function which does the calculation, given a set of parameters
## of the sum of Gaussians, the I being the NORMED intensities
# ==========================================================================
def _n_centred_twodgaussian_Inorm(pars, parPSF=_default_parPSF, I=None, logsigma=None, q=None, pa=None):
"""
Returns a function that provides the sum
over N 2D gaussians, where N is the length of
I,q,sigma,pa *OR* N = len(pars) / 4
The background "height" is assumed to be zero (you must "baseline" your
spectrum before fitting)
pars - an array with len(pars) = 4n, assuming I, logsigma, q, pa repeated n times
I, logsigma, q, pa - alternative amplitudes, line widths (in log10), axis ratios, PA
if pars is not provided (PA in degrees).
"""
pars = pars.ravel()
I = pars[::4]
sigma = 10**(pars[1::4])
q = pars[2::4]
pa = pars[3::4]
## Convolution of the sigmas with the PSF
ImaxPSF, sigmaPSF = parPSF[::2], parPSF[1::2]
nPSF = len(ImaxPSF)
sigmaX = np.sqrt(sigma[:,newaxis]**2 + sigmaPSF**2)
sigmaY = np.sqrt(((q * sigma)**2)[:,newaxis] + sigmaPSF**2)
# Fgauss = twopi * sigmaX * sigmaY
# Itot = (I * sigma**2 * q)[:,newaxis] * ImaxPSF[newaxis,:] / (sigmaX * sigmaY)
Itot = ImaxPSF[newaxis,:] / (twopi * sigmaX * sigmaY)
parad = np.radians(pa)
def g2d(r, theta):
v = np.zeros_like(r)
for i in xrange(len(q)):
angle = - parad[i] + theta + pi / 2.
# v += np.sum([Itot[i,j] * exp( - 0.5 * r**2 * ((cos(angle)/sigmaX[i,j])**2 + (sin(angle) / sigmaY[i,j])**2)) / Fgauss[i,j] for j in range(nPSF)], axis=0)
v += np.sum([Itot[i,j] * exp( - 0.5 * r**2 * ((cos(angle)/sigmaX[i,j])**2 + (sin(angle) / sigmaY[i,j])**2)) for j in range(nPSF)], axis=0)
return v.ravel()
return g2d
# ==========================================================================
#---------- Centred (no offset) 2D Gaussian but without flux ---------------##
#---------- Returns a function which provide a set of N normalised Gaussians
#---------- But NORMALISED BY the data
# ==========================================================================
def _n_centred_twodgaussian_Datanorm(pars, parPSF=_default_parPSF, logsigma=None, q=None, pa=None):
"""
Returns a function that provides an array of N NORMALISED 2D gaussians,
where N is the length of q,sigma,pa *OR* N = len(pars) / 3. These
Gaussians are also normalised by an input data array
(the returned function takes 3 arguments: r, theta and data)
pars - an array with len(pars) = 3n, assuming q, logsigma, pa repeated n times
logsigma, q, pa - alternative line widths (in log10), axis ratios, PA
if pars is not provided (PA in degrees).
"""
pars = pars.ravel()
sigma = 10**(pars[::3])
q = pars[1::3]
pa = pars[2::3]
## Convolution of the sigmas with the PSF
ImaxPSF, sigmaPSF = parPSF[::2], parPSF[1::2]
nPSF = len(ImaxPSF)
sigmaX = np.sqrt(sigma[:,newaxis]**2 + sigmaPSF**2)
sigmaY = np.sqrt(((q * sigma)**2)[:,newaxis] + sigmaPSF**2)
# Fgauss = twopi * sigmaX * sigmaY
# Itot = (sigma**2 * q)[:,newaxis] * ImaxPSF[newaxis,:] / (sigmaX * sigmaY)
Itot = ImaxPSF[newaxis,:] / (twopi * sigmaX * sigmaY)
parad = np.radians(pa)
def g2d_datanorm(r, theta, data):
v = np.zeros((np.size(r),len(q)))
for i in xrange(len(q)):
angle = - parad[i] + theta + pi / 2. # in radians
# v[:,i] = np.sum([Itot[i,j] * exp( - 0.5 * r**2 * ((cos(angle)/sigmaX[i,j])**2 + (sin(angle) / sigmaY[i,j])**2) ) \
# / (Fgauss[i,j]) for j in range(nPSF)], axis=0) / data
v[:,i] = np.sum([Itot[i,j] * exp( - 0.5 * r**2 * ((cos(angle)/sigmaX[i,j])**2 + (sin(angle) / sigmaY[i,j])**2)) for j in range(nPSF)], axis=0) / data
return v
return g2d_datanorm
# ==========================================================================
##------ Find the best set of amplitudes for fixed q,sigma,pa ----- ##
##------ This is a linear bounded solution using BVLS
# ==========================================================================
def optimise_twodgaussian_amp_bvls(pars, parPSF=_default_parPSF, r=None, theta=None, data=None) :
"""
    Returns the best set of amplitudes for a given set of q,sigma,pa
for a set of N 2D Gaussian functions
The function returns the result of the BVLS solving by LLSP (openopt)
pars : input parameters including q, sigma, pa (in degrees)
r : radii
theta : angle for each point in radians
data : data to fit
data, theta and r should have the same size
"""
pars = pars.ravel()
ngauss = len(pars) / 3
    ## First get the normalised values from the gaussians
    ## They are also divided by the data so that the target of the fit is a vector of ones
nGnorm = _n_centred_twodgaussian_Datanorm(pars=pars, parPSF=parPSF)(r,theta,data)
## This is the vector we wish to get as close as possible
## The equation being : Sum_n In * (G2D_n) = 1.0
## or I x G = d
d = np.ones(np.size(r), dtype=floatFit)
## Lower and upper bounds (only positiveness)
lb = np.zeros(ngauss)
ub = lb + np.inf
## Set up LLSP with the parameters and data (no print)
parBVLS = LLSP(nGnorm, d, lb=lb, ub=ub, iprint=-1)
## Return the solution
sol_bvls = parBVLS.solve('bvls')
del parBVLS
return nGnorm, sol_bvls.xf
# ==========================================================================
##------ Find the best set of amplitudes for fixed q,sigma,pa ----- ##
##------ This is a linear bounded solution using NNLS
# ==========================================================================
def optimise_twodgaussian_amp_nnls(pars, parPSF=_default_parPSF, r=None, theta=None, data=None) :
"""
    Returns the best set of amplitudes for a given set of q,sigma,pa
for a set of N 2D Gaussian functions
The function returns the result of the NNLS solving (scipy)
pars : input parameters including q, sigma, pa (in degrees)
r : radii
theta : angle for each point in radians
data : data to fit
data, theta and r should have the same size
"""
pars = pars.ravel()
    ## First get the normalised values from the gaussians
    ## They are also divided by the data so that the target of the fit is a vector of ones
nGnorm = _n_centred_twodgaussian_Datanorm(pars=pars, parPSF=parPSF)(r,theta,data)
## This is the vector we wish to get as close as possible
## The equation being : Sum_n In * (G2D_n) = 1.0
## or I x G = d
d = np.ones(np.size(r), dtype=floatFit)
## Use NNLS to solve the linear bounded (0) equations
sol_nnls, norm_nnls = nnls(nGnorm, d)
return nGnorm, sol_nnls
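## ---------------------------------------------------------------------------
## Illustrative sketch (added; not part of the original module): the linear
## step solved above amounts to finding non-negative amplitudes I such that
## G.dot(I) ~ 1, where each column of G is one normalised Gaussian divided by
## the data. A tiny toy system with two "Gaussians" and three data points:
def _example_linear_amplitude_step():
    G = np.array([[0.5, 0.1],
                  [0.2, 0.4],
                  [0.1, 0.6]])
    d = np.ones(3)
    ## non-negative least squares: amplitudes and residual norm
    amplitudes, residual_norm = nnls(G, d)
    return amplitudes, residual_norm
## ---------------------------------------------------------------------------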
################################################################################
## Find the best set of N 2D Gaussians whose sums to the input data
##
## This is a non-linear least squares problem,
## split into a non-linear one (q,sigma,pa) - solved with lmfit or mpfit
## and a linear one on the amplitude - solved with NNLS or BVLS
##
################################################################################
## Default values
default_minpars=[-3,0.05,-180.]
default_maxpars=[3.0,1.0,180.]
default_fixed=[False,False,True]
default_limitedmin=[True,True,True]
default_limitedmax=[True,True,True]
dic_linear_methods = {"nnls": optimise_twodgaussian_amp_nnls, "bvls": optimise_twodgaussian_amp_bvls}
def set_parameters_and_default(parlist, default, ngauss) :
"""
Set up the parameters given a default and a number of gaussians
Input is
parlist : the input parameters you wish to set
default : the default when needed for one gaussian
ngauss : the number of Gaussians
"""
    ## If parlist already has the right length, just keep it
if parlist is None : parlist = []
if len(parlist) != 3*ngauss:
## If the length is 3, then it is just to be replicated
## ngauss times
if len(parlist) == 3:
parlist = parlist * ngauss
## Otherwise you need to use the default times the number of Gaussians
elif len(default) == 3*ngauss :
parlist[:] = default
elif len(default) == 3 :
parlist[:] = default * ngauss
else :
print "ERROR: wrong passing of arguments in set_parameters_and_default"
print "ERROR: the default has not the right size ", len(default)
return parlist
##----------- Regrow the parameters when samePA is true ---------##
def regrow_PA(p) :
pall = np.zeros(((len(p)-1)//2, 3), floatFit)
pall[:,0] = p[:-1:2]
pall[:,1] = p[1::2]
pall[:,2].fill(p[-1])
return pall.ravel()
##----------- Shrink the parameters when samePA is true ---------##
def shrink_PA(parinfo) :
indpop = 2
ngauss = len(parinfo) // 3
for ii in range(ngauss-1) :
parinfo.pop(indpop)
indpop += 2
return parinfo
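## Illustrative note (added; not from the original module): with samePA the
## optimiser only sees a shrunk vector like [logsig1, q1, logsig2, q2, PA];
## regrow_PA expands it back to [logsig1, q1, PA, logsig2, q2, PA] before the
## amplitudes are solved for, while shrink_PA removes the redundant PA entries
## from an mpfit parinfo list.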
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data WITH ERRORS
## -----------------------------------------------------------------------------------------
def fitn2dgauss_residuals_err1(err, nGnorm, Iamp) :
return ((1.0 - nGnorm.dot(Iamp)) / err.ravel())
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data
## -----------------------------------------------------------------------------------------
def fitn2dgauss_residuals1(nGnorm, Iamp) :
return (1.0 - nGnorm.dot(Iamp))
## -----------------------------------------------------------------------------------------
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data WITH ERRORS
## -----------------------------------------------------------------------------------------
# def fitn2dgauss_residuals_err(par, parPSF, r, theta, data, err) :
# return ((data.ravel() - _n_centred_twodgaussian_Inorm(pars=par, parPSF=parPSF)(r, theta))/(data.ravel() * err.ravel()))
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data WITH ERRORS
## -----------------------------------------------------------------------------------------
def fitn2dgauss_chi2_err(par, parPSF, r, theta, data, err) :
return np.sum(((data.ravel() - _n_centred_twodgaussian_Inorm(pars=par, parPSF=parPSF)(r, theta))/(data.ravel() * err.ravel()))**2)
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data
## -----------------------------------------------------------------------------------------
# def fitn2dgauss_residuals(par, parPSF, r, theta, data) :
# return (1.0 - _n_centred_twodgaussian_Inorm(pars=par, parPSF=parPSF)(r, theta) / data.ravel())
## -----------------------------------------------------------------------------------------
## Return the difference between a model and the data
## -----------------------------------------------------------------------------------------
# def fitn2dgauss_chi2(par, parPSF, r, theta, data) :
# return np.sum((1.0 - _n_centred_twodgaussian_Inorm(pars=par, parPSF=parPSF)(r, theta)/data.ravel())**2)
## -------------------------------------------------------------------------------------
## Printing routine for mpfit
## -------------------------------------------------------------------------------------
class _mpfitprint(object):
def __init__(self):
self.pars = []
self.chi2 = []
self.parinfo = []
def __call__(self, mpfitfun, p, iter, fnorm, functkw=None, parinfo=None, quiet=0, dof=None):
self.chi2.append(fnorm)
self.pars.append(p)
self.parinfo.append(parinfo)
print "Chi2 = ", fnorm
## -------------------------------------------------------------------------------------
SLOPE_outer = 2.0
################################################################################
# MPFIT version of the multigauss 2D fitting routine
################################################################################
def multi_2dgauss_mpfit(xax, yax, data, ngauss=1, err=None, params=None, paramsPSF=None,
fixed=None, limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, force_Sigma_bounds=True, factor_Chi2=1.01, verbose=True, veryverbose=False,
linear_method="nnls", default_q=0.3, default_PA=0.0, samePA=True, minSigma=None, maxSigma=None,
mpfitprint=_mpfitprint(), **fcnargs):
"""
An improvement on gaussfit. Lets you fit multiple 2D gaussians.
Inputs:
xax - x axis
yax - y axis
data - count axis
ngauss - How many gaussians to fit? Default 1
err - error corresponding to data
These parameters need to have length = 3*ngauss. If ngauss > 1 and length = 3, they will
be replicated ngauss times, otherwise they will be reset to defaults:
params - Fit parameters: [width, axis ratio, pa] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
force_Sigma_bounds: force the Sigmas to be within the radii range with some margin
default to True
    factor_Chi2 : if one Gaussian contributes less than (factor-1) to the Chi2, we remove it
           If set to 1, it means only zero Gaussians will be removed
           If set to the default=1.01, it means any Gaussian contributing less than 1% will be
           removed
minSigma, maxSigma: default to None but can be set for bounds for Sigma
    linear_method: Method used to solve the linear part of the problem
Two methods are implemented:
"nnls" -> NNLS (default, included in scipy)
"bvls" -> LLSP/BVLS in openopt (only if available)
The variable Exist_OpenOpt is (internally) set to True if available
**fcnargs - Will be passed to MPFIT, you can for example use:
xtol, gtol, ftol, quiet
verbose - self-explanatory
veryverbose - self-explanatory
Returns:
Fit parameters
Model
Fit errors
chi2
"""
import copy
## Set up some default parameters for mpfit
if "xtol" not in fcnargs.keys() : fcnargs["xtol"] = 1.e-7
if "gtol" not in fcnargs.keys() : fcnargs["gtol"] = 1.e-7
if "ftol" not in fcnargs.keys() : fcnargs["ftol"] = 1.e-7
if "quiet" not in fcnargs.keys() : fcnargs["quiet"] = True
## Checking the method used for the linear part
if linear_method == "bvls" and not Exist_OpenOpt :
print "WARNING: you selected BVLS, but OpenOpt is not installed"
print "WARNING: we will therefore use NNLS instead"
linear_method == "nnls"
## Checking if the linear_method is implemented
if linear_method.lower() not in dic_linear_methods.keys():
print "ERROR: you should use one of the following linear_method: ", dic_linear_methods.keys()
return 0, 0, 0, 0
f_Get_Iamp = dic_linear_methods[linear_method.lower()]
    ## If no coordinates are given, create them
if xax is None:
xax = np.arange(len(data))
if yax is None:
yax = np.arange(len(data))
if not isinstance(xax,np.ndarray):
xax = np.asarray(xax)
if not isinstance(yax,np.ndarray):
yax = np.asarray(yax)
if not isinstance(data,np.ndarray):
data = np.asarray(data)
xax = xax.ravel()
yax = yax.ravel()
datashape = data.shape
data = data.ravel()
## Polar coordinates
r, theta = convert_xy_to_polar(xax, yax)
selxy = (xax != 0) & (yax != 0)
rin = sqrt(xax[selxy]**2+yax[selxy]**2)
if minSigma is None : minSigma = np.min(rin)
if maxSigma is None : maxSigma = np.max(rin) / sqrt(SLOPE_outer)
lminSigma = np.log10(minSigma)
lmaxSigma = np.log10(maxSigma)
DlSigma = 0.5 * (lmaxSigma - lminSigma) / ngauss
if isinstance(params,np.ndarray): params=params.tolist()
if params is not None :
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
if verbose :
print "WARNING: Your input parameters do not fit the Number of input Gaussians"
print "WARNING: the new number of input Gaussians is: ", ngauss
## Extracting the parameters for the PSF and normalising the Imax for integral = 1
if paramsPSF is None :
paramsPSF = _default_parPSF
paramsPSF = norm_PSFParam(paramsPSF)
## if no input parameters are given, we set up the guess as a log spaced sigma between min and max
default_params = np.concatenate((np.log10(np.logspace(lminSigma + DlSigma, lmaxSigma - DlSigma, ngauss)), \
np.array([default_q]*ngauss), np.array([default_PA]*ngauss))).reshape(3,ngauss).transpose().ravel()
newdefault_minpars = copy.copy(default_minpars)
newdefault_maxpars = copy.copy(default_maxpars)
if force_Sigma_bounds :
newdefault_minpars[0] = lminSigma
newdefault_maxpars[0] = lmaxSigma
else :
newdefault_minpars[0] = lminSigma - np.log10(2.)
newdefault_maxpars[0] = lmaxSigma + np.log10(2.)
## Set up the default parameters if needed
params = set_parameters_and_default(params, default_params, ngauss)
fixed = set_parameters_and_default(fixed, default_fixed, ngauss)
limitedmin = set_parameters_and_default(limitedmin, default_limitedmin, ngauss)
limitedmax = set_parameters_and_default(limitedmax, default_limitedmax, ngauss)
minpars = set_parameters_and_default(minpars, newdefault_minpars, ngauss)
maxpars = set_parameters_and_default(maxpars, newdefault_maxpars, ngauss)
## -------------------------------------------------------------------------------------
## mpfit function which returns the residual from the best fit N 2D Gaussians
## Parameters are just sigma,q,pa - the amplitudes are optimised at each step
## Two versions are available depending on whether BVLS or NNLS is used (and available)
## -------------------------------------------------------------------------------------
def mpfitfun(p, parPSF, fjac=None, r=None, theta=None, err=None, data=None, f_Get_Iamp=None, samePA=False):
if samePA : p = regrow_PA(p)
nGnorm, Iamp = f_Get_Iamp(p, parPSF, r, theta, data)
if err is None :
return [0,fitn2dgauss_residuals1(nGnorm, Iamp)]
else :
return [0,fitn2dgauss_residuals_err1(err, nGnorm, Iamp)]
# newp = (np.vstack((Iamp, p.reshape(ngauss,3).transpose()))).transpose()
# if err is None :
# return [0,fitn2dgauss_residuals(newp, parPSF, r, theta, data)]
# else :
# return [0,fitn2dgauss_residuals_err(newp, parPSF, r, theta, data, err)]
## -------------------------------------------------------------------------------------
## Information about the parameters
if verbose :
print "--------------------------------------"
print "GUESS: Sig Q PA"
print "--------------------------------------"
for i in xrange(ngauss) :
print "GAUSS %02d: %8.3e %8.3f %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
print "--------------------------------------"
## Information about the parameters
parnames = {0:"LOGSIGMA",1:"AXIS RATIO",2:"POSITION ANGLE"}
parinfo = [ {'n':ii, 'value':params[ii], 'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii/3+1)} for ii in xrange(len(params)) ]
## If samePA we remove all PA parameters except the last one
## We could use the 'tied' approach but we prefer setting up just one parameter
if samePA : parinfo = shrink_PA(parinfo)
## Fit with mpfit of q, sigma, pa on xax, yax, and data (+err)
fa = {'parPSF':paramsPSF, 'r': r, 'theta': theta, 'data': data, 'err':err, 'f_Get_Iamp':f_Get_Iamp, 'samePA':samePA}
result = mpfit(mpfitfun, functkw=fa, iterfunct=mpfitprint, nprint=10, parinfo=parinfo, **fcnargs)
    ## Getting these best fit values into the dictionary
if samePA : result.params = regrow_PA(result.params)
bestparinfo = [ {'n':ii, 'value':result.params[ii], 'limits':[minpars[ii],maxpars[ii]],
'limited':[limitedmin[ii],limitedmax[ii]], 'fixed':fixed[ii],
'parname':parnames[ii%3]+str(ii/3)} for ii in xrange(len(result.params)) ]
## Recompute the best amplitudes to output the right parameters
## And renormalising them
nGnorm, Iamp = f_Get_Iamp(result.params, paramsPSF, r, theta, data)
# Ibestpar_array = (np.vstack((Iamp, result.params.reshape(ngauss,3).transpose()))).transpose()
bestpar_array = result.params.reshape(ngauss,3)
## Getting rid of the non-relevant Gaussians
## If parameters factor_Chi2 is set we use it as a threshold to remove gaussians
## Otherwise we just remove the zeros
if err is None : nerr = np.ones_like(data)
else : nerr = err
## First get the Chi2 from this round
# bestChi2 = fitn2dgauss_chi2_err(Ibestpar_array, paramsPSF, r, theta, data, nerr)
bestChi2 = np.sum(fitn2dgauss_residuals1(nGnorm, Iamp)**2)
result.ind = range(ngauss)
k = 0
Removed_Gaussians = []
for i in xrange(ngauss) :
## Derive the Chi2 WITHOUT the ith Gaussian
new_nGnorm, new_Iamp = f_Get_Iamp(np.delete(bestpar_array, i, 0), paramsPSF, r, theta, data)
newChi2 = np.sum(fitn2dgauss_residuals1(new_nGnorm, new_Iamp)**2)
# newChi2 = fitn2dgauss_chi2_err(np.delete(Ibestpar_array, i, 0), paramsPSF, r, theta, data, nerr)
## If this Chi2 is smaller than factor_Chi2 times the best value, then remove
## It just means that Gaussian is not an important contributor
if newChi2 <= factor_Chi2 * bestChi2 :
val = bestparinfo.pop(3*k)
val = bestparinfo.pop(3*k)
val = bestparinfo.pop(3*k)
result.ind.pop(k)
Removed_Gaussians.append(i+1)
else : k += 1
if veryverbose :
if len(Removed_Gaussians) != 0 :
print "WARNING Removed Gaussians ", Removed_Gaussians
print "WARNING: (not contributing enough to the fit)"
ngauss = len(result.ind)
## New minimisation after removing all the non relevant Gaussians
if samePA : bestparinfo = shrink_PA(bestparinfo)
newresult = mpfit(mpfitfun, functkw=fa, iterfunct=mpfitprint, nprint=10, parinfo=bestparinfo, **fcnargs)
newresult.ind = range(ngauss)
if samePA : newresult.params = regrow_PA(newresult.params)
bestfit_params = newresult.params.reshape(ngauss, 3)
## We add the Amplitudes to the array and renormalise them
nGnorm, Iamp = f_Get_Iamp(bestfit_params, paramsPSF, r, theta, data)
Ibestfit_params = (np.vstack((Iamp, bestfit_params.transpose()))).transpose()
## Going back to sigma from logsigma
Ibestfit_params[:,1] = 10**(Ibestfit_params[:,1])
Ibestfit_params[:,0] /= (2.0 * Ibestfit_params[:,1]**2 * Ibestfit_params[:,2] * pi)
## And we sort them with Sigma
Ibestfit_params = Ibestfit_params[Ibestfit_params[:,1].argsort()]
if newresult.status == 0:
raise Exception(newresult.errmsg)
if verbose :
print "=================================================="
print "FIT: Imax Sig Q PA"
print "=================================================="
for i in xrange(ngauss) :
print "GAUSS %02d: %8.3e %8.3f %8.3f %8.3f"%(i+1, Ibestfit_params[i,0], Ibestfit_params[i,1], Ibestfit_params[i,2], Ibestfit_params[i,3])
print "Chi2: ",newresult.fnorm," Reduced Chi2: ",newresult.fnorm/len(data)
return Ibestfit_params, newresult, n_centred_twodgaussian_Imax(pars=Ibestfit_params, parPSF=paramsPSF)(r, theta).reshape(datashape)
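## ---------------------------------------------------------------------------
## Illustrative sketch (added; not part of the original module): a minimal call
## to multi_2dgauss_mpfit on a fake, noise-free single-Gaussian image. Grid
## sizes, amplitudes and the small positive floor are arbitrary choices; the
## floor keeps the data strictly positive, as required by the 1/data
## normalisation used in the fit. The function is never called.
def _example_multi_2dgauss_mpfit():
    X, Y = np.meshgrid(np.linspace(-10., 10., 41), np.linspace(-10., 10., 41))
    data = 100. * np.exp(-0.5 * (X**2 + (Y / 0.7)**2) / 3.0**2) + 1.e-3
    bestpars, result, model = multi_2dgauss_mpfit(X, Y, data, ngauss=2,
                                                  verbose=False)
    ## bestpars columns are [Imax, sigma, q, PA], sorted by sigma
    return bestpars, model
## ---------------------------------------------------------------------------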
################################################################################
# LMFIT version of the multigauss 2D fitting routine
################################################################################
##------ Transform the Parameters class into a single array ---- ##
def extract_mult2dG_params(Params) :
ind = Params.ind
ngauss = len(ind)
p = np.zeros((ngauss, 3), floatFit)
for i in xrange(ngauss) :
p[i,0] = Params['logSigma%02d'%(ind[i]+1)].value
p[i,1] = Params['Q%02d'%(ind[i]+1)].value
if Params['PA%02d'%(ind[i]+1)].value is None :
p[i,2] = Params[Params['PA%02d'%(ind[i]+1)].expr].value
else :
p[i,2] = Params['PA%02d'%(ind[i]+1)].value
return p
##------ Reimpose the same PA on all the parameters -----##
def Set_SamePA_params(Params, ngauss, valuePA, minPA, maxPA, varyPA) :
## Now we reset the Parameters according to the PA
## So we look for the first Gaussian PA
FirstParamName = 'PA%02d'%(Params.ind[0]+1)
FirstParam = Params[FirstParamName]
# And we save its value
FirstParam.value, FirstParam.min, FirstParam.max, FirstParam.vary = valuePA, minPA, maxPA, varyPA
FirstParam.expr = None
# Now we set the others to be the same than the first one (using the "expr" option)
for i in range(1, ngauss) :
currentParamName = 'PA%02d'%(Params.ind[i]+1)
currentParam = Params[currentParamName]
currentParam.value, currentParam.min, currentParam.max, currentParam.vary = None, None, None, False
currentParam.expr = FirstParamName
# Returning the updated Parameters
return Params
##------------- Printing option for lmfit -------------- ##
class lmfit_iprint(object):
def __init__(self):
self.chi2 = []
self.pars = []
def __call__(self, res, myinput, pars):
""" Printing function for the iteration in lmfit
"""
if (myinput.iprint > 0) & (myinput.verbose):
if myinput.aprint == myinput.iprint:
chi2 = ((res*res).sum())
self.chi2.append(chi2)
self.pars.append(pars)
print "Chi2 = %g" % chi2
myinput.aprint = 0
myinput.aprint += 1
def multi_2dgauss_lmfit(xax, yax, data, ngauss=1, err=None, params=None, paramsPSF=None,
fixed=None, limitedmin=None, limitedmax=None, minpars=None,
maxpars=None, force_Sigma_bounds=True, factor_Chi2=1.01, iprint=50, lmfit_method='leastsq',
verbose=True, veryverbose=False, linear_method="nnls", default_q=0.3, default_PA=0.0,
samePA=True, sameQ=False, minSigma=None, maxSigma=None, lmfit_iprint=lmfit_iprint(), **fcnargs):
"""
An improvement on gaussfit. Lets you fit multiple 2D gaussians.
Inputs:
xax - x axis
yax - y axis
data - count axis
ngauss - How many gaussians to fit? Default 1
err - error corresponding to data
These parameters need to have the same length.
It should by default be 3*ngauss.
If ngauss > 1 and length = 3, they will be replicated ngauss times,
otherwise they will be reset to defaults:
params - Fit parameters: [width, axis ratio, pa] * ngauss
If len(params) % 3 == 0, ngauss will be set to len(params) / 3
fixed - Is parameter fixed?
limitedmin/minpars - set lower limits on each parameter (default: width>0)
limitedmax/maxpars - set upper limits on each parameter
force_Sigma_bounds: force the Sigmas to be within the radii range with some margin
default to True
    factor_Chi2 : if one Gaussian contributes less than (factor-1) to the Chi2, we remove it
           If set to 1, it means only zero Gaussians will be removed
           If set to the default=1.01, it means any Gaussian contributing less than 1% will be
           removed
minSigma, maxSigma: default to None but can be set for bounds for Sigma
samePA : by default set to True. In that case, only one PA value is used as a free parameter
(all Gaussians will share the same PA)
sameQ: by default set to False. In that case, only one Axis ratio value is used as a free parameter
(all Gaussians will share the same axis ratio)
lmfit_method : method to pass on to lmfit ('leastsq', 'lbfgsb', 'anneal')
Default is leastsq (most efficient for the problem)
    linear_method: Method used to solve the linear part of the problem
Two methods are implemented:
"nnls" -> NNLS (default, included in scipy)
"bvls" -> LLSP/BVLS in openopt (only if available)
The variable Exist_OpenOpt is (internally) set to True if available
**fcnargs - dictionary which will be passed to LMFIT, you can for example use:
xtol , gtol, ftol, etc
iprint - if > 0, print every iprint iterations of lmfit. default is 50
verbose - self-explanatory
veryverbose - self-explanatory
Returns:
Fit parameters
Model
Fit errors
chi2
"""
import copy
## Default values
lmfit_methods = ['leastsq', 'lbfgsb', 'anneal']
## Method check
if lmfit_method not in lmfit_methods :
print "ERROR: method must be one of the three following methods : ", lmfit_methods
## Setting up epsfcn if not forced by the user
## Removing epsfcn to get the default machine precision
## if "epsfcn" not in fcnargs.keys() : fcnargs["epsfcn"] = 0.01
if "xtol" not in fcnargs.keys() : fcnargs["xtol"] = 1.e-7
if "gtol" not in fcnargs.keys() : fcnargs["gtol"] = 1.e-7
if "ftol" not in fcnargs.keys() : fcnargs["ftol"] = 1.e-7
## Checking the method used for the linear part
if linear_method == "bvls" and not Exist_OpenOpt :
print "WARNING: you selected BVLS, but OpenOpt is not installed"
print "WARNING: we will therefore use NNLS instead"
linear_method == "nnls"
## Checking if the linear_method is implemented
if linear_method.lower() not in dic_linear_methods.keys():
print "ERROR: you should use one of the following linear_method: ", dic_linear_methods.keys()
return 0, 0, 0
f_Get_Iamp = dic_linear_methods[linear_method.lower()]
    ## If no coordinates are given, create them
if xax is None:
xax = np.arange(len(data))
if yax is None:
yax = np.arange(len(data))
if not isinstance(xax,np.ndarray):
xax = np.asarray(xax)
if not isinstance(yax,np.ndarray):
yax = np.asarray(yax)
if not isinstance(data,np.ndarray):
data = np.asarray(data)
xax = xax.ravel()
yax = yax.ravel()
datashape = data.shape
data = data.ravel()
## Polar coordinates
r, theta = convert_xy_to_polar(xax, yax)
selxy = (xax != 0) & (yax != 0)
rin = sqrt(xax[selxy]**2+yax[selxy]**2)
if minSigma is None : minSigma = np.min(rin)
if maxSigma is None : maxSigma = np.max(rin) / sqrt(SLOPE_outer)
lminSigma = np.log10(minSigma)
lmaxSigma = np.log10(maxSigma)
DlSigma = 0.5 * (lmaxSigma - lminSigma) / ngauss
if isinstance(params,np.ndarray): params=params.tolist()
if params is not None :
if len(params) != ngauss and (len(params) / 3) > ngauss:
ngauss = len(params) / 3
if verbose :
print "WARNING: Your input parameters do not fit the Number of input Gaussians"
print "WARNING: the new number of input Gaussians is: ", ngauss
## Extracting the parameters for the PSF and normalising the Imax for integral = 1
if paramsPSF is None :
paramsPSF = _default_parPSF
paramsPSF = norm_PSFParam(paramsPSF)
## if no input parameters are given, we set up the guess as a log spaced sigma between min and max
default_params = np.concatenate((np.log10(np.logspace(lminSigma + DlSigma, lmaxSigma - DlSigma, ngauss)), \
np.array([default_q]*ngauss), np.array([default_PA]*ngauss))).reshape(3,ngauss).transpose().ravel()
newdefault_minpars = copy.copy(default_minpars)
newdefault_maxpars = copy.copy(default_maxpars)
if force_Sigma_bounds :
newdefault_minpars[0] = lminSigma
newdefault_maxpars[0] = lmaxSigma
else :
newdefault_minpars[0] = lminSigma - np.log10(2.)
newdefault_maxpars[0] = lmaxSigma + np.log10(2.)
## Set up the default parameters if needed
params = set_parameters_and_default(params, default_params, ngauss)
fixed = set_parameters_and_default(fixed, default_fixed, ngauss)
limitedmin = set_parameters_and_default(limitedmin, default_limitedmin, ngauss)
limitedmax = set_parameters_and_default(limitedmax, default_limitedmax, ngauss)
minpars = set_parameters_and_default(minpars, newdefault_minpars, ngauss)
maxpars = set_parameters_and_default(maxpars, newdefault_maxpars, ngauss)
class input_residuals() :
def __init__(self, iprint, verbose) :
self.iprint = iprint
self.verbose = verbose
self.aprint = 0
## -----------------------------------------------------------------------------------------
## lmfit function which returns the residual from the best fit N 2D Gaussians
## Parameters are just sigma,q,pa - the amplitudes are optimised at each step
## -----------------------------------------------------------------------------------------
def opt_lmfit(pars, parPSF, myinput=None, r=None, theta=None, err=None, data=None, f_Get_Iamp=None):
""" Provide the residuals for the lmfit minimiser
for a Multi 1D gaussian
"""
# We retrieve the parameters
pars_array = extract_mult2dG_params(pars)
## Derive the Normalised Gaussians for this set of parameters
nGnorm, Iamp = f_Get_Iamp(pars_array, parPSF, r, theta, data)
if err is None :
res = fitn2dgauss_residuals1(nGnorm, Iamp)
else :
res = fitn2dgauss_residuals_err1(err, nGnorm, Iamp)
# newp = (np.vstack((Iamp, pars_array.transpose()))).transpose()
# if err is None :
# res = fitn2dgauss_residuals(newp, parPSF, r, theta, data)
# else :
# res = fitn2dgauss_residuals_err(newp, parPSF, r, theta, data, err)
lmfit_iprint(res, myinput, pars)
return res
## -----------------------------------------------------------------------------------------
## Information about the parameters
nameParam = ['logSigma', 'Q', 'PA']
Lparams = Parameters()
if verbose :
print "--------------------------------------"
print "GUESS: Sig Q PA"
print "--------------------------------------"
for i in xrange(ngauss) :
print "GAUSS %02d: %8.3e %8.3f %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
print "--------------------------------------"
for i in xrange(ngauss) :
Lparams.add(nameParam[0]+"%02d"%(i+1), value=params[3*i], min=minpars[3*i], max=maxpars[3*i], vary= not fixed[3*i])
Lparams.add(nameParam[1]+"%02d"%(i+1), value=params[3*i+1], min=minpars[3*i+1], max=maxpars[3*i+1], vary= not fixed[3*i+1])
Lparams.add(nameParam[2]+"%02d"%(i+1), value=params[3*i+2], min=minpars[3*i+2], max=maxpars[3*i+2], vary= not fixed[3*i+2])
## Adding indices to follow up the Gaussians we may remove
Lparams.ind = range(ngauss)
## Setting the samePA option if True
## For this we set up the first PA to the default and
## then use "expr" to say that all other PA are equal to the first one
if samePA:
Lparams = Set_SamePA_params(Lparams, ngauss, params[2], minpars[2], maxpars[2], not fixed[2])
if veryverbose :
for i in xrange(ngauss) :
print "GAUSS %02d: %8.3e %8.3f %8.3f"%(i+1, 10**(params[3*i]), params[3*i+1], params[3*i+2])
if samePA:
print "WARNING: All PAs will be forced to one single value"
print "--------------------------------------"
## Setting up the printing option
myinput = input_residuals(iprint, verbose)
####################################
## Doing the minimisation with lmfit
####################################
if verbose:
print "------ Starting the minimisation -------"
result = minimize(opt_lmfit, Lparams, method=lmfit_method, args=(paramsPSF, myinput, r, theta, err, data, f_Get_Iamp), **fcnargs)
## Remove the Null Gaussians
result.params.ind = range(ngauss)
ngauss, Ind_ZGauss = Remove_Zero_2DGaussians(ngauss, nameParam, result, paramsPSF, r, theta, data, err, factor_Chi2,
f_Get_Iamp, niter=1, verbose=veryverbose, samePA=samePA)
## Recall the Minimizer function for a second iteration to get the new chi2 etc
newresult = minimize(opt_lmfit, result.params, method=lmfit_method, args=(paramsPSF, myinput, r, theta, err, data, f_Get_Iamp), **fcnargs)
## Remove the Null Gaussians
newresult.params.ind = result.params.ind
ngauss, Ind_ZGauss = Remove_Zero_2DGaussians(ngauss, nameParam, newresult, paramsPSF, r, theta, data, err, factor_Chi2,
f_Get_Iamp, niter=2, verbose=veryverbose, samePA=samePA)
## We add the Amplitudes to the array and renormalise them
bestfit_params = extract_mult2dG_params(newresult.params)
nGnorm, Iamp = f_Get_Iamp(bestfit_params, paramsPSF, r, theta, data)
Ibestfit_params = (np.vstack((Iamp, bestfit_params.transpose()))).transpose()
## Changing the parameters back to Sigma
Ibestfit_params[:,1] = 10**(Ibestfit_params[:,1])
Ibestfit_params[:,0] /= (2.0 * Ibestfit_params[:,1]**2 * Ibestfit_params[:,2] * pi)
## And we sort them with Sigma
Ibestfit_params = Ibestfit_params[Ibestfit_params[:,1].argsort()]
if verbose :
print "=================================================="
print "FIT: Imax Sig Q PA"
print "=================================================="
for i in xrange(ngauss) :
print "GAUSS %02d: %8.3e %8.3f %8.3f %8.3f"%(i+1, Ibestfit_params[i,0], Ibestfit_params[i,1], Ibestfit_params[i,2], Ibestfit_params[i,3])
print "Chi2: ",newresult.chisqr," Reduced Chi2: ",newresult.redchi
return Ibestfit_params, newresult, n_centred_twodgaussian_Imax(pars=Ibestfit_params, parPSF=paramsPSF)(r, theta).reshape(datashape)
def Remove_Zero_2DGaussians(ngauss, nameParam, result, parPSF, r, theta, data, err, factor_Chi2, f_Get_Iamp, niter=1, verbose=False, samePA=True) :
## Recompute the best amplitudes and remove the ones that are zeros
bestpar_array = extract_mult2dG_params(result.params)
nGnorm, Iamp = f_Get_Iamp(bestpar_array, parPSF, r, theta, data)
# ## New array including the Imax values
# Ibestpar_array = (np.vstack((Iamp, bestpar_array.transpose()))).transpose()
## Getting rid of the non-relevant Gaussians
## If parameters factor_Chi2 is set we use it as a threshold to remove gaussians
## Otherwise we just remove the zeros
if err is None : nerr = np.ones_like(data)
else : nerr = err
## First get the Chi2 from this round
bestChi2 = np.sum(fitn2dgauss_residuals1(nGnorm, Iamp)**2)
# bestChi2 = fitn2dgauss_chi2_err(Ibestpar_array, parPSF, r, theta, data, nerr)
k = 0
Removed_Gaussians = []
## If SamePA we need to save the first set of parameters for the PA
if samePA :
FirstParamName = nameParam[2]+'%02d'%(result.params.ind[0]+1)
FirstParam = result.params[FirstParamName]
valuePA, minPA, maxPA, varyPA = FirstParam.value, FirstParam.min, FirstParam.max, FirstParam.vary
for i in xrange(ngauss) :
## Derive the Chi2 WITHOUT the ith Gaussian
new_nGnorm, new_Iamp = f_Get_Iamp(np.delete(bestpar_array, i, 0), parPSF, r, theta, data)
newChi2 = np.sum(fitn2dgauss_residuals1(new_nGnorm, new_Iamp)**2)
# newChi2 = fitn2dgauss_chi2_err(np.delete(Ibestpar_array, i, 0), parPSF, r, theta, data, nerr)
## If this Chi2 is smaller than factor_Chi2 times the best value, then remove
## It just means that Gaussian is not an important contributor
if newChi2 <= factor_Chi2 * bestChi2 :
if verbose :
print "Removing Gaussian ", i
result.params.pop(nameParam[0]+'%02d'%(result.params.ind[k]+1))
result.params.pop(nameParam[1]+'%02d'%(result.params.ind[k]+1))
result.params.pop(nameParam[2]+'%02d'%(result.params.ind[k]+1))
result.params.ind.pop(k)
Removed_Gaussians.append(i+1)
else : k += 1
New_ngauss = len(result.params.ind)
## Setting the samePA option if True
## For this we set up the first PA to the default and
## then use "expr" to say that all other PA are equal to the first one
if samePA:
result.params = Set_SamePA_params(result.params, New_ngauss, valuePA, minPA, maxPA, varyPA)
if verbose :
if len(Removed_Gaussians) != 0 :
print "WARNING Removed Gaussians (iteration %d) : "%(niter), Removed_Gaussians
print "WARNING: (not contributing enough to the fit)"
return New_ngauss, Removed_Gaussians
|
emsellem/pygme
|
pygme/fitting/fitn2dgauss.py
|
Python
|
bsd-3-clause
| 49,600
|
[
"Gaussian"
] |
c311786b9f32e73c709079cf8afec545cbf24543a620565ef7946cc33dc7e93c
|
# import aeropy.CST_lib as cst
import aeropy.CST_3D as cst
import aeropy.CST_3D.mesh_tools as meshtools
import panairwrapper
from aeropy.filehandling.vtk import generate_surface
import numpy as np
import matplotlib.pyplot as plt
# wing_points = stl_processing.points_from_stl('./wing_right.stl')
# wing_points = np.genfromtxt('./wing_upper_points.csv', skip_header=1, delimiter=',')
aoa = 2.3067
# wing parameters
span = 4.578*2. # meters
eta_cp = [0., 0.276004, .552007, 1.]
taper = [1.0, 0.375317, .204817, .080035]
chord_root = 21.43
sweep = [0., 14.4476, 19.1612, 24.79405]
# sweep = [0., 0.]
dihedral = [0., 0.095311, 0.172374] # , 0., 0.]
eta_dihedral = [0., .552007, 1.]
twist = [0.1, -0.05, -0.1, -0.1]
f_sy = cst.piecewise_linear(eta_cp, taper)
# shape coefficients upper wing
a_mat_upper = np.array([[0.01263563, 0.02351035, 0.01410948, 0.02870071, 0.00672103, 0.01423143],
[0.01493258, 0.02318954, 0.0256106, 0.02404792, 0.0188881, 0.01735003],
[0.01451941, 0.03214948, 0.03073803, 0.03342343, 0.03707097, 0.03925866],
[0.01687441, 0.01973094, 0.05100545, 0.01154137, 0.05669724, 0.02297795]]).T
A0_u = cst.piecewise_linear(eta_cp, a_mat_upper[0])
A1_u = cst.piecewise_linear(eta_cp, a_mat_upper[1])
A2_u = cst.piecewise_linear(eta_cp, a_mat_upper[2])
A3_u = cst.piecewise_linear(eta_cp, a_mat_upper[3])
A4_u = cst.piecewise_linear(eta_cp, a_mat_upper[4])
A5_u = cst.piecewise_linear(eta_cp, a_mat_upper[5])
# shape coefficients lower wing
a_mat_lower = np.array([[-0.01115817, -0.00544143, -0.0136072, -0.01172747, -0.01432724, -0.01362163],
[-0.01205803, 0.00105121, -0.01297246, -0.00643726, -0.0084237, -0.00878171],
[-0.0115331, 0.00720371, 0.00013933, -0.0095523, 0.02167392, 0.00636361],
[-0.00905945, 0.0107793, -0.01631087, 0.0092086, 0.01001222, 0.01334978]]).T
A0_l = cst.piecewise_linear(eta_cp, a_mat_lower[0])
A1_l = cst.piecewise_linear(eta_cp, a_mat_lower[1])
A2_l = cst.piecewise_linear(eta_cp, a_mat_lower[2])
A3_l = cst.piecewise_linear(eta_cp, a_mat_lower[3])
A4_l = cst.piecewise_linear(eta_cp, a_mat_lower[4])
A5_l = cst.piecewise_linear(eta_cp, a_mat_lower[5])
A_upper = [A0_u, A1_u, A2_u, A3_u, A4_u, A5_u]
A_lower = [A0_l, A1_l, A2_l, A3_l, A4_l, A5_l]
f_sx_upper = cst.BernsteinPolynomial(5, A_upper)
f_sx_lower = cst.BernsteinPolynomial(5, A_lower)
f_xshear = cst.piecewise_linear(eta_cp, sweep)
f_zshear = cst.piecewise_linear(eta_dihedral, dihedral)
f_twist = cst.piecewise_linear(eta_cp, twist)
wing_upper = cst.CST3D(rotation=(0., aoa, 0.),
location=(12., 0., -0.535),
XYZ=(chord_root, span/2., chord_root),
ref=(0., 0., 0.),
sx=f_sx_upper,
nx=(0.5, 1.),
sy=f_sy,
ny=(0., 0.),
xshear=f_xshear,
zshear=f_zshear,
twist=f_twist)
wing_lower = cst.CST3D(rotation=(0., aoa, 0.),
location=(12., 0., -0.535),
XYZ=(chord_root, span/2., chord_root),
ref=(0., 0., 0.),
sx=f_sx_lower,
nx=(0.5, 1.),
sy=f_sy,
ny=(0., 0.),
xshear=f_xshear,
zshear=f_zshear,
twist=f_twist)
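# Illustrative note (added; not from the original script): each CST3D object
# above maps the unit parameter square (psi, eta) to 3-D surface coordinates,
# e.g. wing_upper(psi, eta) with psi/eta arrays built by
# meshtools.meshparameterspace, exactly as done for the panel networks below.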
# psi_w, eta_w = meshtools.meshparameterspace((20, 50),
# psi_spacing='cosine',
# eta_spacing='cosine')
# psi_stl, eta_stl = wing_upper.inverse(np.array([wing_points[:, 1]]),
# np.array([wing_points[:, 2]]),
# np.array([wing_points[:, 3]]))
# plt.scatter(psi_stl, eta_stl)
# plt.show()
# wing_mesh_u = wing_upper(psi_w, eta_w)
# wing_mesh_l = wing_lower(psi_w, eta_w)
# network_wu = np.dstack(wing_mesh_u)
# network_wl = np.dstack(wing_mesh_l)
# generate_surface(network_wu, "wing_upper")
# generate_surface(network_wl, "wing_lower")
# fuselage parameters
fuse_data = np.genfromtxt('./cross-section_fit_new2.txt', skip_header=1)
i_sort = np.argsort(fuse_data[:, 0])
fuse_data = fuse_data[i_sort]
y_section, x_LE, c, N1, N2, A0, A1, A2, A3, A4, A5, error = fuse_data.T
length = y_section[-1]
width = np.max(c)
eta_f_cp = y_section/length
fuse_xshear = cst.piecewise_linear(eta_f_cp, x_LE)
fuse_xtaper = cst.piecewise_linear(eta_f_cp, 1.001*c/width)
fuse_nx1 = cst.piecewise_linear(eta_f_cp, N1)
fuse_nx2 = cst.piecewise_linear(eta_f_cp, N2)
A0_f = cst.piecewise_linear(eta_f_cp, A0)
A1_f = cst.piecewise_linear(eta_f_cp, A1)
A2_f = cst.piecewise_linear(eta_f_cp, A2)
A3_f = cst.piecewise_linear(eta_f_cp, A3)
A4_f = cst.piecewise_linear(eta_f_cp, A4)
A5_f = cst.piecewise_linear(eta_f_cp, A5)
f_sx_fuse = cst.BernsteinPolynomial(5, [A0_f, A1_f, A2_f, A3_f, A4_f, A5_f])
fuselage = cst.CST3D(rotation=(-90., -90.+aoa, 0.),
XYZ=(width, length, width),
ref=(0., 0., 0.),
sx=f_sx_fuse,
nx=(fuse_nx1, fuse_nx2),
sy=fuse_xtaper,
ny=(0., 0.),
xshear=fuse_xshear)
# fuse_mesh = fuselage(psi_w, eta_w)
# network_f = np.dstack(fuse_mesh)
# generate_surface(network_f, "fuselage")
# generate mesh
N_chord = 20
N_span = 10
N_nose = 20
N_tail = 50
N_circ = 20
# wing and fuselage intersection
cos_space = meshtools.cosine_spacing()
psi_spacing_w = cos_space(0., 1., N_chord)
intersection_f_wu = cst.intersection(wing_upper, fuselage, psi_spacing_w, 0.3)
intersection_f_wl = cst.intersection(wing_lower, fuselage, psi_spacing_w, 0.3)
# upper wing network
np.savetxt("intersection_upper.csv", np.array(intersection_f_wu).T)
edge_wf = meshtools.gen_network_edge(wing_upper, intersection_f_wu)
# edge_w1 = meshtools.gen_network_edge(wing_upper, intersection_f_wu)
edge_1 = meshtools.constant_eta_edge(eta_cp[1], N_chord)
edge_2 = meshtools.constant_eta_edge(eta_cp[2], N_chord)
edge_3 = meshtools.constant_eta_edge(eta_cp[3], N_chord)
psi_wu1, eta_wu1 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_wf, edge_1))
psi_wu2, eta_wu2 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_1, edge_2))
psi_wu3, eta_wu3 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_2, edge_3))
mesh_wu1 = wing_upper(psi_wu1, eta_wu1)
mesh_wu2 = wing_upper(psi_wu2, eta_wu2)
mesh_wu3 = wing_upper(psi_wu3, eta_wu3)
# lower wing network
# np.savetxt("intersection_lower.csv", p_intersect_l)
edge_wl = meshtools.gen_network_edge(wing_lower, intersection_f_wl)
psi_wl1, eta_wl1 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_wl, edge_1))
psi_wl2, eta_wl2 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_1, edge_2))
psi_wl3, eta_wl3 = meshtools.meshparameterspace((N_chord, N_span),
psi_spacing='cosine',
eta_spacing='cosine',
eta_limits=(edge_2, edge_3))
mesh_wl1 = wing_lower(psi_wl1, eta_wl1)
mesh_wl2 = wing_lower(psi_wl2, eta_wl2)
mesh_wl3 = wing_lower(psi_wl3, eta_wl3)
# fuselage mesh
edge_fu = meshtools.gen_network_edge(fuselage, intersection_f_wu, N_b=N_nose,
N_t=N_tail, vertical=True)
edge_fl = meshtools.gen_network_edge(fuselage, intersection_f_wl, N_b=N_nose,
N_t=N_tail, vertical=True)
psi_fu, eta_fu = meshtools.meshparameterspace((N_circ, N_chord+N_nose+N_tail-2),
psi_spacing='cosine',
eta_spacing='uniform',
psi_limits=(edge_fu, None))
psi_fl, eta_fl = meshtools.meshparameterspace((N_circ, N_chord+N_nose+N_tail-2),
psi_spacing='cosine',
eta_spacing='uniform',
psi_limits=(None, edge_fl))
mesh_fu = fuselage(psi_fu, eta_fu)
mesh_fl = fuselage(psi_fl, eta_fl)
# format as networks
network_wu1 = np.dstack(mesh_wu1)
network_wu2 = np.dstack(mesh_wu2)
network_wu3 = np.dstack(mesh_wu3)
network_wl1 = np.dstack(mesh_wl1)
network_wl2 = np.dstack(mesh_wl2)
network_wl3 = np.dstack(mesh_wl3)
network_fu = np.dstack(mesh_fu)
network_fl = np.dstack(mesh_fl)
# generate wing cap
network_wingcap = np.zeros((N_chord, 2, 3))
network_wingcap[:, 0, :] = network_wu3[:, -1, :]
network_wingcap[:, 1, :] = network_wl3[:, -1, :]
# generate tail cap
# print(network_fl[:, -1, 2])
# print(network_fu[:, -1, 2])
network_fusecap = np.zeros((2*N_circ-1, 2, 3))
network_fusecap[:, 1, :] = np.concatenate((network_fl[:-1, -1],
network_fu[:, -1]))
network_fusecap[:, 0, 0] = network_fusecap[:, 1, 0]
network_fusecap[:, 0, 2] = network_fusecap[:, 1, 2]
# np.savetxt('tail_cap0.csv', network_fuse_cap[:, 0, :])
# np.savetxt('tail_cap1.csv', network_fuse_cap[:, 1, :])
# exit()
# calculate wake
wing_trailing_edge = np.concatenate((network_wu1[-1, :-1, :],
network_wu2[-1, :-1, :],
network_wu3[-1, :, :]))
# print(wing_trailing_edge)
# exit()
fuselage_wake_boundary = network_fu[0, -N_tail:]
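# the wake networks below extend downstream from the wing and aft-fuselage trailing
# edges, presumably aligned with the freestream direction set by aoa in generate_wake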
inner_endpoint = np.copy(fuselage_wake_boundary[-1])
n_wake_streamwise = len(fuselage_wake_boundary)
body_wake_l = meshtools.generate_wake(network_fl[:, -1], inner_endpoint[0]+.05,
2, aoa, cos_spacing=True)
body_wake_u = meshtools.generate_wake(network_fu[:, -1], inner_endpoint[0]+.05,
2, aoa, cos_spacing=True)
wing_wake = meshtools.generate_wake(wing_trailing_edge, inner_endpoint[0]+.05,
n_wake_streamwise+1, aoa, cos_spacing=True)
wingbody_wake = np.zeros((n_wake_streamwise+1, 2, 3))
wingbody_wake[:-1, 0] = fuselage_wake_boundary
wingbody_wake[-1, 0] = body_wake_u[-1, 0]
wingbody_wake[:, 1] = wing_wake[:, 0]
# Generating vtk files
generate_surface(network_wu1, "wingupper1")
generate_surface(network_wu2, "wingupper2")
generate_surface(network_wu3, "wingupper3")
generate_surface(network_wl1, "winglower1")
generate_surface(network_wl2, "winglower2")
generate_surface(network_wl3, "winglower3")
generate_surface(network_fu, "fuselageupper")
generate_surface(network_fl, "fuselagelower")
generate_surface(network_wingcap, "wingcap")
generate_surface(network_fusecap, "fusecap")
generate_surface(wing_wake, "wake")
generate_surface(body_wake_l, "body_wake_l")
generate_surface(body_wake_u, "body_wake_u")
generate_surface(wingbody_wake, "wingbody_wake")
# run in Panair
gamma = 1.4
MACH = 1.6
panair = panairwrapper.PanairWrapper('wingbody')
panair.set_aero_state(MACH, aoa)
panair.set_symmetry(1, 0)
panair.add_network("wing_u1", np.flipud(network_wu1))
panair.add_network("wing_u2", np.flipud(network_wu2))
panair.add_network("wing_u3", np.flipud(network_wu3))
panair.add_network("wing_l1", network_wl1)
panair.add_network("wing_l2", network_wl2)
panair.add_network("wing_l3", network_wl3)
panair.add_network("fuselage_u", np.flipud(network_fu))
panair.add_network("fuselage_l", np.flipud(network_fl))
panair.add_network("wing_cap", np.flipud(network_wingcap))
panair.add_network("fuse_cap", network_fusecap, 5.)
panair.add_network("wake", wing_wake, 18.)
panair.add_network("body_wake_l", body_wake_l, 20.)
panair.add_network("body_wake_u", body_wake_u, 20.)
panair.add_network("wingbody_wake", wingbody_wake, 20.)
panair.set_sensor(MACH, aoa, 3, 32.92, 2.)
results = panair.run()
results.write_vtk()
offbody_data = results.get_offbody_data()
distance_along_sensor = offbody_data[:, 2]
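# the next-to-last off-body column is taken to be the pressure coefficient Cp;
# dp/p = 0.5*gamma*M^2*Cp converts it to the overpressure ratio of the nearfield signature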
dp_over_p = 0.5*gamma*MACH**2*offbody_data[:, -2]
nearfield_sig = np.array([distance_along_sensor, dp_over_p]).T
plt.plot(nearfield_sig[:, 0], nearfield_sig[:, 1])
plt.title("nearfield signature")
plt.show()
np.savetxt('nearfield_sig', nearfield_sig)
|
leal26/pyXFOIL
|
examples/3D_fitting/full/test.py
|
Python
|
mit
| 13,211
|
[
"VTK"
] |
005d0d1fa50707df1116dffca20f4eec414736bcf921873ddc53b2b7a289e1e8
|
"""Exploration Simulation
Simulation of a spacecraft exploring an asteroid.
States are chosen to minimize a cost function tied to the uncertainty of the
shape. Full SE(3) dynamics and the polyhedron potential model are used.
Author
------
Shankar Kulumani GWU skulumani@gwu.edu
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import pdb
import logging
import os
import tempfile
import argparse
from collections import defaultdict
import itertools
import subprocess
import h5py
import numpy as np
from scipy import integrate, interpolate, ndimage
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from lib import asteroid, surface_mesh, cgal, mesh_data, reconstruct
from lib import surface_mesh
from lib import controller as controller_cpp
from lib import stats
from lib import geodesic
from dynamics import dumbbell, eoms, controller
from point_cloud import wavefront
from kinematics import attitude
import utilities
from visualization import graphics, animation, publication
compression = 'gzip'
compression_opts = 9
max_steps = 15000
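# Illustrative sketch only (not called anywhere in this script): the measurement loops
# below repeatedly rotate inertial raycast hits into the asteroid body frame and replace
# misses (near-zero return vectors) with NaNs. A factored-out version could look like
# this; the helper name is hypothetical and the simulation keeps its inline loops.
def _intersections_to_asteroid_frame(intersections, Ra, miss_tol=1e-9):
    """Rotate inertial intersection points into the asteroid frame, NaN rows for misses."""
    ast_pts = []
    for pt in intersections:
        if np.linalg.norm(pt) < miss_tol:
            ast_pts.append(np.full(3, np.nan))
        else:
            ast_pts.append(Ra.T.dot(pt))
    return np.array(ast_pts)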
def initialize_asteroid(output_filename, ast_name="castalia"):
"""Initialize all the things for the simulation
output_filename : the HDF5 file to save the data/parameters to
"""
logger = logging.getLogger(__name__)
logger.info('Initialize asteroid and dumbbell objects')
AbsTol = 1e-9
RelTol = 1e-9
logger.info("Initializing Asteroid: {} ".format(ast_name))
# switch based on asteroid name
if ast_name == "castalia":
file_name = "castalia.obj"
v, f = wavefront.read_obj('./data/shape_model/CASTALIA/castalia.obj')
elif ast_name == "itokawa":
file_name = "itokawa_low.obj"
v, f = wavefront.read_obj('./data/shape_model/ITOKAWA/itokawa_low.obj')
elif ast_name == "eros":
file_name = "eros_low.obj"
v, f = wavefront.read_obj('./data/shape_model/EROS/eros_low.obj')
elif ast_name == "phobos":
file_name = "phobos_low.obj"
v, f = wavefront.read_obj('./data/shape_model/PHOBOS/phobos_low.obj')
elif ast_name == "lutetia":
file_name = "lutetia_low.obj"
v, f = wavefront.read_obj('./data/shape_model/LUTETIA/lutetia_low.obj')
elif ast_name == "geographos":
file_name = "1620geographos.obj"
v, f = wavefront.read_obj('./data/shape_model/RADAR/1620geographos.obj')
elif ast_name == "bacchus":
file_name = "2063bacchus.obj"
v, f = wavefront.read_obj('./data/shape_model/RADAR/2063bacchus.obj')
elif ast_name == "golevka":
file_name = "6489golevka.obj"
v, f = wavefront.read_obj('./data/shape_model/RADAR/6489golevka.obj')
elif ast_name == "52760":
file_name = "52760.obj"
v, f = wavefront.read_obj('./data/shape_model/RADAR/52760.obj')
else:
print("Incorrect asteroid name")
return 1
# true asteroid and dumbbell
true_ast_meshdata = mesh_data.MeshData(v, f)
true_ast = asteroid.Asteroid(ast_name, true_ast_meshdata)
dum = dumbbell.Dumbbell(m1=500, m2=500, l=0.003)
# estimated asteroid (starting as an ellipse)
if (ast_name == "castalia" or ast_name == "itokawa"
or ast_name == "golevka" or ast_name == "52760"):
surf_area = 0.01
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
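# max_angle is roughly the angular radius subtended by a patch of area surf_area on a
# sphere of the largest semi-axis; it is later passed to est_ast_rmesh.update() with
# each batch of measurements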
min_angle = 10
max_radius = 0.03
max_distance = 0.5
elif ast_name == "geographos":
surf_area = 0.05
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.05
max_distance = 0.5
elif ast_name == "bacchus":
surf_area = 0.01
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.02
max_distance = 0.5
elif ast_name == "52760":
surf_area = 0.01
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.035
max_distance = 0.5
elif (ast_name == "phobos"):
surf_area = 0.1
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.006
max_distance = 0.1
elif (ast_name == "lutetia"):
surf_area = 1
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 1
max_distance = 1
elif (ast_name == "eros"):
surf_area = 0.1
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.2
max_distance = 0.01
ellipsoid = surface_mesh.SurfMesh(true_ast.get_axes()[0],
true_ast.get_axes()[1],
true_ast.get_axes()[2], min_angle,
max_radius, max_distance)
v_est = ellipsoid.get_verts()
f_est = ellipsoid.get_faces()
est_ast_meshdata = mesh_data.MeshData(v_est, f_est)
est_ast_rmesh = reconstruct.ReconstructMesh(est_ast_meshdata)
est_ast = asteroid.Asteroid(ast_name, est_ast_rmesh)
# controller functions
complete_controller = controller_cpp.Controller()
# lidar object
lidar = cgal.Lidar()
lidar = lidar.view_axis(np.array([1, 0, 0]))
lidar = lidar.up_axis(np.array([0, 0, 1]))
lidar = lidar.fov(np.deg2rad(np.array([7, 7]))).dist(2).num_steps(3)
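# lidar configuration: boresight along +x, up along +z, 7x7 degree field of view,
# range 2; num_steps presumably sets the resolution of the measurement ray grid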
# raycaster from c++
caster = cgal.RayCaster(v, f)
# save a bunch of parameters to the HDF5 file
with h5py.File(output_filename, 'w-') as hf:
sim_group = hf.create_group("simulation_parameters")
sim_group['AbsTol'] = AbsTol
sim_group['RelTol'] = RelTol
dumbbell_group = sim_group.create_group("dumbbell")
dumbbell_group["m1"] = 500
dumbbell_group["m2"] = 500
dumbbell_group['l'] = 0.003
true_ast_group = sim_group.create_group("true_asteroid")
true_ast_group.create_dataset("vertices", data=v, compression=compression,
compression_opts=compression_opts)
true_ast_group.create_dataset("faces", data=f, compression=compression,
compression_opts=compression_opts)
true_ast_group['name'] = file_name
est_ast_group = sim_group.create_group("estimate_asteroid")
est_ast_group['surf_area'] = surf_area
est_ast_group['max_angle'] = max_angle
est_ast_group['min_angle'] = min_angle
est_ast_group['max_distance'] = max_distance
est_ast_group['max_radius'] = max_radius
est_ast_group.create_dataset('initial_vertices', data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
est_ast_group.create_dataset("initial_faces", data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
est_ast_group.create_dataset("initial_weight", data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
lidar_group = sim_group.create_group("lidar")
lidar_group.create_dataset("view_axis", data=lidar.get_view_axis())
lidar_group.create_dataset("up_axis", data=lidar.get_up_axis())
lidar_group.create_dataset("fov", data=lidar.get_fov())
return (true_ast_meshdata, true_ast, complete_controller, est_ast_meshdata,
est_ast_rmesh, est_ast, lidar, caster, max_angle,
dum, AbsTol, RelTol)
def initialize_refinement(output_filename, ast_name="castalia"):
"""Initialize all the things for the simulation to refine the landing site
output_filename : the HDF5 file to save the data/parameters to
This will load the estimated asteroid shape, the true asteroid and a bumpy
version for use with the raycaster
"""
logger = logging.getLogger(__name__)
logger.info('Initialize asteroid and dumbbell objects')
AbsTol = 1e-9
RelTol = 1e-9
logger.info("Initializing Refinement : {} ".format(ast_name))
# open the file and recreate the objects
with h5py.File(output_filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
explore_tf = hf['time'][()][-1]
# explore_tf = int(state_keys[-1])
explore_state = hf['state/' + str(explore_tf)][()]
explore_Ra = hf['Ra/' + str(explore_tf)][()]
explore_v = hf['reconstructed_vertex/' + str(explore_tf)][()]
explore_f = hf['reconstructed_face/' + str(explore_tf)][()]
explore_w = hf['reconstructed_weight/' + str(explore_tf)][()]
explore_name = hf['simulation_parameters/true_asteroid/name'][()][:-4]
explore_m1 = hf['simulation_parameters/dumbbell/m1'][()]
explore_m2 = hf['simulation_parameters/dumbbell/m2'][()]
explore_l = hf['simulation_parameters/dumbbell/l'][()]
explore_AbsTol = hf['simulation_parameters/AbsTol'][()]
explore_RelTol = hf['simulation_parameters/RelTol'][()]
explore_true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
explore_true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
# switch based on asteroid name
if ast_name == "castalia":
refine_file_name = "castalia_bump.obj"
v, f = wavefront.read_obj('./data/shape_model/CASTALIA/castalia_bump_2.obj')
else:
print("Incorrect asteroid name")
return 1
# true asteroid and dumbbell
true_ast_meshdata = mesh_data.MeshData(explore_true_vertices, explore_true_faces)
true_ast = asteroid.Asteroid(explore_name, true_ast_meshdata)
dum = dumbbell.Dumbbell(m1=explore_m1, m2=explore_m2, l=explore_l)
# estimated asteroid (starting as an ellipse)
if (ast_name == "castalia" or ast_name == "itokawa"
or ast_name == "golevka" or ast_name == "52760"):
surf_area = 0.0005
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_radius = 0.03
max_distance = 0.5
est_ast_meshdata = mesh_data.MeshData(explore_v, explore_f)
# set the weight of everything to a big number
# new_w = np.full_like(explore_w, 10)
est_ast_rmesh = reconstruct.ReconstructMesh(est_ast_meshdata, explore_w)
est_ast = asteroid.Asteroid(ast_name, est_ast_rmesh)
# controller functions
complete_controller = controller_cpp.Controller()
# lidar object
lidar = cgal.Lidar()
lidar = lidar.view_axis(np.array([1, 0, 0]))
lidar = lidar.up_axis(np.array([0, 0, 1]))
lidar = lidar.fov(np.deg2rad(np.array([2, 2]))).dist(2).num_steps(3)
# raycaster from c++ using the bumpy asteroid
caster = cgal.RayCaster(v, f)
return (true_ast_meshdata, true_ast, complete_controller, est_ast_meshdata,
est_ast_rmesh, est_ast, lidar, caster, max_angle,
dum, AbsTol, RelTol)
def initialize_castalia(output_filename):
"""Initialize all the things for the simulation
output_filename : the HDF5 file to save the data/parameters to
"""
logger = logging.getLogger(__name__)
logger.info('Initialize asteroid and dumbbell objects')
AbsTol = 1e-9
RelTol = 1e-9
ast_name = "castalia"
file_name = "castalia.obj"
# true asteroid and dumbbell
v, f = wavefront.read_obj('./data/shape_model/CASTALIA/castalia.obj')
true_ast_meshdata = mesh_data.MeshData(v, f)
true_ast = asteroid.Asteroid(ast_name, true_ast_meshdata)
dum = dumbbell.Dumbbell(m1=500, m2=500, l=0.003)
# estimated asteroid (starting as an ellipse)
surf_area = 0.01
max_angle = np.sqrt(surf_area / true_ast.get_axes()[0]**2)
min_angle = 10
max_distance = 0.5
max_radius = 0.03
ellipsoid = surface_mesh.SurfMesh(true_ast.get_axes()[0], true_ast.get_axes()[1], true_ast.get_axes()[2],
min_angle, max_radius, max_distance)
v_est = ellipsoid.get_verts()
f_est = ellipsoid.get_faces()
est_ast_meshdata = mesh_data.MeshData(v_est, f_est)
est_ast_rmesh = reconstruct.ReconstructMesh(est_ast_meshdata)
est_ast = asteroid.Asteroid(ast_name, est_ast_rmesh)
# controller functions
complete_controller = controller_cpp.Controller()
# lidar object
lidar = cgal.Lidar()
lidar = lidar.view_axis(np.array([1, 0, 0]))
lidar = lidar.up_axis(np.array([0, 0, 1]))
lidar = lidar.fov(np.deg2rad(np.array([7, 7]))).dist(2).num_steps(3)
# raycaster from c++
caster = cgal.RayCaster(v, f)
# save a bunch of parameters to the HDF5 file
with h5py.File(output_filename, 'w-') as hf:
sim_group = hf.create_group("simulation_parameters")
sim_group['AbsTol'] = AbsTol
sim_group['RelTol'] = RelTol
dumbbell_group = sim_group.create_group("dumbbell")
dumbbell_group["m1"] = 500
dumbbell_group["m2"] = 500
dumbbell_group['l'] = 0.003
true_ast_group = sim_group.create_group("true_asteroid")
true_ast_group.create_dataset("vertices", data=v, compression=compression,
compression_opts=compression_opts)
true_ast_group.create_dataset("faces", data=f, compression=compression,
compression_opts=compression_opts)
true_ast_group['name'] = file_name
est_ast_group = sim_group.create_group("estimate_asteroid")
est_ast_group['surf_area'] = surf_area
est_ast_group['max_angle'] = max_angle
est_ast_group['min_angle'] = min_angle
est_ast_group['max_distance'] = max_distance
est_ast_group['max_radius'] = max_radius
est_ast_group.create_dataset('initial_vertices', data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
est_ast_group.create_dataset("initial_faces", data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
est_ast_group.create_dataset("initial_weight", data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
lidar_group = sim_group.create_group("lidar")
lidar_group.create_dataset("view_axis", data=lidar.get_view_axis())
lidar_group.create_dataset("up_axis", data=lidar.get_up_axis())
lidar_group.create_dataset("fov", data=lidar.get_fov())
return (true_ast_meshdata, true_ast, complete_controller, est_ast_meshdata,
est_ast_rmesh, est_ast, lidar, caster, max_angle,
dum, AbsTol, RelTol)
def simulate(output_filename="/tmp/exploration_sim.hdf5"):
"""Actually run the simulation around the asteroid
"""
logger = logging.getLogger(__name__)
num_steps = int(max_steps)
time = np.arange(0, num_steps)
t0, tf = time[0], time[-1]
dt = time[1] - time[0]
# define the initial condition in the inertial frame
initial_pos = np.array([1.5, 0, 0])
initial_vel = np.array([0, 0, 0])
initial_R = attitude.rot3(np.pi / 2).reshape(-1)
initial_w = np.array([0, 0, 0])
initial_state = np.hstack((initial_pos, initial_vel, initial_R, initial_w))
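# 18-element state: position (3), velocity (3), flattened 3x3 attitude matrix (9),
# angular velocity (3)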
# initialize the simulation objects
(true_ast_meshdata, true_ast, complete_controller,
est_ast_meshdata, est_ast_rmesh, est_ast, lidar, caster, max_angle, dum,
AbsTol, RelTol) = initialize_castalia(output_filename)
with h5py.File(output_filename, 'a') as hf:
hf.create_dataset('time', data=time, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("initial_state", data=initial_state, compression=compression,
compression_opts=compression_opts)
v_group = hf.create_group("reconstructed_vertex")
f_group = hf.create_group("reconstructed_face")
w_group = hf.create_group("reconstructed_weight")
state_group = hf.create_group("state")
targets_group = hf.create_group("targets")
Ra_group = hf.create_group("Ra")
inertial_intersections_group = hf.create_group("inertial_intersections")
asteroid_intersections_group = hf.create_group("asteroid_intersections")
# initialize the ODE function
system = integrate.ode(eoms.eoms_controlled_inertial_pybind)
system.set_integrator("lsoda", atol=AbsTol, rtol=RelTol, nsteps=10000)
system.set_initial_value(initial_state, t0)
system.set_f_params(true_ast, dum, complete_controller, est_ast_rmesh)
point_cloud = defaultdict(list)
ii = 1
while system.successful() and system.t < tf:
# integrate the system
t = (system.t + dt)
state = system.integrate(system.t + dt)
logger.info("Step: {} Time: {}".format(ii, t))
if not (np.floor(t) % 1):
# logger.info("RayCasting at t: {}".format(t))
targets = lidar.define_targets(state[0:3],
state[6:15].reshape((3, 3)),
np.linalg.norm(state[0:3]))
# update the asteroid inside the caster
nv = true_ast.rotate_vertices(t)
Ra = true_ast.rot_ast2int(t)
# this also updates true_ast (both point to same data)
caster.update_mesh(nv, true_ast.get_faces())
# do the raycasting
intersections = caster.castarray(state[0:3], targets)
# reconstruct the mesh with new measurements
# convert the intersections to the asteroid frame
ast_ints = []
for pt in intersections:
if np.linalg.norm(pt) < 1e-9:
logger.info("No intersection for this point")
pt_ast = np.array([np.nan, np.nan, np.nan])
else:
pt_ast = Ra.T.dot(pt)
ast_ints.append(pt_ast)
# convert the intersections to the asteroid frame
ast_ints = np.array(ast_ints)
est_ast_rmesh.update(ast_ints, max_angle)
# save data to HDF5
v_group.create_dataset(str(ii), data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
f_group.create_dataset(str(ii), data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
w_group.create_dataset(str(ii), data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
state_group.create_dataset(str(ii), data=state, compression=compression,
compression_opts=compression_opts)
targets_group.create_dataset(str(ii), data=targets, compression=compression,
compression_opts=compression_opts)
Ra_group.create_dataset(str(ii), data=Ra, compression=compression,
compression_opts=compression_opts)
inertial_intersections_group.create_dataset(str(ii), data=intersections, compression=compression,
compression_opts=compression_opts)
asteroid_intersections_group.create_dataset(str(ii), data=ast_ints, compression=compression,
compression_opts=compression_opts)
ii += 1
def simulate_control(output_filename="/tmp/exploration_sim.hdf5",
asteroid_name="castalia"):
"""Run the simulation with the control cost added in
"""
logger = logging.getLogger(__name__)
num_steps = int(max_steps)
time = np.arange(0, num_steps)
t0, tf = time[0], time[-1]
dt = time[1] - time[0]
# initialize the simulation objects
(true_ast_meshdata, true_ast, complete_controller,
est_ast_meshdata, est_ast_rmesh, est_ast, lidar, caster, max_angle, dum,
AbsTol, RelTol) = initialize_asteroid(output_filename, asteroid_name)
# change the initial condition based on the asteroid name
if true_ast.get_name() == "itokawa":
initial_pos = np.array([1.5, 0, 0])  # itokawa
elif true_ast.get_name() == "castalia":
initial_pos = np.array([1.5, 0, 0])  # castalia
elif true_ast.get_name() == "eros":
initial_pos = np.array([19, 0, 0])  # eros needs more time (greater than 15000 steps)
elif true_ast.get_name() == "phobos":
initial_pos = np.array([70, 0, 0])
elif true_ast.get_name() == "lutetia":
initial_pos = np.array([70, 0, 0])
elif true_ast.get_name() == "geographos":
initial_pos = np.array([5, 0, 0])
elif true_ast.get_name() == "bacchus":
initial_pos = np.array([1.5, 0, 0])
elif true_ast.get_name() == "52760":
initial_pos = np.array([3, 0, 0])
else:
print("Incorrect asteroid selected")
return 1
# define the initial condition in the inertial frame
initial_vel = np.array([0, 0, 0])
initial_R = attitude.rot3(np.pi / 2).reshape(-1)
initial_w = np.array([0, 0, 0])
initial_state = np.hstack((initial_pos, initial_vel, initial_R, initial_w))
with h5py.File(output_filename, 'a') as hf:
hf.create_dataset('time', data=time, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("initial_state", data=initial_state, compression=compression,
compression_opts=compression_opts)
v_group = hf.create_group("reconstructed_vertex")
f_group = hf.create_group("reconstructed_face")
w_group = hf.create_group("reconstructed_weight")
state_group = hf.create_group("state")
targets_group = hf.create_group("targets")
Ra_group = hf.create_group("Ra")
inertial_intersections_group = hf.create_group("inertial_intersections")
asteroid_intersections_group = hf.create_group("asteroid_intersections")
# initialize the ODE function
system = integrate.ode(eoms.eoms_controlled_inertial_control_cost_pybind)
system.set_integrator("lsoda", atol=AbsTol, rtol=RelTol, nsteps=10000)
# system.set_integrator("vode", nsteps=5000, method='bdf')
system.set_initial_value(initial_state, t0)
system.set_f_params(true_ast, dum, complete_controller, est_ast_rmesh, est_ast)
point_cloud = defaultdict(list)
ii = 1
while system.successful() and system.t < tf:
t = system.t + dt
# TODO Make sure the asteroid (est and truth) are being rotated by ROT3(t)
state = system.integrate(system.t + dt)
logger.info("Step: {} Time: {} Pos: {} Uncertainty: {}".format(ii, t,
state[0:3],
np.sum(est_ast_rmesh.get_weights())))
if not (np.floor(t) % 1):
targets = lidar.define_targets(state[0:3],
state[6:15].reshape((3, 3)),
np.linalg.norm(state[0:3]))
# update the asteroid inside the caster
nv = true_ast.rotate_vertices(t)
Ra = true_ast.rot_ast2int(t)
caster.update_mesh(nv, true_ast.get_faces())
# do the raycasting
intersections = caster.castarray(state[0:3], targets)
# reconstruct the mesh with new measurements
# convert the intersections to the asteroid frame
ast_ints = []
for pt in intersections:
if np.linalg.norm(pt) < 1e-9:
logger.info("No intersection for this point")
pt_ast = np.array([np.nan, np.nan, np.nan])
else:
pt_ast = Ra.T.dot(pt)
ast_ints.append(pt_ast)
ast_ints = np.array(ast_ints)
# this updates the estimated asteroid mesh used in both rmesh and est_ast
est_ast_rmesh.update(ast_ints, max_angle)
v_group.create_dataset(str(ii), data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
f_group.create_dataset(str(ii), data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
w_group.create_dataset(str(ii), data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
state_group.create_dataset(str(ii), data=state, compression=compression,
compression_opts=compression_opts)
targets_group.create_dataset(str(ii), data=targets, compression=compression,
compression_opts=compression_opts)
Ra_group.create_dataset(str(ii), data=Ra, compression=compression,
compression_opts=compression_opts)
inertial_intersections_group.create_dataset(str(ii), data=intersections, compression=compression,
compression_opts=compression_opts)
asteroid_intersections_group.create_dataset(str(ii), data=ast_ints, compression=compression,
compression_opts=compression_opts)
ii += 1
logger.info("Exploration complete")
logger.info("All done")
def save_animation(filename, move_cam=False, mesh_weight=False,
output_path=tempfile.mkdtemp()):
"""Given a HDF5 file from simulate this will animate teh motion
"""
with h5py.File(filename, 'r') as hf:
# get the inertial state and asteroid mesh object
time = hf['time'][()]
state_group = hf['state']
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
intersections_group = hf['inertial_intersections']
# extract out the entire state and intersections
state = []
inertial_intersections = []
for key in state_keys:
state.append(state_group[key][()])
inertial_intersections.append(intersections_group[key][()])
state = np.array(state)
inertial_intersections = np.array(inertial_intersections)
# get the true asteroid from the HDF5 file
true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
true_name = hf['simulation_parameters/true_asteroid/name'][()]
est_initial_vertices = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
est_initial_faces = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
# think about a black background as well
mfig = graphics.mayavi_figure(bg=(0, 0, 0), size=(800,600), offscreen=True)
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces,
scalars=np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces)
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
# initialize a dumbbell object
dum = dumbbell.Dumbbell(hf['simulation_parameters/dumbbell/m1'][()],
hf['simulation_parameters/dumbbell/m2'][()],
hf['simulation_parameters/dumbbell/l'][()])
# com, dum_axes = graphics.draw_dumbbell_mayavi(state[0, :], dum, mfig)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
pc_points = graphics.mayavi_points3d(mfig, inertial_intersections[0],
color=(0, 0, 1), scale_factor=0.03)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
# mayavi_objects = (mesh, ast_axes, com, dum_axes, pc_lines)
mayavi_objects = (mesh, com, pc_points, time_text, weight_text)
print("Images will be saved to {}".format(output_path))
animation.inertial_asteroid_trajectory_cpp_save(time, state, inertial_intersections,
filename, mayavi_objects, move_cam=move_cam,
mesh_weight=mesh_weight,
output_path=output_path,
magnification=4)
# now call ffmpeg
fps = 60
name = os.path.join(output_path, 'exploration.mp4')
ffmpeg_fname = os.path.join(output_path, '%07d.jpg')
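# the scale filter rounds the frame dimensions to even values, which libx264 requires
# for yuv420p output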
cmd = "ffmpeg -framerate {} -i {} -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p -vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2' {}".format(fps, ffmpeg_fname, name)
print(cmd)
subprocess.check_output(['bash', '-c', cmd])
# remove folder now
for file in os.listdir(output_path):
file_path = os.path.join(output_path, file)
if file_path.endswith('.jpg'):
os.remove(file_path)
def animate(filename, move_cam=False, mesh_weight=False, save_animation=False):
"""Given a HDF5 file from simulate this will animate teh motion
"""
# TODO Animate the changing of the mesh itself as a function of time
with h5py.File(filename, 'r') as hf:
# get the inertial state and asteroid mesh object
# time = hf['time'][()]
state_group = hf['state']
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
time = [int(t) for t in state_keys]
intersections_group = hf['inertial_intersections']
# extract out the entire state and intersections
state = []
inertial_intersections = []
for key in state_keys:
state.append(state_group[key][()])
inertial_intersections.append(intersections_group[key][()])
state = np.array(state)
inertial_intersections = np.array(inertial_intersections)
# get the true asteroid from the HDF5 file
true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
true_name = hf['simulation_parameters/true_asteroid/name'][()]
est_initial_vertices = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
est_initial_faces = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
# think about a black background as well
mfig = graphics.mayavi_figure(size=(800,600), bg=(0, 0, 0))
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces,
scalars=np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces)
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
# initialize a dumbbell object
dum = dumbbell.Dumbbell(hf['simulation_parameters/dumbbell/m1'][()],
hf['simulation_parameters/dumbbell/m2'][()],
hf['simulation_parameters/dumbbell/l'][()])
# com, dum_axes = graphics.draw_dumbbell_mayavi(state[0, :], dum, mfig)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
pc_points = graphics.mayavi_points3d(mfig, inertial_intersections[0],
color=(0, 0, 1), scale_factor=0.03)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
# mayavi_objects = (mesh, ast_axes, com, dum_axes, pc_lines)
mayavi_objects = (mesh, com, pc_points, time_text, weight_text)
animation.inertial_asteroid_trajectory_cpp(time, state, inertial_intersections,
filename, mayavi_objects, move_cam=move_cam,
mesh_weight=mesh_weight)
graphics.mlab.show()
def animate_refinement(filename, move_cam=False, mesh_weight=False, save_animation=False):
"""Given a HDF5 file from simulate this will animate teh motion
"""
# TODO Animate the changing of the mesh itself as a function of time
with h5py.File(filename, 'r') as hf:
# get the inertial state and asteroid mesh object
# time = hf['time'][()]
state_group = hf['refinement/state']
state_keys = np.array(utilities.sorted_nicely(list(hf['refinement/state'].keys())))
time = [int(t) for t in state_keys]
intersections_group = hf['refinement/inertial_intersections']
# extract out the entire state and intersections
state = []
inertial_intersections = []
for key in state_keys:
state.append(state_group[key][()])
inertial_intersections.append(intersections_group[key][()])
state = np.array(state)
inertial_intersections = np.array(inertial_intersections)
# get the true asteroid from the HDF5 file
true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
true_name = hf['simulation_parameters/true_asteroid/name'][()]
est_initial_vertices = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
est_initial_faces = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
# think about a black background as well
mfig = graphics.mayavi_figure(bg=(0, 0, 0), size=(800, 600))
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces,
scalars=np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces)
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
# initialize a dumbbell object
dum = dumbbell.Dumbbell(hf['simulation_parameters/dumbbell/m1'][()],
hf['simulation_parameters/dumbbell/m2'][()],
hf['simulation_parameters/dumbbell/l'][()])
# com, dum_axes = graphics.draw_dumbbell_mayavi(state[0, :], dum, mfig)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
pc_points = graphics.mayavi_points3d(mfig, inertial_intersections[0],
color=(0, 0, 1), scale_factor=0.03)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
# mayavi_objects = (mesh, ast_axes, com, dum_axes, pc_lines)
mayavi_objects = (mesh, com, pc_points, time_text, weight_text)
animation.inertial_asteroid_refinement_cpp(time, state, inertial_intersections,
filename, mayavi_objects, move_cam=move_cam,
mesh_weight=mesh_weight)
graphics.mlab.show()
def animate_landing(filename, move_cam=False, mesh_weight=False):
"""Animation for the landing portion of simulation
"""
with h5py.File(filename, 'r') as hf:
time = hf['landing/time'][()]
state_group = hf['landing/state']
state_keys = np.array(utilities.sorted_nicely(list(state_group.keys())))
state = []
for key in state_keys:
state.append(state_group[key][()])
state = np.array(state)
mfig = graphics.mayavi_figure(bg=(0, 0, 0),size=(800, 600))
# option for the mesh weight
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, hf['landing/vertices'][()], hf['landing/faces'][()],
scalars=np.squeeze(hf['landing/weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, hf['landing/vertices'][()], hf['landing/faces'][()])
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
mayavi_objects = (mesh, com, time_text, weight_text)
animation.inertial_asteroid_landing_cpp(time, state, filename, mayavi_objects,
move_cam=move_cam, mesh_weight=mesh_weight)
graphics.mlab.show()
def save_animate_landing(filename,output_path,move_cam=False, mesh_weight=False):
"""Save the landing animation
"""
with h5py.File(filename, 'r') as hf:
time = hf['landing/time'][()]
state_group = hf['landing/state']
state_keys = np.array(utilities.sorted_nicely(list(state_group.keys())))
state = []
for key in state_keys:
state.append(state_group[key][()])
state = np.array(state)
mfig = graphics.mayavi_figure(bg=(0, 0, 0), size=(800, 600), offscreen=True)
# option for the mesh weight
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, hf['landing/vertices'][()], hf['landing/faces'][()],
scalars=np.squeeze(hf['landing/weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, hf['landing/vertices'][()], hf['landing/faces'][()])
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
mayavi_objects = (mesh, com, time_text, weight_text)
print("Images will be saved to {}".format(output_path))
animation.inertial_asteroid_landing_cpp_save(time, state, filename, mayavi_objects,
move_cam=move_cam, mesh_weight=mesh_weight,
output_path=output_path,
magnification=4)
# now call ffmpeg
fps = 60
name = os.path.join(output_path, 'landing.mp4')
ffmpeg_fname = os.path.join(output_path, '%07d.jpg')
cmd = "ffmpeg -framerate {} -i {} -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p -vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2' {}".format(fps, ffmpeg_fname, name)
print(cmd)
subprocess.check_output(['bash', '-c', cmd])
# remove folder now
for file in os.listdir(output_path):
file_path = os.path.join(output_path, file)
if file_path.endswith('.jpg'):
os.remove(file_path)
def save_animate_refinement(filename, output_path, move_cam=False, mesh_weight=False):
"""Save the refinement animation
"""
# TODO Animate the changing of the mesh itself as a function of time
with h5py.File(filename, 'r') as hf:
# get the inertial state and asteroid mesh object
# time = hf['time'][()]
state_group = hf['refinement/state']
state_keys = np.array(utilities.sorted_nicely(list(hf['refinement/state'].keys())))
time = [int(t) for t in state_keys]
intersections_group = hf['refinement/inertial_intersections']
# extract out the entire state and intersections
state = []
inertial_intersections = []
for key in state_keys:
state.append(state_group[key][()])
inertial_intersections.append(intersections_group[key][()])
state = np.array(state)
inertial_intersections = np.array(inertial_intersections)
# get the true asteroid from the HDF5 file
true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
true_name = hf['simulation_parameters/true_asteroid/name'][()]
est_initial_vertices = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
est_initial_faces = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
# think about a black background as well
mfig = graphics.mayavi_figure(size=(800, 600), offscreen=True, bg=(0, 0, 0))
if mesh_weight:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces,
scalars=np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()]),
color=None, colormap='viridis')
else:
mesh = graphics.mayavi_addMesh(mfig, est_initial_vertices, est_initial_faces)
xaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([2, 0, 0]), color=(1, 0, 0))
yaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 2, 0]), color=(0, 1, 0))
zaxis = graphics.mayavi_addLine(mfig, np.array([0, 0, 0]), np.array([0, 0, 2]), color=(0, 0, 1))
ast_axes = (xaxis, yaxis, zaxis)
# initialize a dumbbell object
dum = dumbbell.Dumbbell(hf['simulation_parameters/dumbbell/m1'][()],
hf['simulation_parameters/dumbbell/m2'][()],
hf['simulation_parameters/dumbbell/l'][()])
# com, dum_axes = graphics.draw_dumbbell_mayavi(state[0, :], dum, mfig)
if move_cam:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.02,
opacity=0.5)
else:
com = graphics.mayavi_addPoint(mfig, state[0, 0:3],
color=(1, 0, 0), radius=0.1)
pc_points = graphics.mayavi_points3d(mfig, inertial_intersections[0],
color=(0, 0, 1), scale_factor=0.01)
# add some text objects
time_text = graphics.mlab.text(0.1, 0.1, "t: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
weight_text = graphics.mlab.text(0.1, 0.2, "w: {:8.1f}".format(0), figure=mfig,
color=(1, 1, 1), width=0.05)
# mayavi_objects = (mesh, ast_axes, com, dum_axes, pc_lines)
mayavi_objects = (mesh, com, pc_points, time_text, weight_text)
print("Images will be saved to {}".format(output_path))
animation.inertial_asteroid_refinement_cpp_save(time, state, inertial_intersections,
filename, mayavi_objects, move_cam=move_cam,
mesh_weight=mesh_weight,
output_path=output_path,
magnification=4)
# now call ffmpeg
fps = 60
name = os.path.join(output_path, 'refinement.mp4')
ffmpeg_fname = os.path.join(output_path, '%07d.jpg')
cmd = "ffmpeg -framerate {} -i {} -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p -vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2' {}.mp4".format(fps, ffmpeg_fname, name)
print(cmd)
subprocess.check_output(['bash', '-c', cmd])
# remove folder now
for file in os.listdir(output_path):
file_path = os.path.join(output_path, file)
if file_path.endswith('.jpg'):
os.remove(file_path)
def refine_landing_area(filename, asteroid_name, desired_landing_site):
"""Called after exploration is completed
We'll refine the area near the landing site.
Then using a bumpy model we'll take lots of measurements and try to recreate the bumpy terrain
using a mixed resolution mesh
Then in a differetn function we'll go and land
"""
logger = logging.getLogger(__name__)
num_steps = int(3600)
time = np.arange(0, num_steps)
t0, tf = time[0], time[-1]
dt = time[1] - time[0]
# initialize the simulation objects
(true_ast_meshdata, true_ast, complete_controller,
est_ast_meshdata, est_ast_rmesh, est_ast, lidar, caster, max_angle, dum,
AbsTol, RelTol) = initialize_refinement(filename, asteroid_name)
v_bumpy, f_bumpy = wavefront.read_obj('./data/shape_model/CASTALIA/castalia_bump.obj')
# define the initial condition as the terminal state of the exploration sim
with h5py.File(filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
explore_tf = hf['time'][()][-1]
explore_state = hf['state/' + str(explore_tf)][()]
explore_Ra = hf['Ra/' + str(explore_tf)][()]
explore_AbsTol = hf["simulation_parameters/AbsTol"][()]
explore_RelTol = hf["simulation_parameters/RelTol"][()]
initial_state = explore_state
# open the file and recreate the objects
with h5py.File(filename, 'r+') as hf:
# groups to save the refined data
if "refinement" in hf:
del hf['refinement']
refinement_group = hf.create_group("refinement")
refinement_group.create_dataset("time", data=time, compression=compression,
compression_opts=compression_opts)
refinement_group.create_dataset("initial_state", data=initial_state)
v_group = refinement_group.create_group("reconstructed_vertex")
f_group = refinement_group.create_group("reconstructed_face")
w_group = refinement_group.create_group("reconstructed_weight")
state_group = refinement_group.create_group("state")
targets_group = refinement_group.create_group("targets")
Ra_group = refinement_group.create_group("Ra")
inertial_intersections_group = refinement_group.create_group("inertial_intersections")
asteroid_intersections_group = refinement_group.create_group("asteroid_intersections")
logger.info("Estimated asteroid has {} vertices and {} faces".format(
est_ast_rmesh.get_verts().shape[0],
est_ast_rmesh.get_faces().shape[0]))
logger.info("Now refining the faces close to the landing site")
# perform remeshing over the landing area and take a bunch of measurements
est_ast_meshdata.remesh_faces_in_view(desired_landing_site, np.deg2rad(40),
0.02)
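# subdivide the estimated-mesh faces within ~40 degrees of the landing-site direction;
# the last argument presumably sets the target edge length of the refined patch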
logger.info("Estimated asteroid has {} vertices and {} faces".format(
est_ast_rmesh.get_verts().shape[0],
est_ast_rmesh.get_faces().shape[0]))
logger.info("Now starting dynamic simulation and taking measurements again again")
complete_controller.set_vertices_in_view(est_ast_rmesh, desired_landing_site,
np.deg2rad(40))
system = integrate.ode(eoms.eoms_controlled_inertial_refinement_pybind)
# system = integrate.ode(eoms.eoms_controlled_inertial_control_cost_pybind)
system.set_integrator("lsoda", atol=explore_AbsTol, rtol=explore_RelTol, nsteps=10000)
system.set_initial_value(initial_state, t0)
system.set_f_params(true_ast, dum, complete_controller, est_ast_rmesh,
est_ast, desired_landing_site)
# system.set_f_params(true_ast, dum, complete_controller, est_ast_rmesh, est_ast)
# TODO make sure that at this point the new faces have a high weight
ii = 1
while system.successful() and system.t < tf:
t = system.t + dt
state = system.integrate(system.t + dt)
logger.info("Step: {} Time: {} Pos: {} Uncertainty: {}".format(ii, t,
state[0:3],
np.sum(est_ast_rmesh.get_weights())))
targets = lidar.define_targets(state[0:3],
state[6:15].reshape((3, 3)),
np.linalg.norm(state[0:3]))
# update the asteroid inside the caster
nv = true_ast.rotate_vertices(t)
Ra = true_ast.rot_ast2int(t)
nv = Ra.dot(v_bumpy.T).T
caster.update_mesh(nv, f_bumpy)
# do the raycasting
intersections = caster.castarray(state[0:3], targets)
# reconstruct the mesh with new measurements
# convert the intersections to the asteroid frame
ast_ints = []
for pt in intersections:
if np.linalg.norm(pt) < 1e-9:
logger.info("No intersection for this point")
pt_ast = np.array([np.nan, np.nan, np.nan])
else:
pt_ast = Ra.T.dot(pt)
ast_ints.append(pt_ast)
ast_ints = np.array(ast_ints)
# this updates the estimated asteroid mesh used in both rmesh and est_ast
est_ast_rmesh.update(ast_ints, max_angle)
v_group.create_dataset(str(ii), data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
f_group.create_dataset(str(ii), data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
w_group.create_dataset(str(ii), data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
state_group.create_dataset(str(ii), data=state, compression=compression,
compression_opts=compression_opts)
targets_group.create_dataset(str(ii), data=targets, compression=compression,
compression_opts=compression_opts)
Ra_group.create_dataset(str(ii), data=Ra, compression=compression,
compression_opts=compression_opts)
inertial_intersections_group.create_dataset(str(ii), data=intersections, compression=compression,
compression_opts=compression_opts)
asteroid_intersections_group.create_dataset(str(ii), data=ast_ints, compression=compression,
compression_opts=compression_opts)
ii += 1
logger.info("Refinement complete")
def kinematics_refine_landing_area(filename, asteroid_name, desired_landing_site):
"""Perform a kinematics only refinement of the landing area
No dynamic simulation
"""
logger = logging.getLogger(__name__)
num_steps = int(3600*3)
time = np.arange(max_steps, max_steps + num_steps)
t0, tf = time[0], time[-1]
dt = time[1] - time[0]
# initialize the simulation objects
(true_ast_meshdata, true_ast, complete_controller,
est_ast_meshdata, est_ast_rmesh, est_ast, lidar, caster, max_angle, dum,
AbsTol, RelTol) = initialize_refinement(filename, asteroid_name)
v_bumpy, f_bumpy = wavefront.read_obj('./data/shape_model/CASTALIA/castalia_bump_2.obj')
# define the initial condition as the terminal state of the exploration sim
with h5py.File(filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
explore_tf = hf['time'][()][-1]
explore_state = hf['state/' + str(explore_tf)][()]
explore_Ra = hf['Ra/' + str(explore_tf)][()]
explore_AbsTol = hf["simulation_parameters/AbsTol"][()]
explore_RelTol = hf["simulation_parameters/RelTol"][()]
initial_state = explore_state
# open the file and recreate the objects
with h5py.File(filename, 'r+') as hf:
# groups to save the refined data
if "refinement" in hf:
input("Press ENTER to delete refinement group!!!")
del hf['refinement']
refinement_group = hf.create_group("refinement")
refinement_group.create_dataset("time", data=time, compression=compression,
compression_opts=compression_opts)
refinement_group.create_dataset("initial_state", data=initial_state)
v_group = refinement_group.create_group("reconstructed_vertex")
f_group = refinement_group.create_group("reconstructed_face")
w_group = refinement_group.create_group("reconstructed_weight")
state_group = refinement_group.create_group("state")
targets_group = refinement_group.create_group("targets")
Ra_group = refinement_group.create_group("Ra")
inertial_intersections_group = refinement_group.create_group("inertial_intersections")
asteroid_intersections_group = refinement_group.create_group("asteroid_intersections")
logger.info("Estimated asteroid has {} vertices and {} faces".format(
est_ast_rmesh.get_verts().shape[0],
est_ast_rmesh.get_faces().shape[0]))
logger.info("Now refining the faces close to the landing site")
# perform remeshing over the landing area and take a bunch of measurements
est_ast_meshdata.remesh_faces_in_view(desired_landing_site, np.deg2rad(20),
0.01)
logger.info("Estimated asteroid has {} vertices and {} faces".format(
est_ast_rmesh.get_verts().shape[0],
est_ast_rmesh.get_faces().shape[0]))
logger.info("Now starting dynamic simulation and taking measurements again again")
complete_controller.set_vertices_in_view(est_ast_rmesh, desired_landing_site,
np.deg2rad(25))
state = initial_state
for ii, t in enumerate(time):
Ra = true_ast.rot_ast2int(t)
# compute next state to go to
complete_controller.refinement(t, state, est_ast_rmesh, est_ast, desired_landing_site)
# update the state
state[0:3] = Ra.dot(desired_landing_site) * 4
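# hold the kinematically prescribed spacecraft at four times the landing-site vector,
# rotated into the inertial frame so it co-rotates with the asteroid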
# state[0:3] = complete_controller.get_posd()
state[3:6] = complete_controller.get_veld()
state[6:15] = complete_controller.get_Rd().reshape(-1)
state[15:18] = complete_controller.get_ang_vel_d()
logger.info("Step: {} Time: {} Pos: {} Uncertainty: {}".format(ii, t,
state[0:3],
np.sum(est_ast_rmesh.get_weights())))
targets = lidar.define_targets(state[0:3],
state[6:15].reshape((3, 3)),
np.linalg.norm(state[0:3]))
# target = lidar.define_target(state[0:3], state[6:15].reshape((3, 3)),
# np.linalg.norm(state[0:3]))
# update the asteroid inside the caster
nv = Ra.dot(v_bumpy.T).T
caster.update_mesh(nv, f_bumpy)
# do the raycasting
intersections = caster.castarray(state[0:3], targets)
ast_ints = []
for pt in intersections:
if np.linalg.norm(pt) < 1e-9:
logger.info("No intersection for this point")
pt_ast = np.array([np.nan, np.nan, np.nan])
else:
pt_ast = Ra.T.dot(pt)
ast_ints.append(pt_ast)
ast_ints = np.array(ast_ints)
# this updates the estimated asteroid mesh used in both rmesh and est_ast
est_ast_rmesh.update(ast_ints, max_angle, meas_weight=1.0, vert_weight=1.0)
# intersection = caster.castray(state[0:3], target)
# ast_int = Ra.T.dot(intersection)
# est_ast_rmesh.single_update(ast_int, max_angle)
v_group.create_dataset(str(t), data=est_ast_rmesh.get_verts(), compression=compression,
compression_opts=compression_opts)
f_group.create_dataset(str(t), data=est_ast_rmesh.get_faces(), compression=compression,
compression_opts=compression_opts)
w_group.create_dataset(str(t), data=est_ast_rmesh.get_weights(), compression=compression,
compression_opts=compression_opts)
state_group.create_dataset(str(t), data=state, compression=compression,
compression_opts=compression_opts)
targets_group.create_dataset(str(t), data=targets, compression=compression,
compression_opts=compression_opts)
# targets_group.create_dataset(str(ii), data=target, compression=compression,
# compression_opts=compression_opts)
Ra_group.create_dataset(str(t), data=Ra, compression=compression,
compression_opts=compression_opts)
inertial_intersections_group.create_dataset(str(t), data=intersections, compression=compression,
compression_opts=compression_opts)
asteroid_intersections_group.create_dataset(str(t), data=ast_ints, compression=compression,
compression_opts=compression_opts)
# inertial_intersections_group.create_dataset(str(ii), data=intersection, compression=compression,
# compression_opts=compression_opts)
# asteroid_intersections_group.create_dataset(str(ii), data=ast_int, compression=compression,
# compression_opts=compression_opts)
logger.info("Refinement complete")
def landing(filename, desired_landing_site):
"""Open the HDF5 file and continue the simulation from the terminal state
to landing on the surface over an additional few hours
"""
logger = logging.getLogger(__name__)
logger.info("Opening the HDF5 file from refinement {}".format(filename))
# TODO Look at blender_sim
# get all the terminal states from the exploration stage
with h5py.File(filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['refinement/state'].keys())))
explore_tf = hf['refinement/time'][()][-1]
explore_state = hf['refinement/state/' + str(explore_tf)][()]
explore_Ra = hf['refinement/Ra/' + str(explore_tf)][()]
explore_v = hf['refinement/reconstructed_vertex/' + str(explore_tf)][()]
explore_f = hf['refinement/reconstructed_face/' + str(explore_tf)][()]
explore_w = hf['refinement/reconstructed_weight/' + str(explore_tf)][()]
explore_name = hf['simulation_parameters/true_asteroid/name'][()][:-4]
explore_m1 = hf['simulation_parameters/dumbbell/m1'][()]
explore_m2 = hf['simulation_parameters/dumbbell/m2'][()]
explore_l = hf['simulation_parameters/dumbbell/l'][()]
explore_AbsTol = hf['simulation_parameters/AbsTol'][()]
explore_RelTol = hf['simulation_parameters/RelTol'][()]
explore_true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
explore_true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
num_steps = int(5000)  # time to descend from the home position to the surface
time = np.arange(explore_tf, explore_tf + num_steps)
t0, tf = time[0], time[-1]
dt = time[1] - time[0]
# initialize the asteroid and dumbbell objects
true_ast_meshdata = mesh_data.MeshData(explore_true_vertices, explore_true_faces)
true_ast = asteroid.Asteroid('castalia', true_ast_meshdata)
est_ast_meshdata = mesh_data.MeshData(explore_v, explore_f)
est_ast = asteroid.Asteroid('castalia', est_ast_meshdata)
dum = dumbbell.Dumbbell(m1=explore_m1, m2=explore_m2, l=explore_l)
initial_state = explore_state
with h5py.File(filename, 'r+') as hf:
# delete the landing group if it exists
if "landing" in hf:
input("Press ENTER to delete the landing group!!!")
del hf['landing']
# save data to HDF5 file
hf.create_dataset('landing/time', data=time, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("landing/initial_state", data=initial_state, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("landing/vertices", data=explore_v, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("landing/faces", data=explore_f, compression=compression,
compression_opts=compression_opts)
hf.create_dataset("landing/weight", data=explore_w, compression=compression,
compression_opts=compression_opts)
state_group = hf.create_group("landing/state")
Ra_group = hf.create_group("landing/Ra")
# define the system EOMS and simulate
system = integrate.ode(eoms.eoms_controlled_land_pybind)
system.set_integrator("lsoda", atol=explore_AbsTol, rtol=explore_RelTol, nsteps=10000)
system.set_initial_value(initial_state, t0)
system.set_f_params(true_ast, dum, est_ast, desired_landing_site, t0, initial_state[0:3])
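# the landing EOM is given the true asteroid (gravity), the spacecraft model, the
# estimated asteroid, the landing target, and the start time/position, presumably used
# to generate the guided descent trajectory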
ii = 1
while system.successful() and system.t < tf:
t = system.t + dt
state = system.integrate(system.t + dt)
logger.info("Step: {} Time: {} Pos: {}".format(ii, t, state[0:3]))
state_group.create_dataset(str(t), data=state, compression=compression,
compression_opts=compression_opts)
Ra_group.create_dataset(str(t), data=est_ast.rot_ast2int(t), compression=compression,
compression_opts=compression_opts)
ii+=1
def reconstruct_images(filename, output_path="/tmp/reconstruct_images",
magnification=1, show=True):
"""Read teh HDF5 data and generate a bunch of images of the reconstructing
asteroid
"""
logger = logging.getLogger(__name__)
logger.info("Starting image generation")
# check if location exists
if not os.path.exists(output_path):
os.makedirs(output_path)
logger.info('Opening {}'.format(filename))
with h5py.File(filename, 'r') as hf:
rv = hf['reconstructed_vertex']
rf = hf['reconstructed_face']
rw = hf['reconstructed_weight']
# get all the keys for the groups
v_keys = np.array(utilities.sorted_nicely(list(rv.keys())))
f_keys = np.array(utilities.sorted_nicely(list(rf.keys())))
w_keys = np.array(utilities.sorted_nicely(list(rw.keys())))
v_initial = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
f_initial = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
w_initial = np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()])
"""Partial images during the reconstruction"""
scale = 1.25
max_x = scale*np.max(v_initial[:, 0])
min_x = scale*np.min(v_initial[:, 0])
max_y = scale*np.max(v_initial[:, 1])
min_y = scale*np.min(v_initial[:, 1])
max_z = scale*np.max(v_initial[:, 2])
min_z = scale*np.min(v_initial[:, 2])
logger.info('Starting on partial reconstruction images')
mfig = graphics.mayavi_figure(offscreen=(not show))
mesh = graphics.mayavi_addMesh(mfig, v_initial, f_initial)
ms = mesh.mlab_source
graphics.mayavi_axes(mfig, [min_x, max_x, min_x, max_x, min_x, max_x], line_width=5, color=(1, 0, 0))
graphics.mayavi_view(fig=mfig)
partial_index = np.array([1, v_keys.shape[0]*1/4, v_keys.shape[0]*1/2,
v_keys.shape[0]*3/4, v_keys.shape[0]*4/4-1],
dtype=np.int)
for img_index, vk in enumerate(partial_index):
filename = os.path.join(output_path, 'partial_' + str(vk) + '.jpg')
v = rv[str(vk)][()]
# generate an image and save it
ms.set(x=v[:, 0], y=v[:, 1], z=v[:,2], triangles=f_initial)
graphics.mlab.savefig(filename, magnification=magnification)
"""Partial images using a colormap for the data"""
logger.info('Now using a colormap for the uncertainty')
mfig = graphics.mayavi_figure(offscreen=(not show))
mesh = graphics.mayavi_addMesh(mfig, v_initial, f_initial,
color=None, colormap='viridis',
scalars=w_initial)
ms = mesh.mlab_source
graphics.mayavi_axes(mfig, [-1, 1, -1, 1, -1, 1], line_width=5, color=(1, 0, 0))
graphics.mayavi_view(fig=mfig)
partial_index = np.array([1, v_keys.shape[0]*1/4, v_keys.shape[0]*1/2,
v_keys.shape[0]*3/4, v_keys.shape[0]*4/4-1],
dtype=np.int)
for img_index, vk in enumerate(partial_index):
filename = os.path.join(output_path, 'partial_weights_' + str(vk) + '.jpg')
v = rv[str(vk)][()]
w = np.squeeze(rw[str(vk)][()])
# generate an image and save it
ms.set(x=v[:, 0], y=v[:, 1], z=v[:,2], triangles=f_initial,
scalars=w)
graphics.mlab.savefig(filename, magnification=magnification)
# """Generate the completed shape at a variety of different angles"""
# logger.info('Now generating some views of the final shape')
# # change the mesh to the finished mesh
# ms.reset(x=rv[v_keys[-1]][()][:, 0],y=rv[v_keys[-1]][()][:, 1],z=rv[v_keys[-1]][()][:, 2],
# triangles=f_initial)
# elevation = np.array([30, -30])
# azimuth = np.array([0, 45, 135, 215, 315])
# for az, el in itertools.product(azimuth, elevation):
# filename = os.path.join(output_path,'final_az=' + str(az) + '_el=' + str(el) + '.jpg')
# graphics.mayavi_view(fig=mfig, azimuth=az, elevation=el)
# graphics.mlab.savefig(filename, magnification=magnification)
# """Create a bunch of images for animation"""
# logger.info('Now making images for a movie')
# animation_path = os.path.join(output_path, 'animation')
# if not os.path.exists(animation_path):
# os.makedirs(animation_path)
# ms.set(x=v_initial[:, 0], y=v_initial[:, 1], z=v_initial[:, 2], triangles=f_initial)
# for ii, vk in enumerate(v_keys):
# filename = os.path.join(animation_path, str(ii).zfill(7) + '.jpg')
# v = rv[vk][()]
# ms.reset(x=v[:, 0], y=v[:, 1], z=v[:, 2], triangles=f_initial)
# graphics.mayavi_savefig(mfig, filename, magnification=magnification)
logger.info('Finished')
return mfig
def plot_uncertainty(filename, img_path, show=True):
"""Compute the sum of uncertainty and plot as function of time"""
logger = logging.getLogger(__name__)
logger.info("Uncertainty plot as funciton of time")
with h5py.File(filename, 'r') as hf:
rv = hf['reconstructed_vertex']
rf = hf['reconstructed_face']
rw = hf['reconstructed_weight']
# get all the keys for the groups
v_keys = np.array(utilities.sorted_nicely(list(rv.keys())))
f_keys = np.array(utilities.sorted_nicely(list(rf.keys())))
w_keys = np.array(utilities.sorted_nicely(list(rw.keys())))
v_initial = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
f_initial = hf['simulation_parameters/estimate_asteroid/initial_faces'][()]
w_initial = np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()])
t_array = []
w_array = []
for ii, wk in enumerate(w_keys):
logger.info("Step {}".format(ii))
t_array.append(ii)
w_array.append(np.sum(rw[wk][()]))
t_array = np.array(t_array)
w_array = np.array(w_array)
logger.info("Plotting")
publication.plot_uncertainty(t_array, w_array, img_path=img_path, pgf_save=True,
show=show)
def animate_uncertainty(filename):
"""Create a 2D projection of the uncertainty of the surface as a function of
time
"""
with h5py.File(filename, 'r') as hf:
rv = hf['reconstructed_vertex']
rw = hf['reconstructed_weight']
# get all the keys for the groups
v_keys = np.array(utilities.sorted_nicely(list(rv.keys())))
w_keys = np.array(utilities.sorted_nicely(list(rw.keys())))
v_initial = hf['simulation_parameters/estimate_asteroid/initial_vertices'][()]
w_initial = np.squeeze(hf['simulation_parameters/estimate_asteroid/initial_weight'][()])
# convert vertices to spherical
vs_initial = wavefront.cartesian2spherical(v_initial)
fig, ax = plt.subplots(1, 1)
ax.contourf(vs_initial[:, 2], vs_initial[:, 1], np.diag(w_initial))
plt.show()
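# Several of the mapping functions below (and animate_uncertainty above) index the
# result of wavefront.cartesian2spherical as columns (radius, latitude, longitude).
# A minimal standalone sketch of the assumed conversion follows; the helper name and
# the exact formulas are assumptions, not the library implementation:
def _cartesian2spherical_sketch(xyz):
    """Return columns (r, latitude, longitude) for an (n, 3) array of points."""
    r = np.linalg.norm(xyz, axis=1)
    lat = np.arcsin(xyz[:, 2] / r)
    lon = np.arctan2(xyz[:, 1], xyz[:, 0])
    return np.column_stack((r, lat, lon))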
def plot_state_trajectory(filename, img_path, show=False):
"""Plot the state trajectory of the satellite around the asteroid
This plots the data from the exploration step
"""
with h5py.File(filename, 'r') as hf:
state_group = hf['state']
Ra_group = hf['Ra']
rv = hf['reconstructed_vertex']
rf = hf['reconstructed_face']
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
t_array = np.zeros(len(state_keys))
state_inertial_array = np.zeros((len(state_keys), 18))
state_asteroid_array = np.zeros((len(state_keys), 18))
for ii, sk in enumerate(state_keys):
t_array[ii] = ii
state_inertial_array[ii, :] = state_group[sk][()]
Ra = Ra_group[sk][()]
pos_ast = Ra.T.dot(state_inertial_array[ii, 0:3].T).T
vel_ast = Ra.T.dot(state_inertial_array[ii, 3:6].T).T
R_sc2ast = Ra.T.dot(state_inertial_array[ii, 6:15].reshape((3, 3)))
w_ast = state_inertial_array[ii, 15:18]
state_asteroid_array[ii, :] = np.hstack((pos_ast, vel_ast, R_sc2ast.reshape(-1), w_ast))
# draw three dimensional trajectory
v_final = rv[state_keys[-1]][()]
f_final = rf[state_keys[-1]][()]
mfig = graphics.mayavi_figure(offscreen=(not show))
mesh = graphics.mayavi_addMesh(mfig, v_final, f_final)
scale = 1.25
max_x = scale*np.max(v_final[:, 0])
min_x = scale*np.min(v_final[:, 0])
max_y = scale*np.max(v_final[:, 1])
min_y = scale*np.min(v_final[:, 1])
max_z = scale*np.max(v_final[:, 2])
min_z = scale*np.min(v_final[:, 2])
graphics.mayavi_axes(mfig, [min_x, max_x, min_x, max_x, min_x, max_x], line_width=5, color=(1, 0, 0))
graphics.mayavi_plot_trajectory(mfig, state_asteroid_array[:, 0:3], color=(0, 0, 1), scale_factor=0.05, mode='sphere')
graphics.mayavi_points3d(mfig, state_asteroid_array[0, 0:3], color=(0, 1, 0), scale_factor=0.2)
graphics.mayavi_points3d(mfig, state_asteroid_array[-1, 0:3], color=(1, 0, 0), scale_factor=0.2)
graphics.mayavi_view(mfig)
graphics.mayavi_savefig(mfig, os.path.join(img_path, 'asteroid_trajectory.jpg'), magnification=4)
publication.plot_state(t_array, state_inertial_array, state_asteroid_array,
img_path=img_path, show=show)
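# The loop above (and the similar one in landing_site_plots below) converts each
# inertial state into the asteroid-fixed frame using Ra, which is taken to rotate
# asteroid-frame vectors into the inertial frame. A minimal standalone sketch of
# that per-step conversion (the helper name is hypothetical):
def _inertial_to_asteroid_state(state_inertial, Ra):
    """Map an 18-element inertial state (pos, vel, R_sc2inertial flattened, omega)
    into the asteroid-fixed frame by applying Ra.T to the position, velocity and
    attitude parts."""
    pos_ast = Ra.T.dot(state_inertial[0:3])
    vel_ast = Ra.T.dot(state_inertial[3:6])
    R_sc2ast = Ra.T.dot(state_inertial[6:15].reshape((3, 3)))
    w_ast = state_inertial[15:18]
    return np.hstack((pos_ast, vel_ast, R_sc2ast.reshape(-1), w_ast))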
def plot_volume(filename, img_path, show=True):
"""Compute the volume of the asteroid at each time step
"""
with h5py.File(filename, 'r') as hf:
rv_group = hf['reconstructed_vertex']
rf_group = hf['reconstructed_face']
rv_keys = np.array(utilities.sorted_nicely(list(rv_group.keys())))
t_array = np.zeros(len(rv_keys))
vol_array = np.zeros(len(rv_keys))
for ii, key in enumerate(rv_keys):
t_array[ii] = ii
vol_array[ii] = stats.volume(rv_group[key][()], rf_group[key][()])
true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
true_volume = stats.volume(true_vertices, true_faces)
publication.plot_volume(t_array, vol_array, true_volume, img_path=img_path, pgf_save=True,
show=show)
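# plot_volume relies on stats.volume to measure the reconstructed mesh. A common
# way to compute the volume of a closed triangular mesh is the signed-tetrahedron
# (divergence theorem) sum sketched below; this is only an assumption about what
# stats.volume does, and the helper name is hypothetical:
def _mesh_volume_sketch(vertices, faces):
    """Volume of a closed, consistently oriented triangular mesh."""
    v0 = vertices[faces[:, 0]]
    v1 = vertices[faces[:, 1]]
    v2 = vertices[faces[:, 2]]
    # row-wise v0 . (v1 x v2) gives six times the signed tetrahedron volumes
    signed = np.einsum('ij,ij->i', v0, np.cross(v1, v2))
    return np.abs(signed.sum()) / 6.0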
def refine_site_plots(input_filename, img_path, show=False):
"""Given the exploration reconstruction data (after all the exploration)
This function will select a specific area and generate surface slope/roughness
plots
It returns the desired landing location on the surface in the asteroid fixed frame
"""
# generate a surface slope map for each face of an asteroid
# load an asteroid
with h5py.File(input_filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['state'].keys())))
explore_tf = hf['time'][()][-1]
explore_name = hf['simulation_parameters/true_asteroid/name'][()]
# explore_tf = int(state_keys[-1])
explore_state = hf['state/' + str(explore_tf)][()]
explore_Ra = hf['Ra/' + str(explore_tf)][()]
explore_v = hf['reconstructed_vertex/' + str(explore_tf)][()]
explore_f = hf['reconstructed_face/' + str(explore_tf)][()]
explore_w = hf['reconstructed_weight/' + str(explore_tf)][()]
explore_name = hf['simulation_parameters/true_asteroid/name'][()][:-4]
explore_m1 = hf['simulation_parameters/dumbbell/m1'][()]
explore_m2 = hf['simulation_parameters/dumbbell/m2'][()]
explore_l = hf['simulation_parameters/dumbbell/l'][()]
explore_AbsTol = hf['simulation_parameters/AbsTol'][()]
explore_RelTol = hf['simulation_parameters/RelTol'][()]
explore_true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
explore_true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
# build meshdata and asteroid from the terminal estimate
est_meshdata = mesh_data.MeshData(explore_v, explore_f)
est_ast = asteroid.Asteroid(explore_name, est_meshdata)
# choose step size based on the spacecraft landing footprint
max_radius = np.max(est_ast.get_axes()) # for castalia
delta_angle = 0.05 / max_radius
grid_long, grid_lat = np.meshgrid(np.arange(-np.pi, np.pi, delta_angle),
np.arange(-np.pi/2, np.pi/2, delta_angle))
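# (The grid spacing above follows the small-angle arc-length relation
# delta_angle ~= footprint / radius; with distances in kilometers, 0.05 km would
# correspond to roughly a 50 m landing footprint at the maximum radius. The
# footprint interpretation is an assumption, not stated in the data.)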
# # interpolate and create a radius plot
# fig_radius, ax_radius = plt.subplots(1, 1)
# # compute radius of each vertex and lat/long
spherical_vertices = wavefront.cartesian2spherical(explore_v)
r = spherical_vertices[:, 0]
lat = spherical_vertices[:, 1]
long = spherical_vertices[:, 2]
grid_r = interpolate.griddata(np.vstack((long, lat)).T, r, (grid_long, grid_lat), method='nearest')
grid_r_smooth = ndimage.gaussian_filter(grid_r, sigma=10*delta_angle)
# # ax.scatter(long, lat,c=r)
# # ax.imshow(grid_r.T, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2), origin='lower')
# ax_radius.contour(grid_long, grid_lat, grid_r)
# ax_radius.set_title('Radius (km)')
# ax_radius.set_xlabel('Longitude (rad)')
# ax_radius.set_ylabel('Latitude (rad)')
# fig_radius_img, ax_radius_img = plt.subplots(1, 1)
# img = ax_radius_img.imshow(grid_r, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2), origin='lower')
# ax_radius_img.set_title('Radius (km)')
# ax_radius_img.set_ylabel('Latitude (rad)')
# fig_radius_img.colorbar(img)
# plot of surface slope
# get the surface slope(ast) and all face centers(mesh)
face_center = est_meshdata.get_all_face_center()
face_slope = est_ast.surface_slope()
spherical_face_center = wavefront.cartesian2spherical(face_center)
# plot of face area
# face_area = est_meshdata.get_all_face_area()
# grid_area = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
# spherical_face_center[:, 1])).T,
# face_area,
# (grid_long, grid_lat),
# method='nearest') * 1e6 # convert to meters
# grid_area_smooth = ndimage.gaussian_filter(grid_area, sigma=10*delta_angle)
# fig_area, ax_area = plt.subplots(1, 1)
# # contour = ax_area.contour(grid_long, grid_lat, grid_area*1e6)
# img_area = ax_area.imshow(grid_area_smooth, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2),
# origin="lower")
# ax_area.set_title('Face area (square meters)')
# ax_area.set_xlabel('Longitude')
# ax_area.set_ylabel('Latitude')
# fig_area.colorbar(img_area)
grid_slope = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
spherical_face_center[:, 1])).T,
face_slope,
(grid_long, grid_lat),
method='nearest') * 180/np.pi
grid_slope_smooth = ndimage.gaussian_filter(grid_slope, sigma=10*delta_angle)
# build an image of the distance from the explore_state to each point on the surface
# compute geodesic distance to each face center
geodesic_distance = geodesic.central_angle(explore_state[0:3], face_center)
grid_dist = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
spherical_face_center[:, 1])).T,
geodesic_distance,
(grid_long, grid_lat),
method="nearest")
grid_dist_smooth = ndimage.gaussian_filter(grid_dist, sigma=10*delta_angle)
desired_pos_cartesian = publication.plot_refinement_plots(spherical_vertices, grid_long, grid_lat,
delta_angle, grid_slope_smooth,
grid_dist_smooth, img_path=img_path,
show=show, pgf_save=True)
print("Desired Landing site: {} ".format(desired_pos_cartesian))
return desired_pos_cartesian
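# Both refinement functions use geodesic.central_angle to rank surface points by
# angular distance from the spacecraft. A minimal sketch of the assumed
# computation (great-circle angle between unit position vectors); the helper name
# is hypothetical and this is not the library implementation:
def _central_angle_sketch(pos, points):
    """Angle between pos (3,) and each row of points (n, 3), in radians."""
    u = pos / np.linalg.norm(pos)
    v = points / np.linalg.norm(points, axis=1)[:, np.newaxis]
    return np.arccos(np.clip(v.dot(u), -1.0, 1.0))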
def landing_site_plots(input_filename, img_path, show=False):
"""Given the exploration reconstruction data (after all the exploration)
This function will select a specific area and generate surface slope/roughness
plots
It returns the desired landing location on the surface in the asteroid fixed frame
"""
# generate a surface slope map for each face of an asteroid
# load an asteroid
with h5py.File(input_filename, 'r') as hf:
state_keys = np.array(utilities.sorted_nicely(list(hf['refinement/state'].keys())))
explore_tf = hf['refinement/time'][()][-1]
explore_name = hf['simulation_parameters/true_asteroid/name'][()]
# explore_tf = int(state_keys[-1])
explore_state = hf['refinement/state/' + str(explore_tf)][()]
explore_Ra = hf['refinement/Ra/' + str(explore_tf)][()]
explore_v = hf['refinement/reconstructed_vertex/' + str(explore_tf)][()]
explore_f = hf['refinement/reconstructed_face/' + str(explore_tf)][()]
explore_w = hf['refinement/reconstructed_weight/' + str(explore_tf)][()]
explore_name = hf['simulation_parameters/true_asteroid/name'][()][:-4]
explore_m1 = hf['simulation_parameters/dumbbell/m1'][()]
explore_m2 = hf['simulation_parameters/dumbbell/m2'][()]
explore_l = hf['simulation_parameters/dumbbell/l'][()]
explore_AbsTol = hf['simulation_parameters/AbsTol'][()]
explore_RelTol = hf['simulation_parameters/RelTol'][()]
explore_true_vertices = hf['simulation_parameters/true_asteroid/vertices'][()]
explore_true_faces = hf['simulation_parameters/true_asteroid/faces'][()]
landing_keys = np.array(utilities.sorted_nicely(list(hf['landing/state'].keys())))
landing_state_group = hf['landing/state']
landing_Ra_group = hf['landing/Ra']
landing_time = hf['landing/time'][()]
landing_v = hf['landing/vertices'][()]
landing_f = hf['landing/faces'][()]
# draw the trajectory in the asteroid frame using mayavi
t_array = np.zeros(len(landing_keys))
state_inertial_array = np.zeros((len(landing_keys), 18))
state_asteroid_array = np.zeros((len(landing_keys), 18))
for ii, sk in enumerate(landing_keys):
t_array[ii] = ii
state_inertial_array[ii, :] = landing_state_group[sk][()]
Ra = landing_Ra_group[sk][()]
pos_ast = Ra.T.dot(state_inertial_array[ii, 0:3].T).T
vel_ast = Ra.T.dot(state_inertial_array[ii, 3:6].T).T
R_sc2ast = Ra.T.dot(state_inertial_array[ii, 6:15].reshape((3, 3)))
w_ast = state_inertial_array[ii, 15:18]
state_asteroid_array[ii, :] = np.hstack((pos_ast, vel_ast, R_sc2ast.reshape(-1), w_ast))
mfig = graphics.mayavi_figure(offscreen=(not show))
mesh = graphics.mayavi_addMesh(mfig, landing_v, landing_f)
scale = 1.25
max_x = scale*np.max(landing_v[:, 0])
min_x = scale*np.min(landing_v[:, 0])
max_y = scale*np.max(landing_v[:, 1])
min_y = scale*np.min(landing_v[:, 1])
max_z = scale*np.max(landing_v[:, 2])
min_z = scale*np.min(landing_v[:, 2])
graphics.mayavi_axes(mfig, [min_x, max_x, min_x, max_x, min_x, max_x], line_width=5, color=(1, 0, 0))
graphics.mayavi_plot_trajectory(mfig, state_asteroid_array[:, 0:3], color=(0, 0, 1), scale_factor=0.01, mode='sphere')
graphics.mayavi_points3d(mfig, state_asteroid_array[0, 0:3], color=(0, 1, 0), scale_factor=0.1)
graphics.mayavi_points3d(mfig, state_asteroid_array[-1, 0:3], color=(1, 0, 0), scale_factor=0.1)
graphics.mlab.view(*(45, 54, 4.9, np.array([0.43, -0.057, 0.22])))
graphics.mayavi_savefig(mfig, os.path.join(img_path, 'asteroid_trajectory.jpg'), magnification=4)
# build meshdata and asteroid from the terminal estimate
est_meshdata = mesh_data.MeshData(explore_v, explore_f)
est_ast = asteroid.Asteroid(explore_name, est_meshdata)
# choose step size based on the spacecraft landing footprint
max_radius = np.max(est_ast.get_axes()) # for castalia
delta_angle = 0.05 / max_radius
grid_long, grid_lat = np.meshgrid(np.arange(-np.pi, np.pi, delta_angle),
np.arange(-np.pi/2, np.pi/2, delta_angle))
# # interpolate and create a radius plot
# fig_radius, ax_radius = plt.subplots(1, 1)
# # compute radius of each vertex and lat/long
spherical_vertices = wavefront.cartesian2spherical(explore_v)
r = spherical_vertices[:, 0]
lat = spherical_vertices[:, 1]
long = spherical_vertices[:, 2]
grid_r = interpolate.griddata(np.vstack((long, lat)).T, r, (grid_long, grid_lat), method='nearest')
grid_r_smooth = ndimage.gaussian_filter(grid_r, sigma=10*delta_angle)
# # ax.scatter(long, lat,c=r)
# # ax.imshow(grid_r.T, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2), origin='lower')
# ax_radius.contour(grid_long, grid_lat, grid_r)
# ax_radius.set_title('Radius (km)')
# ax_radius.set_xlabel('Longitude (rad)')
# ax_radius.set_ylabel('Latitude (rad)')
# fig_radius_img, ax_radius_img = plt.subplots(1, 1)
# img = ax_radius_img.imshow(grid_r, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2), origin='lower')
# ax_radius_img.set_title('Radius (km)')
# ax_radius_img.set_ylabel('Latitude (rad)')
# fig_radius_img.colorbar(img)
# plot of surface slope
# get the surface slope(ast) and all face centers(mesh)
face_center = est_meshdata.get_all_face_center()
face_slope = est_ast.surface_slope()
spherical_face_center = wavefront.cartesian2spherical(face_center)
# plot of face area
# face_area = est_meshdata.get_all_face_area()
# grid_area = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
# spherical_face_center[:, 1])).T,
# face_area,
# (grid_long, grid_lat),
# method='nearest') * 1e6 # convert to meters
# grid_area_smooth = ndimage.gaussian_filter(grid_area, sigma=10*delta_angle)
# fig_area, ax_area = plt.subplots(1, 1)
# # contour = ax_area.contour(grid_long, grid_lat, grid_area*1e6)
# img_area = ax_area.imshow(grid_area_smooth, extent=(-np.pi, np.pi, -np.pi/2, np.pi/2),
# origin="lower")
# ax_area.set_title('Face area (square meters)')
# ax_area.set_xlabel('Longitude')
# ax_area.set_ylabel('Latitude')
# fig_area.colorbar(img_area)
grid_slope = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
spherical_face_center[:, 1])).T,
face_slope,
(grid_long, grid_lat),
method='nearest') * 180/np.pi
grid_slope_smooth = ndimage.gaussian_filter(grid_slope, sigma=10*delta_angle)
# build an image of the distance from the explore_state to each point on the surface
# compute geodesic distance to each face center
geodesic_distance = geodesic.central_angle(explore_state[0:3], face_center)
grid_dist = interpolate.griddata(np.vstack((spherical_face_center[:, 2],
spherical_face_center[:, 1])).T,
geodesic_distance,
(grid_long, grid_lat),
method="nearest")
grid_dist_smooth = ndimage.gaussian_filter(grid_dist, sigma=10*delta_angle)
desired_pos_cartesian = publication.plot_refinement_plots(spherical_vertices, grid_long, grid_lat,
delta_angle, grid_slope_smooth,
grid_dist_smooth, img_path=img_path,
show=show, pgf_save=True)
print("Desired Landing site: {} ".format(desired_pos_cartesian))
return desired_pos_cartesian
if __name__ == "__main__":
logging_file = tempfile.mkstemp(suffix='.txt.')[1]
# logging_file = "/tmp/exploration_log.txt"
logging.basicConfig(filename=logging_file,
filemode='w', level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
print("Logging to {}".format(logging_file))
parser = argparse.ArgumentParser(description="Exploration and asteroid reconstruction simulation",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("simulation_data",
help="Filename to store the simulation data",
action="store")
parser.add_argument("name", help="Asteroid name",
action="store", type=str)
# parser.add_argument("reconstruct_data",
# help="Filename to store the reconstruction data")
parser.add_argument("-mc", "--move_cam", help="For use with the -a, --animate option. This will translate the camera and give you a view from the satellite",
action="store_true")
parser.add_argument("-mw", "--mesh_weight", help="For use with the -a, --animate option. This will add the uncertainty as a colormap to the asteroid",
action="store_true")
parser.add_argument("--show", help="Show the plots", action="store_true",
default=False)
parser.add_argument("-m", "--magnification", help="Magnification for images",
action="store", type=int, const=4, nargs='?', default=4)
group = parser.add_mutually_exclusive_group()
# group.add_argument("-s", "--simulate", help="Run the exploration simulation",
# action="store_true")
group.add_argument("-c", "--control_sim", help="Exploration with a control cost component",
action="store_true")
group.add_argument("-a", "--animate", help="Animate the data from the exploration sim",
action="store_true")
group.add_argument("-r", "--reconstruct", help="Generate images for the reconstruction",
action="store", type=str)
group.add_argument("-st", "--state" , help="Generate state trajectory plots",
action="store", type=str)
group.add_argument("-u", "--uncertainty", help="Generate uncertainty plot",
action="store", type=str)
group.add_argument("-au", "--animate_uncertainty", help="Animate map view of uncertainty over time",
action="store_true")
group.add_argument("-v", "--volume", help="Generate plot of volume",
action="store", type=str)
group.add_argument("-sa", "--save_animation", help="Save the animation as a sequence of images",
action="store", type=str)
group.add_argument("-l" , "--landing", help="Continue from the end of exploration to the surface",
action="store_true")
group.add_argument("-la", "--landing_animation", help="Landing animation",
action="store_true")
group.add_argument("-lsa", "--landing_save_animation", help="Save landing animation to a video",
action="store", type=str)
group.add_argument("-lp", "--landing_plots", help="Generate plots to select landing site",
action="store")
group.add_argument("-rp", "--refine_plots", help="Generate plots for landing refinement",
action="store", type=str)
group.add_argument("-lr", "--landing_refine", help="Determine best landing spot and refine prior to using -l",
action="store_true")
group.add_argument("-lkr", "--landing_kinematic_refine", help="Landing refinement using a kinematics only model",
action="store_true")
group.add_argument("-lra", "--landing_refine_animation", help="Animate the refinement process",
action="store_true")
group.add_argument("-lrsa", "--landing_refine_save_animation", help="Save the refinment animation",
action="store", type=str)
args = parser.parse_args()
if args.control_sim:
simulate_control(args.simulation_data, args.name)
elif args.reconstruct:
reconstruct_images(args.simulation_data,output_path=args.reconstruct , magnification=args.magnification,
show=args.show)
elif args.volume:
plot_volume(args.simulation_data, img_path=args.volume, show=args.show)
elif args.uncertainty:
plot_uncertainty(args.simulation_data, img_path=args.uncertainty, show=args.show)
elif args.refine_plots:
refine_site_plots(args.simulation_data, img_path=args.refine_plots,
show=args.show)
elif args.state:
plot_state_trajectory(args.simulation_data, img_path=args.state,
show=args.show)
elif args.landing_plots:
landing_site_plots(args.simulation_data, img_path=args.landing_plots,
show=args.show)
elif args.animate:
animate(args.simulation_data, move_cam=args.move_cam,
mesh_weight=args.mesh_weight)
elif args.animate_uncertainty:
animate_uncertainty(args.simulation_data)
elif args.save_animation:
save_animation(args.simulation_data, move_cam=args.move_cam,
mesh_weight=args.mesh_weight, output_path=args.save_animation)
elif args.landing:
# landing_site_plots(args.simulation_data)
desired_landing_spot = np.array([0.48501797, -0.02027519, 0.37758639])
landing(args.simulation_data, desired_landing_spot)
elif args.landing_animation:
animate_landing(args.simulation_data, move_cam=args.move_cam, mesh_weight=args.mesh_weight)
elif args.landing_save_animation:
save_animate_landing(args.simulation_data, move_cam=args.move_cam, mesh_weight=args.mesh_weight,
output_path=args.landing_save_animation)
elif args.landing_refine:
# landing location in the asteroid fixed frame
# desired_landing_spot = refine_site_plots(args.simulation_data)
desired_landing_spot = np.array([0.47180473, -0.01972284, 0.36729988])
refine_landing_area(args.simulation_data, args.name, desired_landing_spot)
elif args.landing_refine_animation:
animate_refinement(args.simulation_data, move_cam=args.move_cam, mesh_weight=args.mesh_weight)
elif args.landing_refine_save_animation:
save_animate_refinement(args.simulation_data, move_cam=args.move_cam,
mesh_weight=args.mesh_weight,
output_path=args.landing_refine_save_animation)
elif args.landing_kinematic_refine:
desired_landing_spot = np.array([0.47180473, -0.01972284, 0.36729988])
kinematics_refine_landing_area(args.simulation_data, args.name, desired_landing_spot)
|
skulumani/asteroid_dumbbell
|
exploration_sim.py
|
Python
|
gpl-3.0
| 100,130
|
[
"Mayavi"
] |
be5689020962d326d04b19dbb54512d3cbd9d25504b36e260ff03b803cac58c2
|
#!/usr/bin/env python
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""Setuptools-based setup script for MDAnalysis.
A working installation of NumPy <http://numpy.scipy.org> is required.
For a basic installation just type the command::
python setup.py install
For more in-depth instructions, see the installation section at the
MDAnalysis Wiki:
https://github.com/MDAnalysis/mdanalysis/wiki/INSTALL
Also feel free to ask on the MDAnalysis mailing list for help:
http://groups.google.com/group/mdnalysis-discussion
(Note that the group really is called `mdnalysis-discussion' because
Google groups forbids any name that contains the string `anal'.)
"""
from setuptools import setup, Extension, find_packages
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler
from packaging.version import Version
import codecs
import os
import sys
import re
import shutil
import tempfile
import warnings
import platform
# Make sure I have the right Python version.
if sys.version_info[:2] < (3, 7):
print('MDAnalysis requires Python 3.7 or better. Python {0:d}.{1:d} detected'.format(*
sys.version_info[:2]))
print('Please upgrade your version of Python.')
sys.exit(-1)
if sys.version_info[0] < 3:
import ConfigParser as configparser
else:
import configparser
if sys.version_info[0] >= 3:
from subprocess import getoutput
else:
from commands import getoutput
# NOTE: keep in sync with MDAnalysis.__version__ in version.py
RELEASE = "2.2.0-dev0"
is_release = 'dev' not in RELEASE
# Handle cython modules
try:
# cython has to be >=0.16 <0.28 to support cython.parallel
import Cython
from Cython.Build import cythonize
cython_found = True
required_version = "0.16"
if not Version(Cython.__version__) >= Version(required_version):
# We don't necessarily die here. Maybe we already have
# the cythonized '.c' files.
print("Cython version {0} was found but won't be used: version {1} "
"or greater is required because it offers a handy "
"parallelization module".format(
Cython.__version__, required_version))
cython_found = False
cython_linetrace = bool(os.environ.get('CYTHON_TRACE_NOGIL', False))
except ImportError:
cython_found = False
if not is_release:
print("*** package: Cython not found ***")
print("MDAnalysis requires cython for development builds")
sys.exit(1)
cython_linetrace = False
def abspath(file):
return os.path.join(os.path.dirname(os.path.abspath(__file__)),
file)
class Config(object):
"""Config wrapper class to get build options
This class looks for options in the environment variables and the
'setup.cfg' file. The order in which we look for an option is:
1. Environment Variable
2. set in 'setup.cfg'
3. given default
Environment variables should start with 'MDA_' and be all uppercase.
Values passed to environment variables are checked (case-insensitively)
for specific strings with boolean meaning: 'True' or '1' will cause `True`
to be returned. '0' or 'False' cause `False` to be returned.
"""
def __init__(self, fname='setup.cfg'):
fname = abspath(fname)
if os.path.exists(fname):
self.config = configparser.ConfigParser()
self.config.read(fname)
def get(self, option_name, default=None):
environ_name = 'MDA_' + option_name.upper()
if environ_name in os.environ:
val = os.environ[environ_name]
if val.upper() in ('1', 'TRUE'):
return True
elif val.upper() in ('0', 'FALSE'):
return False
return val
try:
option = self.config.get('options', option_name)
return option
except configparser.NoOptionError:
return default
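# A minimal usage sketch of the lookup order described in the Config docstring
# (the option name is illustrative):
#
#     config = Config()                               # reads setup.cfg if present
#     use_openmp = config.get('use_openmp', default=True)
#
# Setting MDA_USE_OPENMP=0 in the environment would override both the setup.cfg
# value and the default.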
class MDAExtension(Extension, object):
"""Derived class to cleanly handle setup-time (numpy) dependencies.
"""
# The only setup-time numpy dependency comes when setting up its
# include dir.
# The actual numpy import and call can be delayed until after pip
# has figured it must install numpy.
# This is accomplished by passing the get_numpy_include function
# as one of the include_dirs. This derived Extension class takes
# care of calling it when needed.
def __init__(self, name, sources, *args, **kwargs):
self._mda_include_dirs = []
# don't abspath sources else packaging fails on Windows (issue #3129)
super(MDAExtension, self).__init__(name, sources, *args, **kwargs)
@property
def include_dirs(self):
if not self._mda_include_dirs:
for item in self._mda_include_dir_args:
try:
self._mda_include_dirs.append(item()) #The numpy callable
except TypeError:
item = abspath(item)
self._mda_include_dirs.append((item))
return self._mda_include_dirs
@include_dirs.setter
def include_dirs(self, val):
self._mda_include_dir_args = val
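# Usage sketch: in extensions() below the function object get_numpy_include --
# not its return value -- is placed in include_dirs, e.g.
#     include_dirs = [get_numpy_include]
# so the numpy import is deferred until the extension is actually built.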
def get_numpy_include():
# Obtain the numpy include directory. This logic works across numpy
# versions.
# setuptools forgets to unset numpy's setup flag and we get a crippled
# version of it unless we do it ourselves.
import builtins
builtins.__NUMPY_SETUP__ = False
try:
import numpy as np
except ImportError:
print('*** package "numpy" not found ***')
print('MDAnalysis requires a version of NumPy (>=1.18.0), even for setup.')
print('Please get it from http://numpy.scipy.org/ or install it through '
'your package manager.')
sys.exit(-1)
return np.get_include()
def hasfunction(cc, funcname, include=None, extra_postargs=None):
# From http://stackoverflow.com/questions/
# 7018879/disabling-output-when-compiling-with-distutils
tmpdir = tempfile.mkdtemp(prefix='hasfunction-')
devnull = oldstderr = None
try:
try:
fname = os.path.join(tmpdir, 'funcname.c')
with open(fname, 'w') as f:
if include is not None:
f.write('#include {0!s}\n'.format(include))
f.write('int main(void) {\n')
f.write(' {0!s};\n'.format(funcname))
f.write('}\n')
# Redirect stderr to /dev/null to hide any error messages
# from the compiler.
# This will have to be changed if we ever have to check
# for a function on Windows.
devnull = open('/dev/null', 'w')
oldstderr = os.dup(sys.stderr.fileno())
os.dup2(devnull.fileno(), sys.stderr.fileno())
objects = cc.compile([fname], output_dir=tmpdir,
extra_postargs=extra_postargs)
cc.link_executable(objects, os.path.join(tmpdir, "a.out"))
except Exception:
return False
return True
finally:
if oldstderr is not None:
os.dup2(oldstderr, sys.stderr.fileno())
if devnull is not None:
devnull.close()
shutil.rmtree(tmpdir)
def detect_openmp():
"""Does this compiler support OpenMP parallelization?"""
print("Attempting to autodetect OpenMP support... ", end="")
compiler = new_compiler()
customize_compiler(compiler)
compiler.add_library('gomp')
include = '<omp.h>'
extra_postargs = ['-fopenmp']
hasopenmp = hasfunction(compiler, 'omp_get_num_threads()', include=include,
extra_postargs=extra_postargs)
if hasopenmp:
print("Compiler supports OpenMP")
else:
print("Did not detect OpenMP support.")
return hasopenmp
def using_clang():
"""Will we be using a clang compiler?"""
compiler = new_compiler()
customize_compiler(compiler)
compiler_ver = getoutput("{0} -v".format(compiler.compiler[0]))
if 'Spack GCC' in compiler_ver:
# when gcc toolchain is built from source with spack
# using clang, the 'clang' string may be present in
# the compiler metadata, but it is not clang
is_clang = False
elif 'clang' in compiler_ver:
# by default, Apple will typically alias gcc to
# clang, with some mention of 'clang' in the
# metadata
is_clang = True
else:
is_clang = False
return is_clang
def extensions(config):
# usually (except coming from release tarball) cython files must be generated
use_cython = config.get('use_cython', default=cython_found)
use_openmp = config.get('use_openmp', default=True)
extra_compile_args = ['-std=c99', '-ffast-math', '-O3', '-funroll-loops',
'-fsigned-zeros'] # see #2722
define_macros = []
if config.get('debug_cflags', default=False):
extra_compile_args.extend(['-Wall', '-pedantic'])
define_macros.extend([('DEBUG', '1')])
# encore is sensitive to floating point accuracy, especially on non-x86
# to avoid reducing optimisations on everything, we make a set of compile
# args specific to encore see #2997 for an example of this.
encore_compile_args = [a for a in extra_compile_args if 'O3' not in a]
if platform.machine() == 'aarch64' or platform.machine() == 'ppc64le':
encore_compile_args.append('-O1')
else:
encore_compile_args.append('-O3')
# allow using custom c/c++ flags and architecture specific instructions.
# This allows people to build optimized versions of MDAnalysis.
# Do here so not included in encore
extra_cflags = config.get('extra_cflags', default=False)
if extra_cflags:
flags = extra_cflags.split()
extra_compile_args.extend(flags)
cpp_extra_compile_args = [a for a in extra_compile_args if 'std' not in a]
cpp_extra_compile_args.append('-std=c++11')
cpp_extra_link_args=[]
# needed to specify c++ runtime library on OSX
if platform.system() == 'Darwin' and using_clang():
cpp_extra_compile_args.append('-stdlib=libc++')
cpp_extra_compile_args.append('-mmacosx-version-min=10.9')
cpp_extra_link_args.append('-stdlib=libc++')
cpp_extra_link_args.append('-mmacosx-version-min=10.9')
# Needed for large-file seeking under 32bit systems (for xtc/trr indexing
# and access).
largefile_macros = [
('_LARGEFILE_SOURCE', None),
('_LARGEFILE64_SOURCE', None),
('_FILE_OFFSET_BITS', '64')
]
has_openmp = detect_openmp()
if use_openmp and not has_openmp:
print('No OpenMP-compatible compiler found; defaulting to a serial build.')
parallel_args = ['-fopenmp'] if has_openmp and use_openmp else []
parallel_libraries = ['gomp'] if has_openmp and use_openmp else []
parallel_macros = [('PARALLEL', None)] if has_openmp and use_openmp else []
if use_cython:
print('Will attempt to use Cython.')
if not cython_found:
print("Couldn't find a Cython installation. "
"Not recompiling cython extensions.")
use_cython = False
else:
print('Will not attempt to use Cython.')
source_suffix = '.pyx' if use_cython else '.c'
cpp_source_suffix = '.pyx' if use_cython else '.cpp'
# The callable is passed so that it is only evaluated at install time.
include_dirs = [get_numpy_include]
# Windows automatically handles math library linking
# and will not build MDAnalysis if we try to specify one
if os.name == 'nt':
mathlib = []
else:
mathlib = ['m']
if cython_linetrace:
extra_compile_args.append("-DCYTHON_TRACE_NOGIL")
cpp_extra_compile_args.append("-DCYTHON_TRACE_NOGIL")
libdcd = MDAExtension('MDAnalysis.lib.formats.libdcd',
['MDAnalysis/lib/formats/libdcd' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/formats/include'],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
distances = MDAExtension('MDAnalysis.lib.c_distances',
['MDAnalysis/lib/c_distances' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
libraries=mathlib,
define_macros=define_macros,
extra_compile_args=extra_compile_args)
distances_omp = MDAExtension('MDAnalysis.lib.c_distances_openmp',
['MDAnalysis/lib/c_distances_openmp' + source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
libraries=mathlib + parallel_libraries,
define_macros=define_macros + parallel_macros,
extra_compile_args=parallel_args + extra_compile_args,
extra_link_args=parallel_args)
qcprot = MDAExtension('MDAnalysis.lib.qcprot',
['MDAnalysis/lib/qcprot' + source_suffix],
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args)
transformation = MDAExtension('MDAnalysis.lib._transformations',
['MDAnalysis/lib/src/transformations/transformations.c'],
libraries=mathlib,
define_macros=define_macros,
include_dirs=include_dirs,
extra_compile_args=extra_compile_args)
libmdaxdr = MDAExtension('MDAnalysis.lib.formats.libmdaxdr',
sources=['MDAnalysis/lib/formats/libmdaxdr' + source_suffix,
'MDAnalysis/lib/formats/src/xdrfile.c',
'MDAnalysis/lib/formats/src/xdrfile_xtc.c',
'MDAnalysis/lib/formats/src/xdrfile_trr.c',
'MDAnalysis/lib/formats/src/trr_seek.c',
'MDAnalysis/lib/formats/src/xtc_seek.c',
],
include_dirs=include_dirs + ['MDAnalysis/lib/formats/include',
'MDAnalysis/lib/formats'],
define_macros=largefile_macros + define_macros,
extra_compile_args=extra_compile_args)
util = MDAExtension('MDAnalysis.lib.formats.cython_util',
sources=['MDAnalysis/lib/formats/cython_util' + source_suffix],
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args)
cutil = MDAExtension('MDAnalysis.lib._cutil',
sources=['MDAnalysis/lib/_cutil' + cpp_source_suffix],
language='c++',
libraries=mathlib,
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
define_macros=define_macros,
extra_compile_args=cpp_extra_compile_args,
extra_link_args= cpp_extra_link_args)
augment = MDAExtension('MDAnalysis.lib._augment',
sources=['MDAnalysis/lib/_augment' + cpp_source_suffix],
language='c++',
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=cpp_extra_compile_args,
extra_link_args= cpp_extra_link_args)
encore_utils = MDAExtension('MDAnalysis.analysis.encore.cutils',
sources=['MDAnalysis/analysis/encore/cutils' + source_suffix],
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=encore_compile_args)
ap_clustering = MDAExtension('MDAnalysis.analysis.encore.clustering.affinityprop',
sources=['MDAnalysis/analysis/encore/clustering/affinityprop' + source_suffix,
'MDAnalysis/analysis/encore/clustering/src/ap.c'],
include_dirs=include_dirs+['MDAnalysis/analysis/encore/clustering/include'],
libraries=mathlib,
define_macros=define_macros,
extra_compile_args=encore_compile_args)
spe_dimred = MDAExtension('MDAnalysis.analysis.encore.dimensionality_reduction.stochasticproxembed',
sources=['MDAnalysis/analysis/encore/dimensionality_reduction/stochasticproxembed' + source_suffix,
'MDAnalysis/analysis/encore/dimensionality_reduction/src/spe.c'],
include_dirs=include_dirs+['MDAnalysis/analysis/encore/dimensionality_reduction/include'],
libraries=mathlib,
define_macros=define_macros,
extra_compile_args=encore_compile_args)
nsgrid = MDAExtension('MDAnalysis.lib.nsgrid',
['MDAnalysis/lib/nsgrid' + cpp_source_suffix],
include_dirs=include_dirs + ['MDAnalysis/lib/include'],
language='c++',
define_macros=define_macros,
extra_compile_args=cpp_extra_compile_args,
extra_link_args= cpp_extra_link_args)
pre_exts = [libdcd, distances, distances_omp, qcprot,
transformation, libmdaxdr, util, encore_utils,
ap_clustering, spe_dimred, cutil, augment, nsgrid]
cython_generated = []
if use_cython:
extensions = cythonize(
pre_exts,
compiler_directives={'linetrace': cython_linetrace,
'embedsignature': False,
'language_level': '3'},
)
if cython_linetrace:
print("Cython coverage will be enabled")
for pre_ext, post_ext in zip(pre_exts, extensions):
for source in post_ext.sources:
if source not in pre_ext.sources:
cython_generated.append(source)
else:
#Let's check early for missing .c files
extensions = pre_exts
for ext in extensions:
for source in ext.sources:
if not (os.path.isfile(source) and
os.access(source, os.R_OK)):
raise IOError("Source file '{}' not found. This might be "
"caused by a missing Cython install, or a "
"failed/disabled Cython build.".format(source))
return extensions, cython_generated
def dynamic_author_list():
"""Generate __authors__ from AUTHORS
This function generates authors.py that contains the list of the
authors from the AUTHORS file. This avoids having that list maintained in
several places. Note that AUTHORS is sorted chronologically while we want
__authors__ in authors.py to be sorted alphabetically.
The authors are written in AUTHORS as bullet points under the
"Chronological list of authors" title.
"""
authors = []
with codecs.open(abspath('AUTHORS'), encoding='utf-8') as infile:
# An author is a bullet point under the title "Chronological list of
# authors". We first want move the cursor down to the title of
# interest.
for line_no, line in enumerate(infile, start=1):
if line.rstrip() == "Chronological list of authors":
break
else:
# If we did not break, it means we did not find the authors.
raise IOError('EOF before the list of authors')
# Skip the next line as it is the title underlining
line = next(infile)
line_no += 1
if line[:4] != '----':
raise IOError('Unexpected content on line {0}, '
'should be a string of "-".'.format(line_no))
# Add each bullet point as an author until the next title underlining
for line in infile:
if line[:4] in ('----', '====', '~~~~'):
# The previous line was a title, hopefully it did not start as
# a bullet point so it got ignored. Since we hit a title, we
# are done reading the list of authors.
break
elif line.strip()[:2] == '- ':
# This is a bullet point, so it should be an author name.
name = line.strip()[2:].strip()
authors.append(name)
# So far, the list of authors is sorted chronologically. We want it
# sorted alphabetically by last name.
authors.sort(key=lambda name: name.split()[-1])
# Move Naveen and Elizabeth first, and Oliver last.
authors.remove('Naveen Michaud-Agrawal')
authors.remove('Elizabeth J. Denning')
authors.remove('Oliver Beckstein')
authors = (['Naveen Michaud-Agrawal', 'Elizabeth J. Denning']
+ authors + ['Oliver Beckstein'])
# Write the authors.py file.
out_path = abspath('MDAnalysis/authors.py')
with codecs.open(out_path, 'w', encoding='utf-8') as outfile:
# Write the header
header = '''\
#-*- coding:utf-8 -*-
# This file is generated from the AUTHORS file during the installation process.
# Do not edit it as your changes will be overwritten.
'''
print(header, file=outfile)
# Write the list of authors as a python list
template = u'__authors__ = [\n{}\n]'
author_string = u',\n'.join(u' u"{}"'.format(name)
for name in authors)
print(template.format(author_string), file=outfile)
def long_description(readme):
"""Create reST SUMMARY file for PyPi."""
with open(abspath(readme)) as summary:
buffer = summary.read()
# remove top heading that messes up pypi display
m = re.search('====*\n[^\n]*README[^\n]*\n=====*\n', buffer,
flags=re.DOTALL)
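# Illustrative heading block that the pattern above strips (hypothetical text):
#
#   ================================
#     MDAnalysis README
#   ================================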
assert m, "README.rst does not contain a level-1 heading"
return buffer[m.end():]
if __name__ == '__main__':
try:
dynamic_author_list()
except (OSError, IOError):
warnings.warn('Cannot write the list of authors.')
try:
# when building from repository for creating the distribution
LONG_DESCRIPTION = long_description("../README.rst")
except OSError:
# when building from a tar file for installation
# (LONG_DESCRIPTION is not really needed)
LONG_DESCRIPTION = "MDAnalysis -- https://www.mdanalysis.org/"
CLASSIFIERS = [
'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows ',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: C',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
'Topic :: Software Development :: Libraries :: Python Modules',
]
config = Config()
exts, cythonfiles = extensions(config)
install_requires = [
'numpy>=1.18.0',
'biopython>=1.71',
'networkx>=1.0',
'GridDataFormats>=0.4.0',
'mmtf-python>=1.0.0',
'joblib>=0.12',
'scipy>=1.0.0',
'matplotlib>=1.5.1',
'tqdm>=4.43.0',
'threadpoolctl',
'packaging',
]
if not os.name == 'nt':
install_requires.append('gsd>=1.4.0')
else:
install_requires.append('gsd>=1.9.3')
setup(name='MDAnalysis',
version=RELEASE,
description=('An object-oriented toolkit to analyze molecular dynamics '
'trajectories generated by CHARMM, Gromacs, NAMD, LAMMPS, or Amber.'),
long_description=LONG_DESCRIPTION,
long_description_content_type='text/x-rst',
author='MDAnalysis Development Team',
author_email='mdanalysis@numfocus.org',
maintainer='MDAnalysis Core Developers',
maintainer_email='mdanalysis@numfocus.org',
url='https://www.mdanalysis.org',
download_url='https://github.com/MDAnalysis/mdanalysis/releases',
project_urls={'Documentation': 'https://docs.mdanalysis.org/',
'User Guide': 'https://userguide.mdanalysis.org/',
'Issue Tracker': 'https://github.com/mdanalysis/mdanalysis/issues',
'User Group': 'https://groups.google.com/g/mdnalysis-discussion/',
'Discord': 'https://discord.com/channels/807348386012987462/',
'Blog': 'https://www.mdanalysis.org/blog/',
'Twitter': 'https://twitter.com/mdanalysis',
'Source': 'https://github.com/mdanalysis/mdanalysis',
},
license='GPL 2',
classifiers=CLASSIFIERS,
provides=['MDAnalysis'],
packages=find_packages(),
package_data={'MDAnalysis':
['analysis/data/*.npy',
],
},
ext_modules=exts,
python_requires='>=3.7',
# all standard requirements are available through PyPi and
# typically can be installed without difficulties through setuptools
setup_requires=[
'numpy>=1.18.0',
'packaging',
],
install_requires=install_requires,
# extras can be difficult to install through setuptools and/or
# you might prefer to use the version available through your
# packaging system
extras_require={
'AMBER': [
'netCDF4>=1.0', # for fast AMBER writing, also needs HDF5
],
'analysis': [
'seaborn', # for annotated heat map and nearest neighbor
# plotting in PSA
'sklearn', # For clustering and dimensionality reduction
# functionality in encore
'tidynamics>=1.0.0', # For MSD analysis method
],
},
test_suite="MDAnalysisTests",
tests_require=[
'MDAnalysisTests=={0!s}'.format(RELEASE), # same as this release!
],
zip_safe=False, # as a zipped egg the *.so files are not found (at
# least in Ubuntu/Linux)
)
# Releases keep their cythonized stuff for shipping.
if not config.get('keep_cythonized', default=is_release) and not cython_linetrace:
for cythonized in cythonfiles:
try:
os.unlink(cythonized)
except OSError as err:
print("Warning: failed to delete cythonized file {0}: {1}. "
"Moving on.".format(cythonized, err.strerror))
|
MDAnalysis/mdanalysis
|
package/setup.py
|
Python
|
gpl-2.0
| 29,101
|
[
"Amber",
"Biopython",
"CHARMM",
"Gromacs",
"LAMMPS",
"MDAnalysis",
"NAMD"
] |
93cc1314520b388aca1bd78397974ccf3011ffb0aa6f94d61ee483091c0520d9
|
# tuftedRS.py ---
#
# Filename: tuftedRS.py
# Description:
# Author: subhasis ray
# Maintainer:
# Created: Fri Oct 16 13:42:14 2009 (+0530)
# Version:
# Last-Updated: Fri Oct 21 17:16:30 2011 (+0530)
# By: Subhasis Ray
# Update #: 49
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
#
#
#
# Change log:
#
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
from datetime import datetime
import moose
import config
import trbutil
from cell import *
from capool import CaPool
class TuftedRS(TraubCell):
chan_params = {
'ENa': 50e-3,
'EK': -95e-3,
'EAR': -35e-3,
'ECa': 125e-3,
'EGABA': -75e-3, # Sanchez-Vives et al. 1997
'TauCa': 1e-3/0.075,
'X_AR': 0.25
}
ca_dep_chans = ['KAHP_DP', 'KC']
num_comp = 61
presyn = 60
level = None
depth = {
1: 1800 * 1e-6,
2: 1845 * 1e-6,
3: 1890 * 1e-6,
4: 1935 * 1e-6,
5: 1760 * 1e-6,
6: 1685 * 1e-6,
7: 1610 * 1e-6,
8: 1535 * 1e-6,
9: 1460 * 1e-6,
10: 1385 * 1e-6,
11: 1310 * 1e-6,
12: 1235 * 1e-6,
13: 1160 * 1e-6,
14: 1085 * 1e-6,
15: 1010 * 1e-6,
16: 935 * 1e-6,
17: 860 * 1e-6,
18: 790 * 1e-6,
}
proto_file = 'TuftedRS.p'
prototype = TraubCell.read_proto(proto_file, "TuftedRS", level_dict=level, depth_dict=depth, params=chan_params)
ca_dep_chans = ['KAHP_DP', 'KC']
def __init__(self, *args):
TraubCell.__init__(self, *args)
moose.CaConc(self.soma.path + '/CaPool').tau = 100e-3
# Special case: individually specified beta_cad's in level 2
moose.CaConc(self.comp[2].path + '/CaPool').tau = 1e-3/0.02
moose.CaConc(self.comp[5].path + '/CaPool' ).tau = 1e-3 / 0.02
moose.CaConc(self.comp[6].path + '/CaPool' ).tau = 1e-3 / 0.02
def _topology(self):
raise Exception, 'Deprecated'
def _setup_passive(self):
raise Exception, 'Deprecated'
def _setup_channels(self):
"""Set up connections between compartment and channels, and Ca pool"""
raise Exception, 'Deprecated'
@classmethod
def test_single_cell(cls):
"""Simulates a single thalamocortical relay cell
and plots the Vm and [Ca2+]"""
config.LOGGER.info("/**************************************************************************")
config.LOGGER.info(" *")
config.LOGGER.info(" * Simulating a single cell: %s" % (cls.__name__))
config.LOGGER.info(" *")
config.LOGGER.info(" **************************************************************************/")
sim = Simulation(cls.__name__)
mycell = TuftedRS(TuftedRS.prototype, sim.model.path + "/TuftedRS")
print 'MOOSE: Created cell:', mycell.path
vm_table = mycell.comp[cls.presyn].insertRecorder('Vm_tuftRS', 'Vm', sim.data)
# ca_conc_path = mycell.soma.path + '/CaPool'
# ca_table = None
# if config.context.exists(ca_conc_path):
# ca_conc = moose.CaConc(ca_conc_path)
# ca_table = moose.Table('Ca_tuftRS', sim.data)
# ca_table.stepMode = 3
# ca_conc.connect('Ca', ca_table, 'inputRequest')
# kc_path = mycell.soma.path + '/KC'
# gk_table = None
# if config.context.exists(kc_path):
# gk_table = moose.Table('gkc', sim.data)
# gk_table.stepMode = 3
# kc = moose.HHChannel(kc_path)
# kc.connect('Gk', gk_table, 'inputRequest')
# pymoose.showmsg(ca_conc)
pulsegen = mycell.soma.insertPulseGen('pulsegen', sim.model, firstLevel=10e-10, firstDelay=0.0, firstWidth=50e-3)
# pulsegen1 = mycell.soma.insertPulseGen('pulsegen1', sim.model, firstLevel=3e-7, firstDelay=150e-3, firstWidth=10e-3)
sim.schedule()
if mycell.has_cycle():
print "WARNING!! CYCLE PRESENT IN CICRUIT."
t1 = datetime.now()
sim.run(200e-3)
t2 = datetime.now()
delta = t2 - t1
print 'MOOSE: simulation time: ', delta.seconds + 1e-6 * delta.microseconds
sim.dump_data('data')
if config.has_pylab:
mus_vm = config.pylab.array(vm_table) * 1e3
mus_t = linspace(0, sim.simtime * 1e3, len(mus_vm))
try:
nrn_vm = config.pylab.loadtxt('../nrn/mydata/Vm_deepLTS.plot')
nrn_t = nrn_vm[:, 0]
nrn_vm = nrn_vm[:, 1]
config.pylab.plot(nrn_t, nrn_vm, 'y-', label='nrn vm')
except IOError:
print 'NEURON Data not available.'
config.pylab.plot(mus_t, mus_vm, 'g-.', label='mus vm')
config.pylab.legend()
config.pylab.show()
# test main --
from simulation import Simulation
from subprocess import call
if __name__ == "__main__":
# call(['/home/subha/neuron/nrn/x86_64/bin/nrngui', 'test_tuftRS.hoc'], cwd='../nrn')
TuftedRS.test_single_cell()
#
# tuftedRS.py ends here
|
BhallaLab/moose-thalamocortical
|
DEMOS/pymoose/traub2005/py/tuftedRS.py
|
Python
|
lgpl-2.1
| 5,796
|
[
"MOOSE",
"NEURON"
] |
f6b74909539f2b6189a62641221346cc122abac0f131ce61c4692ead3d5c6681
|
#!/usr/bin/env python
# Copyright (C) Duncan Macleod (2014)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Calculating (and plotting) event rate versus time
I would like to study the rate at which event triggers are generated by the
`ExcessPower` gravitational-wave burst detection algorithm, over a small
stretch of data.
The data from which these events were generated are a simulation of Gaussian noise
with the Advanced LIGO design spectrum, and so don't actually contain any real
gravitational waves, but will help tune the algorithm to improve detection of
future, real signals.
"""
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
__currentmodule__ = 'gwpy.table.lsctables'
# First, import the `SnglBurstTable`:
from gwpy.table.lsctables import SnglBurstTable
# and read a table of (simulated) events:
events = SnglBurstTable.read('../../gwpy/tests/data/'
'H1-LDAS_STRAIN-968654552-10.xml.gz')
# We can calculate the rate of events (in Hertz) using the :meth:`~SnglBurstTable.event_rate` method:
rate = events.event_rate(1, start=968654552, end=968654562)
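# (The first argument is, presumably, the stride of the rate calculation in
# seconds, so the triggers are binned into 1-second intervals across the 10 s
# of data.)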
# The :meth:`~SnglBurstTable.event_rate` method has returned a :class:`~gwpy.timeseries.TimeSeries`, so we can display this using the :meth:`~gwpy.timeseries.TimeSeries.plot` method of that object:
plot = rate.plot()
plot.set_xlim(968654552, 968654562)
plot.set_ylabel('Event rate [Hz]')
plot.set_title('LIGO Hanford Observatory event rate for GW100916')
plot.show()
|
andrew-lundgren/gwpy
|
examples/table/rate.py
|
Python
|
gpl-3.0
| 2,074
|
[
"Gaussian"
] |
9d4c2502ce568569f8b6969ce63400200bc8c094cfb9f78054b4248d927faa65
|
###########################################################################
#
# This program is part of Zenoss Core, an open source monitoring platform.
# Copyright (C) 2008, Zenoss Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published by
# the Free Software Foundation.
#
# For complete information please visit: http://www.zenoss.com/oss/
#
###########################################################################
################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
NAME = 'ZenPacks.community.mib_utils'
VERSION = '1.08'
AUTHOR = 'Kells Kearney'
LICENSE = ''
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.community']
PACKAGES = ['ZenPacks', 'ZenPacks.community', 'ZenPacks.community.mib_utils']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = '>=2.2'
PREV_ZENPACK_NAME = ''
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name = NAME,
version = VERSION,
author = AUTHOR,
license = LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers = COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
    # installed, then a ZenPack with the previous name, if installed, will be upgraded.
prevZenPackName = PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages = NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages = find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data = True,
# Tell setuptools what non-python files should also be included
# with the binary egg.
package_data = {
        '': ['*.txt', '../COPYRIGHT.txt', '../LICENSE.txt'],
NAME: ['objects/*','skins/*/*','services/*', 'reports/*/*',
'modeler/*/*', 'daemons/*', 'lib/*', 'libexec/*'],
},
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
    # tries to add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires = INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points = {
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe = False,
)
|
anksp21/Community-Zenpacks
|
ZenPacks.community.mib_utils/setup.py
|
Python
|
gpl-2.0
| 3,276
|
[
"VisIt"
] |
83dd70b3383630e86282db7afa2840924e07eab27af684a8d203846a89f4f271
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import six
'''
Created on 15.02.2016
@author: marscher
'''
from pyemma.coordinates.data.featurization.util import (_catch_unhashable,
_describe_atom,
hash_top, _hash_numpy_array)
import numpy as np
import mdtraj
from pyemma.coordinates.data.featurization._base import Feature
class CustomFeature(Feature):
"""
A CustomFeature is the base class for user-defined features. If you want to
implement a new fancy feature, derive from this class, calculate the quantity
    of interest in the transform method and return it as an ndarray.
    If you have a plain function that should be called on each trajectory, you don't need to derive a class;
    you can simply pass the function to the constructor of this class.
Parameters
----------
func : function
will be invoked with given args and kwargs on mapping traj
args : list of positional args (optional) passed to func
kwargs : named arguments (optional) passed to func
Notes
-----
    Your passed-in function will get an mdtraj.Trajectory object as its first argument.
Examples
--------
We define a feature that transforms all coordinates by :math:`1 / x^2`:
>>> from pyemma.coordinates import source
>>> from pyemma.datasets import get_bpti_test_data
>>> inp = get_bpti_test_data()
Define a function which transforms the coordinates of the trajectory object.
Note that you need to define the output dimension, which we pass directly in
the feature construction. The trajectory contains 58 atoms, so the output
dimension will be 3 * 58 = 174:
>>> my_feature = CustomFeature(lambda x: (1.0 / x.xyz**2).reshape(-1, 174), dim=174)
>>> reader = source(inp['trajs'][0], top=inp['top'])
    Pass the feature to the featurizer and transform the data:
>>> reader.featurizer.add_custom_feature(my_feature)
>>> data = reader.get_output()
"""
def __init__(self, func=None, *args, **kwargs):
self._func = func
self._args = args
self._kwargs = kwargs
self._dim = kwargs.pop('dim', 0)
def describe(self):
return ["CustomFeature calling %s with args %s" % (str(self._func),
str(self._args) +
str(self._kwargs))]
def transform(self, traj):
feature = self._func(traj, *self._args, **self._kwargs)
if not isinstance(feature, np.ndarray):
raise ValueError("your function should return a NumPy array!")
return feature
def __hash__(self):
hash_value = hash(self._func)
# if key contains numpy arrays, we hash their data arrays
key = tuple(list(map(_catch_unhashable, self._args)) +
list(map(_catch_unhashable, sorted(self._kwargs.items()))))
hash_value ^= hash(key)
return hash_value
class SelectionFeature(Feature):
"""
Just provide the cartesian coordinates of a selection of atoms (could be simply all atoms).
The coordinates are flattened as follows: [x1, y1, z1, x2, y2, z2, ...]
"""
def __init__(self, top, indexes):
self.top = top
self.indexes = np.array(indexes)
if len(self.indexes) == 0:
raise ValueError("empty indices")
self.prefix_label = "ATOM:"
def describe(self):
labels = []
for i in self.indexes:
labels.append("%s%s x" %
(self.prefix_label, _describe_atom(self.top, i)))
labels.append("%s%s y" %
(self.prefix_label, _describe_atom(self.top, i)))
labels.append("%s%s z" %
(self.prefix_label, _describe_atom(self.top, i)))
return labels
@property
def dimension(self):
return 3 * self.indexes.shape[0]
def transform(self, traj):
newshape = (traj.xyz.shape[0], 3 * self.indexes.shape[0])
return np.reshape(traj.xyz[:, self.indexes, :], newshape)
def __hash__(self):
hash_value = hash(self.prefix_label)
hash_value ^= hash_top(self.top)
hash_value ^= _hash_numpy_array(self.indexes)
return hash_value
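# A minimal sketch (a hypothetical helper, not used by any class in this module)
# illustrating the [x1, y1, z1, x2, y2, z2, ...] flattening convention documented
# in SelectionFeature above; the toy shapes and atom indices are arbitrary.
def _selection_flattening_sketch():
    xyz = np.arange(2 * 4 * 3, dtype=float).reshape(2, 4, 3)  # 2 frames, 4 atoms, xyz
    indexes = np.array([0, 2])  # select atoms 0 and 2
    flat = xyz[:, indexes, :].reshape(xyz.shape[0], 3 * len(indexes))
    # each row now reads [x0, y0, z0, x2, y2, z2] for one frame
    return flat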
class MinRmsdFeature(Feature):
def __init__(self, ref, ref_frame=0, atom_indices=None, topology=None, precentered=False):
assert isinstance(
ref_frame, int), "ref_frame has to be of type integer, and not %s" % type(ref_frame)
# Will be needing the hashed input parameter
self.__hashed_input__ = hash(ref)
# Types of inputs
# 1. Filename+top
if isinstance(ref, six.string_types):
# Store the filename
self.name = ref[:]
ref = mdtraj.load_frame(ref, ref_frame, top=topology)
            # mdtraj is pretty good at handling exceptions, so we're not checking for
# types or anything here
# 2. md.Trajectory object
elif isinstance(ref, mdtraj.Trajectory):
self.name = ref.__repr__()[:]
else:
raise TypeError("input reference has to be either a filename or "
"a mdtraj.Trajectory object, and not of %s" % type(ref))
self.ref = ref
self.ref_frame = ref_frame
self.atom_indices = atom_indices
self.precentered = precentered
def describe(self):
label = "minrmsd to frame %u of %s" % (self.ref_frame, self.name)
if self.precentered:
label += ', precentered=True'
if self.atom_indices is not None:
label += ', subset of atoms '
return [label]
@property
def dimension(self):
return 1
def transform(self, traj):
return np.array(mdtraj.rmsd(traj, self.ref, atom_indices=self.atom_indices), ndmin=2).T
def __hash__(self):
hash_value = hash(self.__hashed_input__)
        # TODO: identical md.Trajectory objects have different hashes; we need a
# way to differentiate them here
hash_value ^= hash(self.ref_frame)
if self.atom_indices is None:
hash_value ^= _hash_numpy_array(np.arange(self.ref.n_atoms))
else:
hash_value ^= _hash_numpy_array(np.array(self.atom_indices))
hash_value ^= hash(self.precentered)
return hash_value
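# A brief usage sketch for the two reference types accepted by MinRmsdFeature above
# (the helper and the reference file name are hypothetical placeholders): either a
# file name or an already loaded mdtraj.Trajectory can serve as the reference.
def _minrmsd_usage_sketch(reference_file='reference.pdb'):
    feat_from_file = MinRmsdFeature(reference_file, ref_frame=0)
    feat_from_traj = MinRmsdFeature(mdtraj.load(reference_file), ref_frame=0)
    return feat_from_file, feat_from_traj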
|
gph82/PyEMMA
|
pyemma/coordinates/data/featurization/misc.py
|
Python
|
lgpl-3.0
| 7,202
|
[
"MDTraj"
] |
bc277eb21a3f4c0cfd37fb2dec1a2341c42aece5f086146eadbf9a662e948557
|
# -*- coding: utf-8 -*-
#
# gif_pop_psc_exp.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Population rate model of generalized integrate-and-fire neurons
--------------------------------------------------------------------
This script simulates a finite network of generalized integrate-and-fire
(GIF) neurons directly on the mesoscopic population level using the effective
stochastic population rate dynamics derived in the paper [1]_. The stochastic
population dynamics is implemented in the NEST model gif_pop_psc_exp. We
demonstrate this model using the example of a Brunel network of two coupled
populations, one excitatory and one inhibitory population.
Note that the population model represents the mesoscopic level
description of the corresponding microscopic network based on the
NEST model ``gif_psc_exp``.
References
~~~~~~~~~~~
.. [1] Schwalger T, Degert M, Gerstner W (2017). Towards a theory of cortical columns: From spiking
neurons to interacting neural populations of finite size. PLoS Comput Biol.
https://doi.org/10.1371/journal.pcbi.1005507
"""
# Loading the necessary modules:
import numpy as np
import matplotlib.pyplot as plt
import nest
###############################################################################
# We first set the parameters of the microscopic model:
# All times given in milliseconds
dt = 0.5
dt_rec = 1.
# Simulation time
t_end = 2000.
# Parameters
size = 200
N = np.array([4, 1]) * size
M = len(N) # number of populations
# neuronal parameters
t_ref = 4. * np.ones(M) # absolute refractory period
tau_m = 20 * np.ones(M) # membrane time constant
mu = 24. * np.ones(M) # constant base current mu=R*(I0+Vrest)
c = 10. * np.ones(M) # base rate of exponential link function
Delta_u = 2.5 * np.ones(M) # softness of exponential link function
V_reset = 0. * np.ones(M) # Reset potential
V_th = 15. * np.ones(M) # baseline threshold (non-accumulating part)
tau_sfa_exc = [100., 1000.] # adaptation time constants of excitatory neurons
tau_sfa_inh = [100., 1000.] # adaptation time constants of inhibitory neurons
J_sfa_exc = [1000., 1000.] # size of feedback kernel theta
# (= area under exponential) in mV*ms
J_sfa_inh = [1000., 1000.] # in mV*ms
tau_theta = np.array([tau_sfa_exc, tau_sfa_inh])
J_theta = np.array([J_sfa_exc, J_sfa_inh])
# connectivity
J = 0.3 # excitatory synaptic weight in mV if number of input connections
# is C0 (see below)
g = 5. # inhibition-to-excitation ratio
pconn = 0.2 * np.ones((M, M))
delay = 1. * np.ones((M, M))
C0 = np.array([[800, 200], [800, 200]]) * 0.2 # constant reference matrix
C = np.vstack((N, N)) * pconn # numbers of input connections
# final synaptic weights scaling as 1/C
J_syn = np.array([[J, -g * J], [J, -g * J]]) * C0 / C
taus1_ = [3., 6.] # time constants of exc./inh. postsynaptic currents (PSC's)
taus1 = np.array([taus1_ for k in range(M)])
# step current input
step = [[20.], [20.]] # jump size of mu in mV
tstep = np.array([[1500.], [1500.]]) # times of jumps
# synaptic time constants of excitatory and inhibitory connections
tau_ex = 3. # in ms
tau_in = 6. # in ms
###############################################################################
# Simulation on the mesoscopic level
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To directly simulate the mesoscopic population activities (i.e. generating
# the activity of a finite-size population without simulating single
# neurons), we can build the populations using the NEST model
# ``gif_pop_psc_exp``:
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
nest.SetKernelStatus({'resolution': dt,
'print_time': True,
'local_num_threads': 1})
t0 = nest.GetKernelStatus('time')
nest_pops = nest.Create('gif_pop_psc_exp', M)
C_m = 250. # irrelevant value for membrane capacity, cancels out in simulation
g_L = C_m / tau_m
params = [{
'C_m': C_m,
'I_e': mu[i] * g_L[i],
'lambda_0': c[i], # in Hz!
'Delta_V': Delta_u[i],
'tau_m': tau_m[i],
'tau_sfa': tau_theta[i],
'q_sfa': J_theta[i] / tau_theta[i], # [J_theta]= mV*ms -> [q_sfa]=mV
'V_T_star': V_th[i],
'V_reset': V_reset[i],
'len_kernel': -1, # -1 triggers automatic history size
'N': N[i],
't_ref': t_ref[i],
'tau_syn_ex': max([tau_ex, dt]),
'tau_syn_in': max([tau_in, dt]),
'E_L': 0.
} for i in range(M)]
nest_pops.set(params)
# connect the populations
g_syn = np.ones_like(J_syn) # synaptic conductance
g_syn[:, 0] = C_m / tau_ex
g_syn[:, 1] = C_m / tau_in
for i in range(M):
for j in range(M):
nest.Connect(nest_pops[j], nest_pops[i],
syn_spec={'weight': J_syn[i, j] * g_syn[i, j] * pconn[i, j],
'delay': delay[i, j]})
###############################################################################
# To record the instantaneous population rate `Abar(t)` we use a multimeter,
# and to get the population activity `A_N(t)` we use a spike recorder:
# monitor the output using a multimeter, this only records with dt_rec!
nest_mm = nest.Create('multimeter')
nest_mm.set(record_from=['n_events', 'mean'], interval=dt_rec)
nest.Connect(nest_mm, nest_pops)
# monitor the output using a spike recorder
nest_sr = []
for i in range(M):
nest_sr.append(nest.Create('spike_recorder'))
nest_sr[i].time_in_steps = True
nest.SetDefaults('static_synapse', {'weight': 1., 'delay': dt})
nest.Connect(nest_pops[i], nest_sr[i])
###############################################################################
# All neurons in a given population will be stimulated with a step input
# current:
# set initial value (at t0+dt) of step current generator to zero
tstep = np.hstack((dt * np.ones((M, 1)), tstep))
step = np.hstack((np.zeros((M, 1)), step))
# create the step current devices
nest_stepcurrent = nest.Create('step_current_generator', M)
# set the parameters for the step currents
for i in range(M):
nest_stepcurrent[i].set(amplitude_times=tstep[i] + t0,
amplitude_values=step[i] * g_L[i],
origin=t0,
stop=t_end)
pop_ = nest_pops[i]
nest.Connect(nest_stepcurrent[i], pop_, syn_spec={'weight': 1.})
###############################################################################
# We can now start the simulation:
local_num_threads = 1
seed = 1
msd = local_num_threads * seed + 1 # master seed
nest.SetKernelStatus({'rng_seeds': range(msd, msd + local_num_threads)})
t = np.arange(0., t_end, dt_rec)
A_N = np.ones((t.size, M)) * np.nan
Abar = np.ones_like(A_N) * np.nan
# simulate 1 step longer to make sure all t are simulated
nest.Simulate(t_end + dt)
data_mm = nest_mm.events
for i, nest_i in enumerate(nest_pops):
a_i = data_mm['mean'][data_mm['senders'] == nest_i.global_id]
a = a_i / N[i] / dt
min_len = np.min([len(a), len(Abar)])
Abar[:min_len, i] = a[:min_len]
data_sr = nest_sr[i].get('events', 'times')
data_sr = data_sr * dt - t0
bins = np.concatenate((t, np.array([t[-1] + dt_rec])))
A = np.histogram(data_sr, bins=bins)[0] / float(N[i]) / dt_rec
A_N[:, i] = A
###############################################################################
# and plot the activity:
plt.figure(1)
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(t, A_N * 1000) # plot population activities (in Hz)
plt.ylabel(r'$A_N$ [Hz]')
plt.title('Population activities (mesoscopic sim.)')
plt.subplot(2, 1, 2)
plt.plot(t, Abar * 1000) # plot instantaneous population rates (in Hz)
plt.ylabel(r'$\bar A$ [Hz]')
plt.xlabel('time [ms]')
###############################################################################
# Microscopic ("direct") simulation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# As mentioned above, the population model ``gif_pop_psc_exp`` directly
# simulates the mesoscopic population activities, i.e. without the need to
# simulate single neurons. On the other hand, if we want to know single
# neuron activities, we must simulate on the microscopic level. This is
# possible by building a corresponding network of ``gif_psc_exp`` neuron models:
nest.ResetKernel()
nest.SetKernelStatus(
{'resolution': dt, 'print_time': True, 'local_num_threads': 1})
t0 = nest.GetKernelStatus('time')
nest_pops = []
for k in range(M):
nest_pops.append(nest.Create('gif_psc_exp', N[k]))
# set single neuron properties
for i in range(M):
nest_pops[i].set(C_m=C_m,
I_e=mu[i] * g_L[i],
lambda_0=c[i],
Delta_V=Delta_u[i],
g_L=g_L[i],
tau_sfa=tau_theta[i],
q_sfa=J_theta[i] / tau_theta[i],
V_T_star=V_th[i],
V_reset=V_reset[i],
t_ref=t_ref[i],
tau_syn_ex=max([tau_ex, dt]),
tau_syn_in=max([tau_in, dt]),
E_L=0.,
V_m=0.)
# connect the populations
for i, nest_i in enumerate(nest_pops):
for j, nest_j in enumerate(nest_pops):
if np.allclose(pconn[i, j], 1.):
conn_spec = {'rule': 'all_to_all'}
else:
conn_spec = {
'rule': 'fixed_indegree', 'indegree': int(pconn[i, j] * N[j])}
nest.Connect(nest_j, nest_i,
conn_spec,
syn_spec={'weight': J_syn[i, j] * g_syn[i, j],
'delay': delay[i, j]})
###############################################################################
# We want to record all spikes of each population in order to compute the
# mesoscopic population activities `A_N(t)` from the microscopic simulation.
# We also record the membrane potentials of five example neurons:
# monitor the output using a multimeter and a spike recorder
nest_sr = []
for i, nest_i in enumerate(nest_pops):
nest_sr.append(nest.Create('spike_recorder'))
nest_sr[i].time_in_steps = True
# record all spikes from population to compute population activity
nest.Connect(nest_i, nest_sr[i], syn_spec={'weight': 1., 'delay': dt})
Nrecord = [5, 0] # for each population "i" the first Nrecord[i] neurons are recorded
nest_mm_Vm = []
for i, nest_i in enumerate(nest_pops):
nest_mm_Vm.append(nest.Create('multimeter'))
nest_mm_Vm[i].set(record_from=['V_m'], interval=dt_rec)
if Nrecord[i] != 0:
nest.Connect(nest_mm_Vm[i], nest_i[:Nrecord[i]])
###############################################################################
# As before, all neurons in a given population will be stimulated with a
# step input current. The following code block is identical to the one for
# the mesoscopic simulation above:
# create the step current devices if they do not exist already
nest_stepcurrent = nest.Create('step_current_generator', M)
# set the parameters for the step currents
for i in range(M):
nest_stepcurrent[i].set(amplitude_times=tstep[i] + t0,
amplitude_values=step[i] * g_L[i],
origin=t0,
stop=t_end)
# optionally a stopping time may be added by: 'stop': sim_T + t0
pop_ = nest_pops[i]
nest.Connect(nest_stepcurrent[i], pop_, syn_spec={'weight': 1.})
###############################################################################
# We can now start the microscopic simulation:
local_num_threads = 1
seed = 1
msd = local_num_threads * seed + 1 # master seed
nest.SetKernelStatus({'rng_seeds': range(msd, msd + local_num_threads)})
t = np.arange(0., t_end, dt_rec)
A_N = np.ones((t.size, M)) * np.nan
# simulate 1 step longer to make sure all t are simulated
nest.Simulate(t_end + dt)
###############################################################################
# Let's retrieve the data of the spike recorder and plot the activity of the
# excitatory population (in Hz):
for i in range(len(nest_pops)):
data_sr = nest_sr[i].get('events', 'times') * dt - t0
bins = np.concatenate((t, np.array([t[-1] + dt_rec])))
A = np.histogram(data_sr, bins=bins)[0] / float(N[i]) / dt_rec
A_N[:, i] = A * 1000 # in Hz
t = np.arange(dt, t_end + dt, dt_rec)
plt.figure(2)
plt.plot(t, A_N[:, 0])
plt.xlabel('time [ms]')
plt.ylabel('population activity [Hz]')
plt.title('Population activities (microscopic sim.)')
###############################################################################
# This should look similar to the population activity obtained from the
# mesoscopic simulation based on the NEST model ``gif_pop_psc_exp`` (cf. figure
# 1). Now we retrieve the data of the multimeter, which allows us to look at
# the membrane potentials of single neurons. Here we plot the voltage traces
# (in mV) of five example neurons:
voltage = []
for i in range(M):
if Nrecord[i] > 0:
senders = nest_mm_Vm[i].get('events', 'senders')
v = nest_mm_Vm[i].get('events', 'V_m')
voltage.append(
np.array([v[np.where(senders == j)] for j in set(senders)]))
else:
voltage.append(np.array([]))
f, axarr = plt.subplots(Nrecord[0], sharex=True)
for i in range(Nrecord[0]):
axarr[i].plot(voltage[0][i])
axarr[i].set_yticks((0, 15, 30))
axarr[i].set_xlabel('time [ms]')
axarr[2].set_ylabel('membrane potential [mV]')
axarr[0].set_title('5 example GIF neurons (microscopic sim.)')
###############################################################################
# Note that this plots only the subthreshold membrane potentials but not the
# spikes (as with every leaky integrate-and-fire model).
plt.show()
|
SepehrMN/nest-simulator
|
pynest/examples/gif_pop_psc_exp.py
|
Python
|
gpl-2.0
| 14,482
|
[
"NEURON"
] |
2f4cf2a2b33f3c91f420853ac502137bc4e8f7bc1d47f909f4be3bec52113502
|
"""
bct v0.01
Bitcoin Trade Simulator
Copyright 2011 Brian Monkaba
This file is part of ga-bitbot.
ga-bitbot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
ga-bitbot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with ga-bitbot. If not, see <http://www.gnu.org/licenses/>.
"""
import pdb
import time
from operator import itemgetter
from math import exp
import sys
import paths
from cache import *
class trade_engine:
def __init__(self):
self.cache = cache()
#configurable variables
self.input_file_name = "./datafeed/bcfeed_mtgoxUSD_1min.csv" #default input file
self.score_only = False #set to true to only calculate what is required for scoring a strategy
#to speed up performance.
self.shares = 0.1 #order size
self.wll = 180 #window length long
self.wls = 2 #window length short
self.buy_wait = 0 #min sample periods between buy orders
self.buy_wait_after_stop_loss = 6 #min sample periods between buy orders
#after a stop loss order
self.markup = 0.01 #order mark up
self.stop_loss = 0.282 #stop loss
self.enable_flash_crash_protection = True #convert a stop loss order into a short term hold position
self.flash_crash_protection_delay = 180 #max_hold in minutes
self.stop_age = 10000 #stop age - dump after n periods
self.macd_buy_trip = -0.66 #macd buy indicator
self.min_i_pos = 0 #min periods of increasing price
#before buy order placed
self.min_i_neg = 0 #min periods of declining price
#before sell order placed
self.stbf = 2.02 #short trade biasing factor
#-- increase to favor day trading
#-- decrease to 2 to eliminate bias
self.nlsf = 5.0 #non-linear scoring factor - favor the latest trades
#max factor = exp(self.nlsf) @ the last sample periord
self.commision = 0.006 #mt.gox commision
self.quartile = 1 #define which market detection quartile to trade on (1-4)
self.input_data = []
self.input_data_length = 0
self.market_class = []
self.current_quartile = 0
self.classified_market_data = False
self.max_data_len = 1000000
self.reset()
return
def load_input_data(self):
print "bct: loading input data"
self.input_data = self.cache.get('input_data')
if self.input_data == None:
f = open(self.input_file_name,'r')
d = f.readlines()
f.close()
if len(d) > self.max_data_len:
#truncate the dataset
d = d[self.max_data_len * -1:]
self.input_data = []
for row in d[1:]:
r = row.split(',')[1] #last price
t = row.split(',')[0] #time
self.input_data.append([int(float(t)),float(r)])
self.cache.set('input_data',self.input_data)
self.cache.expire('input_data',60*8)
self.input_data_length = len(self.input_data)
return self.input_data
def initialize(self):
print "bct: initializing"
self.load_input_data()
cm = self.cache.get('classify_market')
if cm == None:
print "bct: classifying market data..."
self.classify_market(self.input_data)
self.cache.set('classify_market',self.market_class)
self.cache.expire('classify_market',60*8)
else:
print "bct: cached data found."
self.market_class = cm
self.classified_market_data = True
return self.current_quartile
def run(self):
for i in self.input_data:
self.input(i[0],i[1])
return
def reset(self):
#metrics and state variables
self.history = [] #moving window of the inputs
self.period = 0 #current input period
self.time = 0 #current period timestamp
self.input_log = [] #record of the inputs
self.wl_log = [] #record of the wl
self.ws_log = [] #record of the ws
self.macd_pct_log = []
self.buy_log = []
self.sell_log = []
self.stop_log = []
self.net_worth_log = []
self.trigger_log = []
self.balance = 1000 #account balance
self.opening_balance = self.balance #record the starting balance
self.score_balance = 0 #cumlative score
self.buy_delay = 0 #delay buy counter
self.buy_delay_inital = self.buy_delay #delay buy counter
self.macd_pct = 0
self.macd_abs = 0
self.avg_wl = 0
self.avg_ws = 0
self.ema_short = 0
self.ema_long = 0
self.i_pos = 0 #periods of increasing price
self.i_neg = 0 #periods of decreasing price
self.positions_open = [] #open order subset of all trade positions
self.positions = [] #all trade positions
self.metric_macd_pct_max = -10000 #macd metrics
self.metric_macd_pct_min = 10000
self.wins = 0
self.loss = 0
self.order_history = "NOT GENERATED"
self.current_quartile = 0
return
def test_quartile(self,quartile):
#valid inputs are 1-4
self.quartile = quartile / 4.0
def classify_market(self,input_list):
#print "start market classify"
#market detection preprocessor splits the input data into
#quartiles based on the true range indicator
self.market_class = []
atr_depth = 60 * 1 #one hour atr
#print "calc the true pct range indicator"
last_t = 0
last_tr = 0
t = 0
tr = 0
for i in xrange(len(input_list)):
t,p = input_list[i]
t = int(t * 1000)
if i > atr_depth + 1:
dsp = [r[1] for r in input_list[i - atr_depth - 1:i]] #get the price data set
dsp_min = min(dsp)
dsp_max = max(dsp)
tr = (dsp_max - dsp_min) / dsp_min #put in terms of pct chg
self.market_class.append([t,tr])
last_t = t
last_tr = tr
else:
#pad out the initial data
self.market_class.append([t,0])
#pad the end of the data to support future order testing
for i in xrange(10):
self.market_class.append([t,tr])
#I was overthinking again...
quartiles = []
l = [r[1] for r in self.market_class]
l.sort()
quartiles.append(l[int(len(l) * 0.25)])
quartiles.append(l[int(len(l) * 0.50)])
quartiles.append(l[int(len(l) * 0.75)])
#and apply them to the market class log
for i in xrange(len(self.market_class)):
p = self.market_class[i][1]
self.market_class[i][1] = 0.25
if p > quartiles[0]:
self.market_class[i][1] = 0.50
if p > quartiles[1]:
self.market_class[i][1] = 0.75
if p > quartiles[2]:
self.market_class[i][1] = 1.0
if i < atr_depth + 1:
self.market_class[i][1] = 0.0 #ignore early (uncalculated) data
self.classified_market_data = True
self.current_quartile = int(self.market_class[len(self.market_class)-1][1] * 4) #return the current quartile (1-4)
return self.current_quartile
def metrics_report(self):
m = ""
m += "\nShares: " + str(self.shares)
m += "\nMarkup: " + str(self.markup * 100) + "%"
m += "\nStop Loss: " + str(self.stop_loss * 100) + "%"
m += "\nStop Age: " + str(self.stop_age)
m += "\nBuy Delay: " + str(self.buy_wait)
m += "\nBuy Delay After Stop Loss: " + str(self.buy_wait_after_stop_loss)
m += "\nMACD Trigger: " + str(self.macd_buy_trip) + "%"
m += "\nEMA Window Long: " + str(self.wll)
m += "\nEMA Window Short: " + str(self.wls)
m += "\niPos: " + str(self.i_pos)
m += "\niNeg: " + str(self.i_neg)
m += "\nShort Trade Bias: " + str(self.stbf)
m += "\nCommision: " + str(self.commision * 100) + "%"
m += "\nScore: " + str(self.score())
m += "\nTotal Periods : " + str(self.period)
m += "\nInitial Buy Delay : " + str(self.buy_delay_inital)
m += "\nOpening Balance: $" + str(self.opening_balance)
m += "\nClosing Balance: $" + str(self.balance)
m += "\nTransaction Count: " + str(len(self.positions))
m += "\nWin: " + str(self.wins)
m += "\nLoss: " + str(self.loss)
try:
m += "\nWin Pct: " + str(100 * (self.wins / float(self.wins + self.loss))) + "%"
except:
pass
m += "\nMACD Max Pct: " + str(self.metric_macd_pct_max)+ "%"
m += "\nMACD Min Pct: " + str(self.metric_macd_pct_min)+ "%"
return m
def dump_open_positions(self):
#dump all active trades to get a current balance
self.positions_open = [] #clear out the subset buffer
for position in self.positions:
if position['status'] == "active":
position['status'] = "dumped"
position['actual'] = self.history[1] #HACK! go back in time one period to make sure we're using a real price
#and not a buy order target from the reporting script.
position['sell_period'] = self.period
self.balance += position['actual'] * (position['shares'] - (position['shares'] * self.commision))
if position['age'] > 0:
self.score_balance += ((position['actual'] * (position['shares'] - (position['shares'] * self.commision))) / (position['buy'] * (position['shares'] + 0.0001))) * (pow(position['age'],self.stbf) / position['age'] )
def score(self):
self.dump_open_positions()
if (self.wins + self.loss) > 0:
self.positions = sorted(self.positions, key=itemgetter('buy_period'))
exp_scale = self.nlsf / self.period #float(self.positions[-1]['buy_period'])
final_score_balance = 0
for p in self.positions:
p['score'] = 0
if p['status'] == "sold":
p['age'] = float(p['age'])
p['score'] = (((p['target'] - p['buy']) / p['buy']) * 100.0 ) * self.shares
#apply non-linear scaling to the trade based on the round trip time (age)
#favors a faster turn around time on positions
p['score'] /= (pow(p['age'],self.stbf) / p['age'] )
p['score'] *= 10.0
if p['age'] < 4.0:
p['score'] *= (1/4.0) #I'm knocking down very short term trades because there's a chance the system will miss them in live trading
if p['status'] == "stop":
if p['actual'] > p['buy']:
age = float(self.stop_age)
#if there wasn't a loss caused by the stop order
p['score'] = (((p['buy'] - p['actual']) / p['buy']) * 100.0) * self.shares
#apply non-linear scaling to the trade based on the round trip time (age)
#favors a faster turn around time on positions
p['score'] /= (pow(age,self.stbf) / age )
p['score'] *= 100.0
else:
#losing position gets a negative score
age = float(self.stop_age)
p['score'] = ((((p['actual'] - p['buy']) / p['buy']) * 100.0) * self.shares)
#apply non-linear scaling to the trade based on the round trip time (age)
#favors a faster turn around time on positions
p['score'] /= ((pow(age,self.stbf) / (age + 0.000001) ) + 0.0001)
p['score'] *= 1000.0 #increase weighting for losing positions
#apply e^0 to e^1 weighting to favor the latest trade results
p['score'] *= exp(exp_scale * p['buy_period'])
final_score_balance += p['score']
                #because stop loss will generally be higher than the target (markup) percentage
#the loss count needs to be weighted by the pct difference
loss_weighting_factor = self.stop_loss / self.markup
final_score_balance *= self.wins / (self.wins + (self.loss * loss_weighting_factor) * 1.0)
#final_score_balance *= self.markup * len(self.positions)
#fine tune the score
final_score_balance -= self.buy_wait / 1000.0
final_score_balance -= self.buy_wait_after_stop_loss / 1000.0
final_score_balance -= (self.stop_loss * 1000)
final_score_balance += (self.wls / 1000.0)
final_score_balance -= (self.stop_age / 1000.0)
final_score_balance += self.shares
            #severely penalize the score if the win ratio is less than 75%
if self.wins / (self.wins + self.loss * 1.0) < 0.75:
final_score_balance /= 10000.0
#risk reward weighting
if final_score_balance > 0:
rr = self.markup / (self.stop_loss + 0.00001)
#clamp the risk reward weighting
if rr > 2.0:
rr = 2.0
final_score_balance *= rr
#if self.opening_balance > self.balance:
# #losing strategy
# final_score_balance -= 5000 #999999999
else:
#no trades generated
final_score_balance = -987654321.123456789
return final_score_balance
def ai(self):
#make sure the two moving averages (window length long and short) don't get inverted
if self.wll < self.wls:
self.wll += self.wls
#decrement the buy wait counter
if self.buy_delay > 0:
self.buy_delay -= 1
#place buy orders, set price targets and stop loss limits
current_price = self.history[0]
#if the balance is sufficient to place an order and there is no buy delay
buy = current_price * -1
#but only if the classified input data matches the quartile assigned
#OR if the input data was not pre-classified in which case quartile partitioning is disabled.
if self.classified_market_data == False or self.quartile == self.market_class[self.period][1]:
if self.balance > (current_price * self.shares) and self.buy_delay == 0 :
if self.macd_pct < self.macd_buy_trip:
#set delay until next buy order
self.buy_delay = self.buy_wait
self.balance -= (current_price * self.shares)
actual_shares = self.shares - (self.shares * self.commision)
buy = current_price
target = (buy * self.markup) + buy
stop = buy - (buy * self.stop_loss)
self.buy_log.append([self.time,buy])
new_position = {'master_index':len(self.positions),'age':0,'buy_period':self.period,'sell_period':0,'trade_pos': self.balance,'shares':actual_shares,'buy':buy,'cost':self.shares*buy,'target':target,'stop':stop,'status':"active",'actual':0,'score':0}
self.positions.append(new_position.copy())
                #maintain a separate subset of open positions to speed up the search to close the open positions
#after a long run there may be thousands of closed positions
#it was killing performance searching all of them for the few open positions at any given time
self.positions_open.append(new_position.copy())
current_net_worth = 0
#check for sold and stop loss orders
sell = current_price * -1
stop = current_price * -1
updated = False
for position in self.positions_open:
#handle sold positions
if position['status'] == "active" and position['target'] <= current_price: #and self.i_neg >= self.min_i_neg:
updated = True
position['status'] = "sold"
position['actual'] = current_price
sell = current_price
position['sell_period'] = self.period
self.wins += 1
self.balance += position['target'] * (position['shares'] - (position['shares'] * self.commision))
self.score_balance += ((position['target'] * (position['shares'] - (position['shares'] * self.commision))) / (position['buy'] * position['shares'])) * (pow(position['age'],self.stbf) / position['age'] )
#update the position in the master list
buy_period = position['buy_period']
#self.positions = filter(lambda x: x.get('buy_period') != buy_period, self.positions) #delete the old record
#self.positions.append(position.copy()) #and add the updated record
self.positions[position['master_index']] = position.copy()
#handle the stop orders
elif position['status'] == "active" and (position['stop'] >= current_price or position['age'] >= self.stop_age):
if position['stop'] >= current_price:
if self.enable_flash_crash_protection == True and self.market_class[self.period][1] == 1.0:
stop_order_executed = False
#convert the stop loss order into a short term hold position
position['age'] = self.stop_age - self.flash_crash_protection_delay
position['stop'] *= -1.0
else:
#stop loss
stop_order_executed = True
updated = True
position['status'] = "stop"
position['actual'] = current_price
stop = current_price
position['sell_period'] = self.period
self.loss += 1
self.buy_delay += self.buy_wait_after_stop_loss
else:
#stop wait
stop_order_executed = True
updated = True
position['status'] = "stop"
position['actual'] = current_price
stop = current_price
position['sell_period'] = self.period
self.loss += 1 #- (position['actual'] / position['target']) #fractional loss
self.buy_delay += self.buy_wait_after_stop_loss
if stop_order_executed == True:
self.balance += position['actual'] * (position['shares'] - (position['shares'] * self.commision))
if position['age'] > 0:
self.score_balance += ((position['actual'] * (position['shares'] - (position['shares'] * self.commision))) / (position['buy'] * (position['shares'] + 0.0001))) * (pow(position['age'],self.stbf) / position['age'] )
#update the position in the master list
buy_period = position['buy_period']
#self.positions = filter(lambda x: x.get('buy_period') != buy_period, self.positions) #delete the old record
#self.positions.append(position.copy()) #and add the updated record
self.positions[position['master_index']] = position.copy()
#handle active (open) positions
elif position['status'] == "active":
#position remains open, capture the current value
current_net_worth += current_price * (position['shares'] - (position['shares'] * self.commision))
position['age'] += 1
#remove any closed positions from the open position subset
if updated == True:
self.positions_open = filter(lambda x: x.get('status') == 'active', self.positions_open)
#add the balance to the net worth
current_net_worth += self.balance
if not self.score_only:
if self.classified_market_data == False or self.quartile == self.market_class[self.period][1]:
self.trigger_log.append([self.time,self.get_target()])
self.net_worth_log.append([self.time,current_net_worth])
if sell > 0:
self.sell_log.append([self.time,sell])
if stop > 0:
self.stop_log.append([self.time,stop])
return
def get_target(self):
#calculates the inverse macd
#wolfram alpha used to transform the macd equation to solve for the trigger price:
price = 0.0
try:
price = -1.0 * (100.0 *(self.wls+1)*(self.wll-1)*self.ema_long + (self.wll+1)*self.ema_short*(self.wls * (self.macd_buy_trip - 100) + self.macd_buy_trip + 100)) / (200 * (self.wls - self.wll))
price -= 0.01 #subtract a penny to satisfy the trigger criteria
except:
price = 0.0
if price < 0.0:
price = 0.0
#clamp the max value
if price > self.history[0]:
price = self.history[0]
#clamp the min value (70% of ema_long)
if price < self.ema_long * 0.7:
price = self.ema_long * 0.7
return price
def macd(self):
#wait until there is enough data to fill the moving windows
if len(self.history) == self.wll:
s = 0
l = 0
#calculate the ema weighting multipliers
ema_short_mult = (2.0 / (self.wls + 1) )
ema_long_mult = (2.0 / (self.wll + 1) )
#bootstrap the ema calc using a simple moving avg if needed
if self.ema_long == 0:
for i in xrange(self.wll):
if i < self.wls:
s += self.history[i]
l += self.history[i]
self.avg_ws = s / self.wls
self.avg_wl = l / self.wll
self.ema_long = self.avg_wl
self.ema_short = self.avg_ws
else:
#calculate the long and short ema
self.ema_long = (self.history[0] - self.ema_long) * ema_long_mult + self.ema_long
self.ema_short = (self.history[0] - self.ema_short) * ema_short_mult + self.ema_short
#calculate the absolute and pct differences between the
#long and short emas
self.macd_abs = self.ema_short - self.ema_long
self.macd_pct = (self.macd_abs / self.ema_short) * 100
"""
#track the number of sequential positive and negative periods
if self.history[0] - self.history[1] > 0:
self.i_neg = 0
self.i_pos += 1
if self.history[0] - self.history[1] < 0:
self.i_pos = 0
self.i_neg += 1
"""
if not self.score_only:
#track the max & min macd pcts (metric)
if self.macd_pct > self.metric_macd_pct_max:
self.metric_macd_pct_max = self.macd_pct
if self.macd_pct < self.metric_macd_pct_min:
self.metric_macd_pct_min = self.macd_pct
else:
self.ema_short = self.history[0]
self.ema_long = self.history[0]
self.macd_pct = 0
#log the indicators
if not self.score_only:
self.macd_pct_log.append([self.time,self.macd_pct])
self.wl_log.append([self.time,self.ema_long])
self.ws_log.append([self.time,self.ema_short])
def display(self):
#used for debug
print ",".join(map(str,[self.history[0],self.macd_pct,self.buy_wait]))
def input(self,time_stamp,record):
#self.time = int(time.mktime(time.strptime(time_stamp))) * 1000
self.time = int(time_stamp * 1000)
self.input_log.append([self.time,record])
###Date,Sell,Buy,Last,Vol,High,Low,###
self.history.insert(0,record)
if len(self.history) > self.wll:
self.history.pop() #maintain a moving window of
#the last wll records
self.macd() #calc macd
self.ai() #call the trade ai
self.period += 1 #increment the period counter
#self.display()
return
def log_orders(self,filename=None):
self.order_history = ""
print "log_orders: sorting data"
self.positions = sorted(self.positions, key=itemgetter('buy_period'),reverse=True)
if len(self.positions) > 0:
keys = self.positions[0].keys()
#write the header
self.order_history = "<table class='imgtbl'>\n"
self.order_history +="<tr>"
for key in keys:
                self.order_history +="<th>%s</th>"%key
self.order_history +="</tr>\n"
#only htmlize the last positions so the browser doesn't blow up ;)
reported_position_count_limit = 200
reported_position_count = 0
print "log_orders: generating html table for %s positions"%(len(self.positions))
for p in self.positions:
if reported_position_count >= reported_position_count_limit:
break
                #I don't care about the dumped positions, they're not real transactions anyway.
#They're only generated to calculate/report the current account value.
if p['status']!='dumped':
reported_position_count += 1
self.order_history +="<tr>"
for key in keys:
if p.has_key(key):
                            #I don't care about the dumped positions, they're not real transactions anyway.
#They're only generated to calculate/report the current account value.
if p['status']!='dumped':
if p['status']=='stop':
color = 'r'
elif p['status']=='dumped': #Im leaving this here in case I want to turn it back on.
color = 'y'
elif p['status']=='sold':
color = 'g'
else:
color = 'b'
self.order_history +="<td class='%s'>"%color
if type(p[key]) == type(1.0):
self.order_history += "%.2f"% round(p[key],2)
else:
self.order_history += str(p[key])
self.order_history +="</td>"
elif p['status']!='dumped':
self.order_history +="<td>N/A</td>"
if p['status']!='dumped':
self.order_history +="</tr>\n"
self.order_history += "</table>"
return
def log_transactions(self,filename):
#log the transactions to a file
#used with excel / gdocs to chart price and buy/sell indicators
f = open(filename,'w')
for i in xrange(len(self.input_log)):
for position in self.positions:
if position['buy_period'] == i:
#print position['buy_period'],i
self.input_log[i].append('buy')
self.input_log[i].append(position['sell_period'] - position['buy_period'])
self.input_log[i].append(position['status'])
self.input_log[i].append(i)
if position['sell_period'] == i:
self.input_log[i].append('sell')
self.input_log[i].append('0')
self.input_log[i].append(position['status'])
self.input_log[i].append(i)
r = ",".join(map(str,self.input_log[i]))
f.write(r)
f.write('\n')
f.close()
return
def compress_log(self,log,lossless_compression = False):
#removes records with no change in price, before and after record n
compressible = True
while compressible:
compressible = False
ret_log = []
for i in xrange(len(log)):
if type(log[i][1]) == float:
log[i][1] = float("%.3f"%log[i][1])
if i >= 1 and i < len(log) - 1:
if log[i-1][1] == log[i][1] and log[i+1][1] == log[i][1]:
compressible = True #no change in value before or after, omit record
else:
ret_log.append(log[i])
else:
ret_log.append(log[i])
log = ret_log
if lossless_compression == True:
return ret_log
while len(log) > 2000:
avg = log[0][1]
avg = (log[0][1] - avg) * 0.2 + avg
ret_log = [log[0]] #capture the first record
for i in xrange(1,len(log),2):
#find which sample that deviates the most from the average
a = abs(log[i][1] - avg)
b = abs(log[i-1][1] - avg)
if a > b:
ret_log.append(log[i])
else:
ret_log.append(log[i-1])
#update the moving average
avg = (log[i-1][1] - avg) * 0.2 + avg
avg = (log[i][1] - avg) * 0.2 + avg
ret_log.append(log[len(log)-1]) #make sure the last record is captured
log = ret_log
return ret_log
def chart(self,template,filename,periods=-1,basic_chart=False):
self.log_orders()
f = open(template,'r')
tmpl = f.read()
f.close()
if periods < 0:
periods = self.period * -1
else:
periods *= -1
        #insert all quartiles at the beginning of the market class data to ensure correct
#chart scaling. This covers the case where the chart period doesn't see all quartiles
mc = self.market_class[periods:]
t = mc[0][0]
for i in range(0,4):
t += 1
q = (i + 1) / 4.0
mc.insert(0,[t,q])
print "chart: compressing data"
if not basic_chart:
wl = str(self.compress_log(self.wl_log[periods:])).replace('L','')
ws = str(self.compress_log(self.ws_log[periods:])).replace('L','')
net_worth = str(self.compress_log(self.net_worth_log[periods:],lossless_compression = True)).replace('L','')
else:
wl = str([])
ws = str([])
net_worth = str([])
macd_pct = str(self.compress_log(self.macd_pct_log[periods:])).replace('L','')
input = str(self.compress_log(self.input_log[periods:])).replace('L','')
volatility_quartile = str(self.compress_log(mc,lossless_compression = True)).replace('L','')
buy = str([])
sell = str([])
stop = str([])
trigger_price = str([])
if periods == self.period:
buy = str(self.buy_log[periods:]).replace('L','')
sell = str(self.sell_log[periods:]).replace('L','')
stop = str(self.stop_log[periods:]).replace('L','')
trigger_price = str(self.compress_log(self.trigger_log[periods:],lossless_compression = True)).replace('L','')
else:
print "chart: selecting data"
#get the timestamp for the start index
time_stamp = self.input_log[periods:periods+1][0][0]
#search the following for the time stamp
for i in xrange(len(self.buy_log)):
if self.buy_log[i][0] >= time_stamp:
buy = str(self.buy_log[i:]).replace('L','')
break
for i in xrange(len(self.sell_log)):
if self.sell_log[i][0] >= time_stamp:
sell = str(self.sell_log[i:]).replace('L','')
break
for i in xrange(len(self.stop_log)):
if self.stop_log[i][0] >= time_stamp:
stop = str(self.stop_log[i:]).replace('L','')
break
for i in xrange(len(self.trigger_log)):
if self.trigger_log[i][0] >= time_stamp:
trigger_price = str(self.trigger_log[i:]).replace('L','')
break
print "chart: filling the template"
tmpl = tmpl.replace("{LAST_UPDATE}",time.ctime())
tmpl = tmpl.replace("{PRICES}",input)
tmpl = tmpl.replace("{WINDOW_LONG}",wl)
tmpl = tmpl.replace("{WINDOW_SHORT}",ws)
tmpl = tmpl.replace("{MACD_PCT}",macd_pct)
tmpl = tmpl.replace("{BUY}",buy)
tmpl = tmpl.replace("{SELL}",sell)
tmpl = tmpl.replace("{STOP}",stop)
tmpl = tmpl.replace("{NET_WORTH}",net_worth)
tmpl = tmpl.replace("{TRIGGER_PRICE}",trigger_price)
tmpl = tmpl.replace("{METRICS_REPORT}",self.metrics_report().replace('\n','<BR>'))
tmpl = tmpl.replace("{ORDERS_REPORT}",self.order_history)
tmpl = tmpl.replace("{VOLATILITY_QUARTILE}",volatility_quartile)
print "chart: writing the data to a file"
f = open(filename,'w')
f.write(tmpl)
f.close()
return
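#A small illustrative sketch (not used by trade_engine; the name and inputs are
#hypothetical) of the exponential moving average update rule applied in
#trade_engine.macd() above: each new price is blended into the running average
#with a weight of 2 / (window_length + 1).
def _ema_sketch(prices, window_length):
    mult = 2.0 / (window_length + 1)
    ema = prices[0]
    for p in prices[1:]:
        ema = (p - ema) * mult + ema
    return ema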
def test():
te = trade_engine()
#set the trade engine class vars
te.shares = 0.1
te.wll = 242
te.wls = 1
te.buy_wait = 0
te.markup = 0.01
te.stop_loss = 0.128
te.stop_age = 2976
te.macd_buy_trip = -0.02
te.min_i_neg = 2
te.min_i_pos = 0
te.buy_wait_after_stop_loss = 0
for row in d[1:]:
r = row.split(',')[1] #last
t = row.split(',')[0] #time
te.input(float(t),float(r))
return te
if __name__ == "__main__":
__appversion__ = "0.02a"
print "Bitcoin trade simulator profiler v%s"%__appversion__
print " -- this is a test script to profile the performance of bct.py"
print " -- the trade results should be ignored as the trade strategy inputs"
print " are designed to stress the module with many trade positions"
print ""
print "Profiling bct...(This is going to take a while)"
#open the history file
f = open("./datafeed/bcfeed_mtgoxUSD_1min.csv",'r')
d = f.readlines()
f.close()
import hotshot,hotshot.stats
prof = hotshot.Profile("bct.prof")
te = prof.runcall(test)
prof.close()
stats = hotshot.stats.load("bct.prof")
stats.strip_dirs()
stats.sort_stats('time','calls')
stats.print_stats(20)
print "Score:",te.score()
print "Closing Balance:",te.balance
print "Transaction Count: ",len(te.positions)
    #Commented out the following reports -- they generate very large files and are of limited use for this test script.
#print "Generating reports..."
#te.log_transactions('./report/profile_transactions.csv')
#te.log_orders('./report/profile_orders.csv')
#te.chart("./report/chart.templ","./report/chart_profile.html")
print "Done."
|
enikesha/ga-bitbot
|
bct.py
|
Python
|
gpl-3.0
| 35,986
|
[
"Brian"
] |
0b50d8bb2cfa5f9d00f2120365650a814ead40e7940f1752c23cde0c7b721429
|
import openvoronoi as ovd
import ovdvtk
import time
import vtk
import datetime
import math
import random
import os
def drawLine(myscreen, p1, p2):
myscreen.addActor(ovdvtk.Line(p1=(p1.x, p1.y, 0), p2=(p2.x, p2.y, 0), color=ovdvtk.yellow))
def writeFrame(w2if, lwr, n):
w2if.Modified()
current_dir = os.getcwd()
filename = current_dir + "/frames/vd_lineseg" + ('%05d' % n) + ".png"
lwr.SetFileName(filename)
lwr.Write()
def randomGenerators(far, Nmax):
pradius = (1.0 / math.sqrt(2)) * far
plist = []
for n in range(Nmax):
x = -pradius + 2 * pradius * random.random()
y = -pradius + 2 * pradius * random.random()
plist.append(ovd.Point(x, y))
return plist
if __name__ == "__main__":
myscreen = ovdvtk.VTKScreen(width=1024, height=720) # (width=1920, height=1080)
ovdvtk.drawOCLtext(myscreen, rev_text=ovd.version())
w2if = vtk.vtkWindowToImageFilter()
w2if.SetInput(myscreen.renWin)
lwr = vtk.vtkPNGWriter()
lwr.SetInputConnection(w2if.GetOutputPort())
scale = 1
myscreen.render()
random.seed(42)
far = 1
camPos = far
zmult = 4
# camPos/float(1000)
myscreen.camera.SetPosition(0, -camPos / float(1000), zmult * camPos)
myscreen.camera.SetClippingRange(-(zmult + 1) * camPos, (zmult + 1) * camPos)
myscreen.camera.SetFocalPoint(0, 0, 0)
vd = ovd.VoronoiDiagram(far, 120)
print ovd.version()
# for vtk visualization
vod = ovdvtk.VD(myscreen, vd, float(scale), textscale=0.01, vertexradius=0.003)
vod.drawFarCircle()
# vod.clearance_disk=1
vod.vertexRadius = 0.005
vod.textScale = 0.02
Nmax = 60
plist = randomGenerators(far, Nmax)
t_before = time.time()
n = 0
id_list = []
npts = Nmax - 1
for p in plist:
print n, " adding ", p
id_list.append(vd.addVertexSite(p))
n = n + 1
nstep = 10
vd.addLineSite(id_list[0], id_list[30], nstep)
t_after = time.time()
calctime = t_after - t_before
if Nmax == 0:
Nmax = 1
print " VD done in ", calctime, " s, ", calctime / Nmax, " s per generator"
vod.setAll()
myscreen.render()
writeFrame(w2if, lwr, nstep)
print "PYTHON All DONE."
myscreen.render()
myscreen.iren.Start()
|
aewallin/openvoronoi
|
python_examples/line-segment/lineseg_animation.py
|
Python
|
lgpl-2.1
| 2,294
|
[
"VTK"
] |
5800011703c0bb52d8c6a5a3f12540f81d6b1c2c2cd56309696fdc8546a6295c
|
#!/usr/bin/env python
# Copyright 2014-2021 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
native implementation of DF-MP2/RI-MP2 with a UHF reference
'''
import numpy as np
import scipy
from pyscf import lib
from pyscf import scf
from pyscf import df
from pyscf.scf import ucphf
from pyscf.mp.dfmp2_native import DFRMP2, ints3c_cholesky, orbgrad_from_Gamma
class DFUMP2(DFRMP2):
'''
native implementation of DF-MP2/RI-MP2 with a UHF reference
'''
def __init__(self, mf, frozen=None, auxbasis=None):
'''
Args:
mf : UHF instance
frozen : number of frozen orbitals or list of frozen orbitals
auxbasis : name of auxiliary basis set, otherwise determined automatically
'''
if not isinstance(mf, scf.uhf.UHF):
raise TypeError('Class initialization with non-UHF object')
# UHF quantities are stored as numpy arrays
self.mo_coeff = np.array(mf.mo_coeff)
self.mo_energy = np.array(mf.mo_energy)
self.nocc = np.array([np.count_nonzero(mf.mo_occ[0]), np.count_nonzero(mf.mo_occ[1])])
# UHF MO coefficient matrix shape: (2, number of AOs, number of MOs)
self.nmo = self.mo_coeff.shape[2]
self.e_scf = mf.e_tot
self._scf = mf
# Process the frozen core option correctly as either an integer or two lists (alpha, beta).
# self.frozen_mask sets a flag for each orbital if it is frozen (True) or not (False).
# Only occupied orbitals can be frozen.
self.frozen_mask = np.zeros((2, self.nmo), dtype=bool)
if frozen is None:
pass
elif lib.isinteger(frozen):
if frozen > min(self.nocc):
raise ValueError('only occupied orbitals can be frozen')
self.frozen_mask[:, :frozen] = True
else:
try:
if len(frozen) != 2:
raise ValueError
for s in 0, 1:
if not lib.isintsequence(frozen[s]):
raise TypeError
except (TypeError, ValueError):
raise TypeError('frozen must be an integer or two integer lists')
if len(frozen[0]) != len(frozen[1]):
raise ValueError('frozen orbital lists not of equal length')
for s in 0, 1:
self.frozen_mask[s, frozen[s]] = True
# mask for occupied orbitals that are not frozen
self.occ_mask = np.zeros((2, self.nmo), dtype=bool)
for s in 0, 1:
self.occ_mask[s, :self.nocc[s]] = True
self.occ_mask[self.frozen_mask] = False
self.mol = mf.mol
if not auxbasis:
auxbasis = df.make_auxbasis(self.mol, mp2fit=True)
self.auxmol = df.make_auxmol(self.mol, auxbasis)
self.verbose = self.mol.verbose
self.stdout = self.mol.stdout
self.max_memory = self.mol.max_memory
# _intsfile will be a list with two elements for the alpha and beta integrals
self._intsfile = []
self.e_corr = None
# Spin component scaling factors
self.ps = 1.0
self.pt = 1.0
self.cphf_max_cycle = 100
self.cphf_tol = mf.conv_tol
def dump_flags(self, logger=None):
'''
Prints selected information.
Args:
logger : Logger object
'''
if not logger:
logger = lib.logger.new_logger(self)
logger.info('')
logger.info('******** {0:s} ********'.format(repr(self.__class__)))
logger.info('nmo = {0:d}'.format(self.nmo))
logger.info('nocc = {0:d}, {1:d}'.format(self.nocc[0], self.nocc[1]))
nfrozen = np.count_nonzero(self.frozen_mask[0])
logger.info('no. of frozen = {0:d}'.format(nfrozen))
frozen_tmp = np.arange(self.nmo)[self.frozen_mask[0]]
logger.debug('frozen (alpha) = {0}'.format(frozen_tmp))
frozen_tmp = np.arange(self.nmo)[self.frozen_mask[1]]
logger.debug('frozen (beta) = {0}'.format(frozen_tmp))
logger.info('basis = {0:s}'.format(repr(self.mol.basis)))
logger.info('auxbasis = {0:s}'.format(repr(self.auxmol.basis)))
logger.info('max_memory = {0:.1f} MB (current use {1:.1f} MB)'.
format(self.max_memory, lib.current_memory()[0]))
def calculate_energy(self):
'''
Calculates the MP2 correlation energy.
'''
if not self.has_ints:
self.calculate_integrals_()
logger = lib.logger.new_logger(self)
logger.info('')
logger.info('Calculating DF-MP2 energy')
self.e_corr = emp2_uhf(self._intsfile, self.mo_energy, self.frozen_mask,
logger, ps=self.ps, pt=self.pt)
logger.note('DF-MP2 correlation energy: {0:.14f}'.format(self.e_corr))
return self.e_corr
def make_rdm1(self, relaxed=False, ao_repr=False):
'''
Calculates the MP2 1-RDM.
- The relaxed density matrix can be used to calculate properties of systems
for which MP2 is well-behaved.
- The unrelaxed density is less suited to calculate properties accurately,
but it can be used to calculate CASSCF starting orbitals.
Args:
relaxed : relaxed density if True, unrelaxed density if False
ao_repr : density in AO or in MO basis
Returns:
the 1-RDM
'''
logger = lib.logger.new_logger(self)
if relaxed:
logger.info('')
logger.info('DF-MP2 relaxed density calculation')
else:
logger.info('')
logger.info('DF-MP2 unrelaxed density calculation')
rdm1_mo = make_rdm1(self, relaxed, logger)
if ao_repr:
return lib.einsum('sxp,spq,syq->sxy', self.mo_coeff, rdm1_mo, self.mo_coeff)
else:
return rdm1_mo
def make_natorbs(self, rdm1_mo=None, relaxed=False):
'''
Calculate natural orbitals.
Note: the most occupied orbitals come first (left)
and the least occupied orbitals last (right).
Args:
rdm1_mo : 1-RDM in MO basis
the function calculates a density matrix if none is provided
relaxed : calculated relaxed or unrelaxed density matrix
Returns:
natural occupation numbers, natural orbitals
'''
if rdm1_mo is None:
dm = self.make_rdm1(ao_repr=False, relaxed=relaxed)
elif isinstance(rdm1_mo, np.ndarray):
dm = rdm1_mo
else:
raise TypeError('rdm1_mo must be a 3-D array')
# Transform the beta component to the alpha basis and sum both together.
SAO = self.mol.intor_symmetric('int1e_ovlp')
Sab = lib.einsum('xp,xy,yq->pq', self.mo_coeff[0, :, :], SAO, self.mo_coeff[1, :, :])
rdm1_abas = dm[0, :, :] + lib.einsum('pr,rs,qs->pq', Sab, dm[1, :, :], Sab)
# Diagonalize the spin-traced 1-RDM in alpha basis to get the natural orbitals.
eigval, eigvec = np.linalg.eigh(rdm1_abas)
natocc = np.flip(eigval)
natorb = lib.dot(self.mo_coeff[0, :, :], np.fliplr(eigvec))
return natocc, natorb
def calculate_integrals_(self):
'''
Calculates the three center integrals for MP2.
'''
intsfile = []
logger = lib.logger.new_logger(self)
logger.info('')
logger.info('Calculating integrals')
for s in [0, 1]:
Co = self.mo_coeff[s][:, self.occ_mask[s]]
Cv = self.mo_coeff[s][:, self.nocc[s]:]
f = ints3c_cholesky(self.mol, self.auxmol, Co, Cv, self.max_memory, logger)
intsfile.append(f)
self._intsfile = intsfile
logger.info('Stored in files:\n{0:s}\n{1:s}'.
format(self._intsfile[0].filename, self._intsfile[1].filename))
def delete(self):
'''
Delete the temporary file(s).
'''
self._intsfile = []
def nuc_grad_method(self):
raise NotImplementedError
MP2 = UMP2 = DFMP2 = DFUMP2
class SCSDFUMP2(DFUMP2):
'''
UHF-DF-MP2 with spin-component scaling
S. Grimme, J. Chem. Phys. 118 (2003), 9095
https://doi.org/10.1063/1.1569242
'''
def __init__(self, mf, ps=6/5, pt=1/3, *args, **kwargs):
'''
mf : UHF instance
ps : opposite-spin (singlet) scaling factor
pt : same-spin (triplet) scaling factor
'''
super().__init__(mf, *args, **kwargs)
self.ps = ps
self.pt = pt
def dump_flags(self, logger=None):
if not logger:
logger = lib.logger.new_logger(self)
super().dump_flags(logger=logger)
logger.info('pt(scs) = {0:.6f}'.format(self.pt))
logger.info('ps(scs) = {0:.6f}'.format(self.ps))
SCSMP2 = SCSUMP2 = SCSDFMP2 = SCSDFUMP2
def emp2_uhf(intsfiles, mo_energy, frozen_mask, logger, ps=1.0, pt=1.0):
'''
    Calculates the DF-MP2 energy with a UHF reference.
Args:
intsfiles : contains the three center integrals in MO basis
mo_energy : energies of the molecular orbitals
frozen_mask : boolean mask for frozen orbitals
logger : Logger instance
ps : SCS factor for opposite-spin contributions
pt : SCS factor for same-spin contributions
Returns:
the MP2 correlation energy
'''
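    # Editorial sketch of what the loops below evaluate (not from the original source;
    # standard DF-MP2 notation with K_ab = (ia|jb) and DE = e_i + e_j - e_a - e_b):
    #   same-spin  (j < i):  E_ss += pt * sum_ab (K_ab - K_ba) / DE * K_ab
    #   opposite-spin (i,j): E_os += ps * sum_ab  K_ab         / DE * K_ab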
ints_a = intsfiles[0]['ints_cholesky']
ints_b = intsfiles[1]['ints_cholesky']
nocc_act = np.array([ints_a.shape[0], ints_b.shape[0]])
nfrozen = np.count_nonzero(frozen_mask[0])
if np.count_nonzero(frozen_mask[1]) != nfrozen:
raise ValueError('number of frozen alpha and beta orbitals differs')
nocc = nocc_act + nfrozen
nvirt = np.array([ints_a.shape[2], ints_b.shape[2]])
logger.debug(' UHF-DF-MP2 energy routine')
logger.debug(' Occupied orbitals: {0:d}, {1:d}'.format(nocc[0], nocc[1]))
logger.debug(' Virtual orbitals: {0:d}, {1:d}'.format(nvirt[0], nvirt[1]))
logger.debug(' Frozen orbitals: {0:d}'.format(nfrozen))
logger.debug(' Integrals (alpha) from file: {0:s}'.format(intsfiles[0].filename))
logger.debug(' Integrals (beta) from file: {0:s}'.format(intsfiles[1].filename))
mo_energy_masked = mo_energy[~frozen_mask].reshape((2, -1))
energy_total = 0.0
# loop over spins to calculate same-spin energies
for s in 0, 1:
energy_contrib = 0.0
if s == 0:
logger.debug(' alpha-alpha pairs')
ints = ints_a
else:
logger.debug(' beta-beta pairs')
ints = ints_b
# precompute Eab[a, b] = mo_energy[a] + mo_energy[b] for the denominator
Eab = np.zeros((nvirt[s], nvirt[s]))
for a in range(nvirt[s]):
Eab[a, :] += mo_energy[s, nocc[s]+a]
Eab[:, a] += mo_energy[s, nocc[s]+a]
# loop over j < i
for i in range(nocc_act[s]):
ints3c_ia = ints[i, :, :]
for j in range(i):
ints3c_jb = ints[j, :, :]
Kab = lib.dot(ints3c_ia.T, ints3c_jb)
DE = mo_energy_masked[s, i] + mo_energy_masked[s, j] - Eab
Tab = (Kab - Kab.T) / DE
energy_contrib += pt * lib.einsum('ab,ab', Tab, Kab)
logger.debug(' E = {0:.14f}'.format(energy_contrib))
energy_total += energy_contrib
# opposite-spin energy
logger.debug(' alpha-beta pairs')
# precompute Eab[a, b] = mo_energy[a] + mo_energy[b] for the denominator
Eab = np.zeros((nvirt[0], nvirt[1]))
for a in range(nvirt[0]):
Eab[a, :] += mo_energy[0, nocc[0]+a]
for b in range(nvirt[1]):
Eab[:, b] += mo_energy[1, nocc[1]+b]
# loop over i(alpha), j(beta)
energy_contrib = 0.0
for i in range(nocc_act[0]):
ints3c_ia = ints_a[i, :, :]
for j in range(nocc_act[1]):
ints3c_jb = ints_b[j, :, :]
Kab = lib.dot(ints3c_ia.T, ints3c_jb)
DE = mo_energy_masked[0, i] + mo_energy_masked[1, j] - Eab
Tab = Kab / DE
energy_contrib += ps * lib.einsum('ab,ab', Tab, Kab)
logger.debug(' E = {0:.14f}'.format(energy_contrib))
energy_total += energy_contrib
logger.debug(' DF-MP2 correlation energy: {0:.14f}'.format(energy_total))
return energy_total
def make_rdm1(mp2, relaxed, logger=None):
'''
Calculates the unrelaxed or relaxed MP2 density matrix.
Args:
mp2 : DFUMP2 instance
relaxed : relaxed density if True, unrelaxed density if False
logger : Logger instance
Returns:
the 1-RDM in MO basis
'''
if not mp2.has_ints:
mp2.calculate_integrals_()
# Calculate the unrelaxed 1-RDM.
if logger is None:
logger = lib.logger.new_logger(mp2)
rdm1, GammaFile = \
ump2_densities_contribs(mp2._intsfile, mp2.mo_energy, mp2.frozen_mask, mp2.max_memory,
logger, calcGamma=relaxed, auxmol=mp2.auxmol, ps=mp2.ps, pt=mp2.pt)
if relaxed:
Lvo = [None, None]
for s, sstr in [(0, 'alpha'), (1, 'beta')]:
# right-hand side for the CPHF equation
Gamma = GammaFile['Gamma_'+sstr]
Lvo[s], Lfo_s = \
orbgrad_from_Gamma(mp2.mol, mp2.auxmol, Gamma, mp2.mo_coeff[s], mp2.frozen_mask[s],
mp2.max_memory, logger)
# frozen core orbital relaxation contribution
frozen_list = np.arange(mp2.nmo)[mp2.frozen_mask[s]]
for fm, f in enumerate(frozen_list):
for i in np.arange(mp2.nmo)[mp2.occ_mask[s]]:
zfo = Lfo_s[fm, i] / (mp2.mo_energy[s, f] - mp2.mo_energy[s, i])
rdm1[s, f, i] += 0.5 * zfo
rdm1[s, i, f] += 0.5 * zfo
# Fock response
Lvo_a, Lvo_b = fock_response_uhf(mp2._scf, rdm1)
Lvo[0] -= Lvo_a
Lvo[1] -= Lvo_b
# solving the CPHF equations
minusLvo = [-Lvo[0], -Lvo[1]]
zvo = solve_cphf_uhf(mp2._scf, minusLvo, mp2.cphf_max_cycle, mp2.cphf_tol, logger)
# add the relaxation contribution to the density
for s in 0, 1:
rdm1[s, mp2.nocc[s]:, :mp2.nocc[s]] += 0.5 * zvo[s]
rdm1[s, :mp2.nocc[s], mp2.nocc[s]:] += 0.5 * zvo[s].T
# HF contribution
for s in 0, 1:
rdm1[s, :mp2.nocc[s], :mp2.nocc[s]] += np.eye(mp2.nocc[s])
return rdm1
def ump2_densities_contribs(intsfiles, mo_energy, frozen_mask, max_memory, logger,
calcGamma=False, auxmol=None, ps=1.0, pt=1.0):
'''
Calculates the unrelaxed DF-MP2 density matrix contribution with a UHF reference.
    Note: this is the difference density, i.e. without HF contribution.
    Also calculates the three-center two-particle density if requested.
Args:
        intsfiles : contain the three center integrals for the alpha and beta spin cases
mo_energy : molecular orbital energies
frozen_mask : boolean mask for frozen orbitals
max_memory : memory threshold in MB
logger : Logger instance
calcGamma : if True, calculate 3c2e density
auxmol : required if relaxed is True
ps : SCS factor for opposite-spin contributions
pt : SCS factor for same-spin contributions
Returns:
matrix containing the 1-RDM contribution, file with 3c2e density if requested
'''
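    # Editorial sketch of the density blocks assembled below (not from the original
    # source; T denotes the spin-scaled MP2 amplitudes built from the DF integrals):
    #   virtual-virtual:   P_ab += sum_{ij,c} T^ij_ac T^ij_bc
    #   occupied-occupied: P_ij -= sum_{k,ab} T^ik_ab T^jk_ab
    # The optional 3c2e density Gamma is accumulated alongside for the relaxed density.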
ints = [intsfiles[s]['ints_cholesky'] for s in (0, 1)]
nocc_act = np.array([ints[s].shape[0] for s in (0, 1)])
naux = ints[0].shape[1]
if ints[1].shape[1] != naux:
raise ValueError('integrals have inconsistent aux dimensions')
nvirt = np.array([ints[s].shape[2] for s in (0, 1)])
nmo = mo_energy.shape[1]
nfrozen = np.count_nonzero(frozen_mask[0])
    if np.count_nonzero(frozen_mask[1]) != nfrozen:
raise ValueError('unequal numbers of frozen orbitals for alpha and beta')
nocc = nfrozen + nocc_act
if np.any(nocc + nvirt != nmo):
raise ValueError('numbers of frozen, occupied and virtual orbitals inconsistent')
logger.debug(' Density matrix contributions for DF-MP2')
logger.debug(' Occupied orbitals: {0:d}, {1:d}'.format(nocc[0], nocc[1]))
logger.debug(' Virtual orbitals: {0:d}, {1:d}'.format(nvirt[0], nvirt[1]))
logger.debug(' Frozen orbitals: {0:d}'.format(nfrozen))
logger.debug(' Three center integrals (alpha) from file: {0:s}'.format(intsfiles[0].filename))
logger.debug(' Three center integrals (beta) from file: {0:s}'.format(intsfiles[1].filename))
GammaFile, LT = None, None
if calcGamma:
if not auxmol:
raise RuntimeError('auxmol needs to be specified for relaxed density computation')
# create temporary file to store the two-body density Gamma
GammaFile = lib.H5TmpFile(libver='latest')
GammaFile.create_dataset('Gamma_alpha', (nocc_act[0], naux, nvirt[0]), dtype='f8')
GammaFile.create_dataset('Gamma_beta', (nocc_act[1], naux, nvirt[1]), dtype='f8')
logger.debug(' Storing 3c2e density in file: {0:s}'.format(GammaFile.filename))
# We will need LT = L^T, where L L^T = V
LT = scipy.linalg.cholesky(auxmol.intor('int2c2e'), lower=False)
# We start forming P with contiguous frozen, occupied, virtual subblocks.
P = np.zeros((2, nmo, nmo))
mo_energy_masked = mo_energy[~frozen_mask].reshape(2, nmo-nfrozen)
# Loop over all the spin variants
for s1, s2 in [(0, 0), (0, 1), (1, 0), (1, 1)]:
with lib.H5TmpFile(libver='latest') as tfile:
tiset = \
tfile.create_dataset('amplitudes', (nocc_act[s2], nvirt[s1], nvirt[s2]), dtype='f8')
s1_str = ('alpha', 'beta')[s1]
s2_str = ('alpha', 'beta')[s2]
logger.debug(' {0:s}-{1:s} pairs'.format(s1_str, s2_str))
logger.debug(' Storing amplitudes in temporary file: {0:s}'.format(tfile.filename))
# Precompute Eab[a, b] = mo_energy[a] + mo_energy[b] for division with numpy.
Eab = np.zeros((nvirt[s1], nvirt[s2]))
for a in range(nvirt[s1]):
Eab[a, :] += mo_energy[s1, nocc[s1]+a]
for b in range(nvirt[s2]):
Eab[:, b] += mo_energy[s2, nocc[s2]+b]
# For each occupied spin orbital i, all amplitudes are calculated once and
# stored on disk. The occupied 1-RDM contribution is calculated in a batched
# algorithm. More memory -> more efficient I/O.
# The virtual contribution to the 1-RDM is calculated in memory.
for i in range(nocc_act[s1]):
ints3c_ia = ints[s1][i, :, :]
# Amplitudes T^ij_ab are calculated for a given orbital i with spin s1,
# and all j (s2), a (s1) and b (s2). These amplitudes are stored on disk.
for j in range(nocc_act[s2]):
ints3c_jb = ints[s2][j, :, :]
Kab = lib.dot(ints3c_ia.T, ints3c_jb)
DE = mo_energy_masked[s1, i] + mo_energy_masked[s2, j] - Eab
if s1 == s2:
numerator = Kab - Kab.T
prefactor = 0.5 * pt
else:
numerator = Kab
prefactor = ps
Tab = numerator / DE
tiset[j, :, :] = Tab
# virtual 1-RDM contribution
P[s1, nocc[s1]:, nocc[s1]:] += prefactor * lib.dot(Tab, Tab.T)
del ints3c_jb, Kab, DE, numerator, Tab
# Batches of amplitudes are read from disk to calculate the occupied
# 1-RDM contribution.
batchsize = int((max_memory - lib.current_memory()[0]) * 1e6 / (nocc_act[s2] * nvirt[s2] * 8))
batchsize = min(nvirt[s1], batchsize)
if batchsize < 1:
raise MemoryError('Insufficient memory (PYSCF_MAX_MEMORY).')
logger.debug2(' Batch size: {0:d} (of {1:d})'.format(batchsize, nvirt[s1]))
logger.debug2(' Pij formation - MO {0:d} ({1:s}), batch size {2:d} (of {3:d})'.
format(i, s1_str, batchsize, nvirt[s1]))
for astart in range(0, nvirt[s1], batchsize):
aend = min(astart+batchsize, nvirt[s1])
tbatch = tiset[:, astart:aend, :]
if s1 == s2:
prefactor = 0.5 * pt
else:
prefactor = ps
P[s2, nfrozen:nocc[s2], nfrozen:nocc[s2]] -= \
prefactor * lib.einsum('iab,jab->ij', tbatch, tbatch)
del tbatch
if calcGamma:
# This produces (P | Q)^-1 (Q | i a)
ints3cV1_ia = scipy.linalg.solve_triangular(LT, ints3c_ia, lower=False)
# Here, we construct Gamma for spin s2
Gamma = GammaFile['Gamma_'+s2_str]
# Read batches of amplitudes from disk and calculate the two-body density Gamma
size = nvirt[s1] * nvirt[s2] * 8 + naux * nvirt[s2] * 8
batchsize = int((max_memory - lib.current_memory()[0]) * 1e6 / size)
batchsize = min(nocc_act[s2], batchsize)
if batchsize < 1:
raise MemoryError('Insufficient memory (PYSCF_MAX_MEMORY).')
logger.debug2(' Gamma ({0:s}) formation - MO {1:d} ({2:s}), batch size {3:d} (of {4:d})'.
format(s2_str, i, s1_str, batchsize, nocc_act[s2]))
if s1 == s2:
prefactor = 2.0 * pt
else:
prefactor = 2.0 * ps
for jstart in range(0, nocc_act[s2], batchsize):
jend = min(jstart+batchsize, nocc_act[s2])
tbatch = tiset[jstart:jend, :, :]
# Here, we collect two-body density contributions for spin s2
Gbatch = Gamma[jstart:jend, :, :]
for jj in range(jend-jstart):
Tijab = tbatch[jj]
Gbatch[jj] += prefactor * lib.dot(ints3cV1_ia, Tijab)
Gamma[jstart:jend, :, :] = Gbatch
del tbatch, Gbatch
# now reorder P such that the frozen orbitals correspond to frozen_mask
for s in 0, 1:
idx_reordered = \
np.concatenate([np.arange(nmo)[frozen_mask[s]], np.arange(nmo)[~frozen_mask[s]]])
P[s][idx_reordered, :] = P[s].copy()
P[s][:, idx_reordered] = P[s].copy()
logger.debug(' Density matrix contributions calculation finished')
return P, GammaFile
def fock_response_uhf(mf, dm, full=True):
'''
Calculate the unrestricted Fock response function for a given density matrix.
Args:
mf : UHF instance
dm : density matrix in MO basis
full : full MO density matrix if True, [virt. x occ., virt. x occ.] if False
Returns:
Fock response in MO basis. Shape: [virt. x occ., virt. x occ.]
'''
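    # Editorial note (not from the original source): the MO-basis density is
    # back-transformed to the AO basis, symmetrized, and passed to get_veff, so the
    # returned quantity is the two-electron potential generated by that perturbed
    # density, projected onto the virtual x occupied MO block for each spin.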
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
nao = mf.mol.nao
dmao = np.zeros((2, nao, nao))
for s in 0, 1:
if full:
dmao[s, :, :] = lib.einsum('xp,pq,yq->xy', mo_coeff[s], dm[s], mo_coeff[s])
else:
Ci = mo_coeff[s][:, mo_occ[s]>0]
Ca = mo_coeff[s][:, mo_occ[s]==0]
dmao[s, :, :] = lib.einsum('xa,ai,yi->xy', Ca, dm[s], Ci)
rao = mf.get_veff(dm=dmao+dmao.transpose((0, 2, 1)))
rvo = [None, None]
for s in 0, 1:
Ci = mo_coeff[s][:, mo_occ[s]>0]
Ca = mo_coeff[s][:, mo_occ[s]==0]
rvo[s] = lib.einsum('xa,xy,yi->ai', Ca, rao[s], Ci)
return rvo
def solve_cphf_uhf(mf, Lvo, max_cycle, tol, logger):
'''
Solve the CPHF equations.
Args:
mf : a UHF object
        Lvo : right-hand side of the response equation
max_cycle : number of iterations for the CPHF solver
tol : convergence tolerance for the CPHF solver
logger : Logger object
'''
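    # Editorial sketch (not from the original source): schematically, the solver finds
    # z with  (e_a - e_i) * z_ai + R(z)_ai = rhs_ai  for each spin, where R is the
    # orbital response evaluated through fock_response_uhf in fvind below; the exact
    # sign conventions are those of pyscf's ucphf.solve.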
logger.info('Solving the CPHF response equations')
logger.info('Max. iterations: {0:d}'.format(max_cycle))
logger.info('Convergence tolerance: {0:.3g}'.format(tol))
# Currently we need to make the CPHF solver somewhat more talkative to see anything at all.
cphf_verbose = logger.verbose
if logger.verbose == lib.logger.INFO:
cphf_verbose = lib.logger.DEBUG
nva, noa = Lvo[0].shape
nvb, nob = Lvo[1].shape
def fvind(zflat):
za = zflat[0, :noa*nva].reshape(nva, noa)
zb = zflat[0, -nob*nvb:].reshape(nvb, nob)
ra, rb = fock_response_uhf(mf, [za, zb], full=False)
rflat = np.hstack([ra.reshape((1, noa*nva)), rb.reshape((1, nob*nvb))])
return rflat
zvo = ucphf.solve(fvind, mf.mo_energy, mf.mo_occ, Lvo,
max_cycle=max_cycle, tol=tol, verbose=cphf_verbose)[0]
logger.info('CPHF iterations finished')
return zvo
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.atom = [['O', (0., 0., 0.)],
['O', (1.21, 0., 0.)]]
mol.spin = 2
mol.basis = 'def2-SVP'
mol.verbose = lib.logger.INFO
mol.build()
mf = scf.UHF(mol)
mf.kernel()
with DFUMP2(mf) as pt:
pt.kernel()
natocc, _ = pt.make_natorbs()
print()
print(natocc)
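        # Possible follow-up steps (editorial sketch, not part of the original example):
        # rdm1 = pt.make_rdm1(relaxed=True, ao_repr=True)   # relaxed 1-RDM in AO basis
        # The spin-component-scaled variant, SCSDFUMP2(mf), is used in the same way.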
|
sunqm/pyscf
|
pyscf/mp/dfump2_native.py
|
Python
|
apache-2.0
| 26,258
|
[
"PySCF"
] |
2db960ec1098263013504b6c4a44b7b74370526e63f72719e7780b6a31403198
|
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""Implementation of the Multiswarm Particle Swarm Optimization algorithm as
presented in *Blackwell, Branke, and Li, 2008, Particle Swarms for Dynamic
Optimization Problems.*
"""
import itertools
import math
import operator
import random
import numpy
try:
from itertools import imap
except ImportError:
    # Python 3: the built-in map is already lazy; nothing to do.
pass
else:
map = imap
from deap import base
from deap.benchmarks import movingpeaks
from deap import creator
from deap import tools
scenario = movingpeaks.SCENARIO_2
NDIM = 5
BOUNDS = [scenario["min_coord"], scenario["max_coord"]]
mpb = movingpeaks.MovingPeaks(dim=NDIM, **scenario)
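# Editorial note: SCENARIO_2 is one of the predefined moving-peaks parameter sets
# shipped with deap.benchmarks.movingpeaks; it describes a dynamic landscape whose
# peaks move over time, which is what the multiswarm algorithm below is meant to track.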
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Particle", list, fitness=creator.FitnessMax, speed=list,
best=None, bestfit=creator.FitnessMax)
creator.create("Swarm", list, best=None, bestfit=creator.FitnessMax)
def generate(pclass, dim, pmin, pmax, smin, smax):
part = pclass(random.uniform(pmin, pmax) for _ in range(dim))
part.speed = [random.uniform(smin, smax) for _ in range(dim)]
return part
def convertQuantum(swarm, rcloud, centre, dist):
dim = len(swarm[0])
for part in swarm:
        position = [random.gauss(0, 1) for _ in range(dim)]
        # Use a separate name for the vector norm so that the 'dist' argument
        # (the distribution selector string) is not shadowed.
        norm = math.sqrt(sum(x ** 2 for x in position))
        if dist == "gaussian":
            u = abs(random.gauss(0, 1.0 / 3.0))
            part[:] = [(rcloud * x * u ** (1.0 / dim) / norm) + c for x, c in zip(position, centre)]
        elif dist == "uvd":
            u = random.random()
            part[:] = [(rcloud * x * u ** (1.0 / dim) / norm) + c for x, c in zip(position, centre)]
        elif dist == "nuvd":
            u = abs(random.gauss(0, 1.0 / 3.0))
            part[:] = [(rcloud * x * u / norm) + c for x, c in zip(position, centre)]
del part.fitness.values
del part.bestfit.values
part.best = None
return swarm
def updateParticle(part, best, chi, c):
ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce1_p = map(operator.mul, ce1, map(operator.sub, best, part))
ce2_g = map(operator.mul, ce2, map(operator.sub, part.best, part))
a = map(operator.sub,
map(operator.mul,
itertools.repeat(chi),
map(operator.add, ce1_p, ce2_g)),
map(operator.mul,
itertools.repeat(1 - chi),
part.speed))
part.speed = list(map(operator.add, part.speed, a))
part[:] = list(map(operator.add, part, part.speed))
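# Editorial reading of updateParticle (not from the original source): taken together,
# the two statements above implement the Clerc-Kennedy constriction update
#   v <- chi * (v + c*r1*(swarm_best - x) + c*r2*(personal_best - x)),  x <- x + v,
# with chi = 0.729843788 and c = 2.05 as registered below.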
toolbox = base.Toolbox()
toolbox.register("particle", generate, creator.Particle, dim=NDIM,
pmin=BOUNDS[0], pmax=BOUNDS[1], smin=-(BOUNDS[1] - BOUNDS[0]) / 2.0,
smax=(BOUNDS[1] - BOUNDS[0]) / 2.0)
toolbox.register("swarm", tools.initRepeat, creator.Swarm, toolbox.particle)
toolbox.register("update", updateParticle, chi=0.729843788, c=2.05)
toolbox.register("convert", convertQuantum, dist="nuvd")
toolbox.register("evaluate", mpb)
def main(verbose=True):
NSWARMS = 1
NPARTICLES = 5
NEXCESS = 3
RCLOUD = 0.5 # 0.5 times the move severity
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "nswarm", "evals", "error", "offline_error", "avg", "max"
# Generate the initial population
population = [toolbox.swarm(n=NPARTICLES) for _ in range(NSWARMS)]
# Evaluate each particle
for swarm in population:
for part in swarm:
part.fitness.values = toolbox.evaluate(part)
# Update swarm's attractors personal best and global best
if not part.best or part.fitness > part.bestfit:
part.best = toolbox.clone(part[:]) # Get the position
part.bestfit.values = part.fitness.values # Get the fitness
if not swarm.best or part.fitness > swarm.bestfit:
swarm.best = toolbox.clone(part[:]) # Get the position
swarm.bestfit.values = part.fitness.values # Get the fitness
record = stats.compile(itertools.chain(*population))
logbook.record(gen=0, evals=mpb.nevals, nswarm=len(population),
error=mpb.currentError(), offline_error=mpb.offlineError(), **record)
if verbose:
print(logbook.stream)
generation = 1
while mpb.nevals < 5e5:
# Check for convergence
rexcl = (BOUNDS[1] - BOUNDS[0]) / (2 * len(population) ** (1.0 / NDIM))
not_converged = 0
worst_swarm_idx = None
worst_swarm = None
for i, swarm in enumerate(population):
# Compute the diameter of the swarm
for p1, p2 in itertools.combinations(swarm, 2):
d = math.sqrt(sum((x1 - x2) ** 2. for x1, x2 in zip(p1, p2)))
if d > 2 * rexcl:
not_converged += 1
# Search for the worst swarm according to its global best
if not worst_swarm or swarm.bestfit < worst_swarm.bestfit:
worst_swarm_idx = i
worst_swarm = swarm
break
# If all swarms have converged, add a swarm
if not_converged == 0:
population.append(toolbox.swarm(n=NPARTICLES))
# If too many swarms are roaming, remove the worst swarm
elif not_converged > NEXCESS:
population.pop(worst_swarm_idx)
# Update and evaluate the swarm
for swarm in population:
# Check for change
if swarm.best and toolbox.evaluate(swarm.best) != swarm.bestfit.values:
# Convert particles to quantum particles
swarm[:] = toolbox.convert(swarm, rcloud=RCLOUD, centre=swarm.best)
swarm.best = None
del swarm.bestfit.values
for part in swarm:
# Not necessary to update if it is a new swarm
# or a swarm just converted to quantum
if swarm.best and part.best:
toolbox.update(part, swarm.best)
part.fitness.values = toolbox.evaluate(part)
# Update swarm's attractors personal best and global best
if not part.best or part.fitness > part.bestfit:
part.best = toolbox.clone(part[:])
part.bestfit.values = part.fitness.values
if not swarm.best or part.fitness > swarm.bestfit:
swarm.best = toolbox.clone(part[:])
swarm.bestfit.values = part.fitness.values
record = stats.compile(itertools.chain(*population))
logbook.record(gen=generation, evals=mpb.nevals, nswarm=len(population),
error=mpb.currentError(), offline_error=mpb.offlineError(), **record)
if verbose:
print(logbook.stream)
# Apply exclusion
reinit_swarms = set()
for s1, s2 in itertools.combinations(range(len(population)), 2):
# Swarms must have a best and not already be set to reinitialize
if population[s1].best and population[s2].best and not (s1 in reinit_swarms or s2 in reinit_swarms):
dist = 0
for x1, x2 in zip(population[s1].best, population[s2].best):
dist += (x1 - x2) ** 2.
dist = math.sqrt(dist)
if dist < rexcl:
if population[s1].bestfit <= population[s2].bestfit:
reinit_swarms.add(s1)
else:
reinit_swarms.add(s2)
# Reinitialize and evaluate swarms
for s in reinit_swarms:
population[s] = toolbox.swarm(n=NPARTICLES)
for part in population[s]:
part.fitness.values = toolbox.evaluate(part)
# Update swarm's attractors personal best and global best
if not part.best or part.fitness > part.bestfit:
part.best = toolbox.clone(part[:])
part.bestfit.values = part.fitness.values
if not population[s].best or part.fitness > population[s].bestfit:
population[s].best = toolbox.clone(part[:])
population[s].bestfit.values = part.fitness.values
generation += 1
if __name__ == "__main__":
main()
|
DailyActie/Surrogate-Model
|
01-codes/deap-master/examples/pso/multiswarm.py
|
Python
|
mit
| 9,257
|
[
"Gaussian"
] |
58a7cf72310b213223e6afb99f46f4eac19edc6de353b1c4d41c7e7f8f944166
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
_EXCLUDED_PATHS = (
r"^breakpad[\\\/].*",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
r"^skia[\\\/].*",
r"^third_party[\\\/]WebKit[\\\/].*",
r"^v8[\\\/].*",
r".*MakeFile$",
r".+_autogen\.h$",
r".+[\\\/]pnacl_shim\.c$",
r"^gpu[\\\/]config[\\\/].*_list_json\.cc$",
r"^chrome[\\\/]browser[\\\/]resources[\\\/]pdf[\\\/]index.js",
)
# The NetscapePlugIn library is excluded from pan-project as it will soon
# be deleted together with the rest of the NPAPI and it's not worthwhile to
# update the coding style until then.
_TESTRUNNER_PATHS = (
r"^content[\\\/]shell[\\\/]tools[\\\/]plugin[\\\/].*",
)
# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
r'.*[\\\/](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
r'.+_(api|browser|kif|perf|pixel|unit|ui)?test(_[a-z]+)?%s' %
_IMPLEMENTATION_EXTENSIONS,
r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
r'.*[\\\/](test|tool(s)?)[\\\/].*',
# content_shell is used for running layout tests.
r'content[\\\/]shell[\\\/].*',
# At request of folks maintaining this folder.
r'chrome[\\\/]browser[\\\/]automation[\\\/].*',
# Non-production example code.
r'mojo[\\\/]examples[\\\/].*',
# Launcher for running iOS tests on the simulator.
r'testing[\\\/]iossim[\\\/]iossim\.mm$',
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.')
_INCLUDE_ORDER_WARNING = (
'Your #include order seems to be broken. Remember to use the right '
'collation (LC_COLLATE=C) and check\nhttps://google.github.io/styleguide/'
'cppguide.html#Names_and_Order_of_Includes')
_BANNED_OBJC_FUNCTIONS = (
(
'addTrackingRect:',
(
      'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is '
'prohibited. Please use CrTrackingArea instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
r'/NSTrackingArea\W',
(
'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
'instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
False,
),
(
'convertPointFromBase:',
(
'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertPointToBase:',
(
'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
'Please use |convertPoint:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectFromBase:',
(
'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
'Please use |convertRect:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertRectToBase:',
(
'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
'Please use |convertRect:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeFromBase:',
(
'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
'Please use |convertSize:(point) fromView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
(
'convertSizeToBase:',
(
'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
'Please use |convertSize:(point) toView:nil| instead.',
'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
),
True,
),
)
_BANNED_CPP_FUNCTIONS = (
# Make sure that gtest's FRIEND_TEST() macro is not used; the
# FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
# used instead since that allows for FLAKY_ and DISABLED_ prefixes.
(
'FRIEND_TEST(',
(
'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
),
False,
(),
),
(
'ScopedAllowIO',
(
'New code should not use ScopedAllowIO. Post a task to the blocking',
'pool or the FILE thread instead.',
),
True,
(
r"^base[\\\/]process[\\\/]process_linux\.cc$",
r"^base[\\\/]process[\\\/]process_metrics_linux\.cc$",
r"^blimp[\\\/]engine[\\\/]app[\\\/]blimp_browser_main_parts\.cc$",
r"^chrome[\\\/]browser[\\\/]chromeos[\\\/]boot_times_recorder\.cc$",
r"^chrome[\\\/]browser[\\\/]lifetime[\\\/]application_lifetime\.cc$",
r"^chrome[\\\/]browser[\\\/]chromeos[\\\/]"
"customization_document_browsertest\.cc$",
r"^components[\\\/]crash[\\\/]app[\\\/]breakpad_mac\.mm$",
r"^content[\\\/]shell[\\\/]browser[\\\/]layout_test[\\\/]" +
r"test_info_extractor\.cc$",
r"^content[\\\/].*browser(|_)test[a-zA-Z_]*\.cc$",
r"^content[\\\/]shell[\\\/]browser[\\\/]shell_browser_main\.cc$",
r"^content[\\\/]shell[\\\/]browser[\\\/]shell_message_filter\.cc$",
r"^content[\\\/]test[\\\/]ppapi[\\\/]ppapi_test\.cc$",
r"^mojo[\\\/]edk[\\\/]embedder[\\\/]" +
r"simple_platform_shared_buffer_posix\.cc$",
r"^net[\\\/]disk_cache[\\\/]cache_util\.cc$",
r"^net[\\\/]cert[\\\/]test_root_certs\.cc$",
r"^net[\\\/]test[\\\/]embedded_test_server[\\\/]" +
r"embedded_test_server\.cc$",
r"^net[\\\/]test[\\\/]spawned_test_server[\\\/]local_test_server\.cc$",
r"^net[\\\/]test[\\\/]test_data_directory\.cc$",
r"^net[\\\/]url_request[\\\/]test_url_fetcher_factory\.cc$",
r"^ui[\\\/]base[\\\/]material_design[\\\/]"
"material_design_controller\.cc$",
r"^ui[\\\/]gl[\\\/]init[\\\/]gl_initializer_mac\.cc$",
r"^ui[\\\/]gl[\\\/]init[\\\/]gl_initializer_win\.cc$",
r"^ui[\\\/]gl[\\\/]init[\\\/]gl_initializer_x11\.cc$",
r"^ui[\\\/]ozone[\\\/]platform[\\\/]drm[\\\/]host[\\\/]"
"drm_display_host_manager\.cc$",
),
),
(
'setMatrixClip',
(
'Overriding setMatrixClip() is prohibited; ',
'the base function is deprecated. ',
),
True,
(),
),
(
'SkRefPtr',
(
'The use of SkRefPtr is prohibited. ',
'Please use sk_sp<> instead.'
),
True,
(),
),
(
'SkAutoRef',
(
'The indirect use of SkRefPtr via SkAutoRef is prohibited. ',
'Please use sk_sp<> instead.'
),
True,
(),
),
(
'SkAutoTUnref',
(
'The use of SkAutoTUnref is dangerous because it implicitly ',
'converts to a raw pointer. Please use sk_sp<> instead.'
),
True,
(),
),
(
'SkAutoUnref',
(
'The indirect use of SkAutoTUnref through SkAutoUnref is dangerous ',
'because it implicitly converts to a raw pointer. ',
'Please use sk_sp<> instead.'
),
True,
(),
),
(
r'/HANDLE_EINTR\(.*close',
(
'HANDLE_EINTR(close) is invalid. If close fails with EINTR, the file',
'descriptor will be closed, and it is incorrect to retry the close.',
'Either call close directly and ignore its return value, or wrap close',
'in IGNORE_EINTR to use its return value. See http://crbug.com/269623'
),
True,
(),
),
(
r'/IGNORE_EINTR\((?!.*close)',
(
'IGNORE_EINTR is only valid when wrapping close. To wrap other system',
'calls, use HANDLE_EINTR. See http://crbug.com/269623',
),
True,
(
# Files that #define IGNORE_EINTR.
r'^base[\\\/]posix[\\\/]eintr_wrapper\.h$',
r'^ppapi[\\\/]tests[\\\/]test_broker\.cc$',
),
),
(
r'/v8::Extension\(',
(
'Do not introduce new v8::Extensions into the code base, use',
'gin::Wrappable instead. See http://crbug.com/334679',
),
True,
(
r'extensions[\\\/]renderer[\\\/]safe_builtins\.*',
),
),
(
'#pragma comment(lib,',
(
'Specify libraries to link with in build files and not in the source.',
),
True,
(),
),
)
_IPC_ENUM_TRAITS_DEPRECATED = (
'You are using IPC_ENUM_TRAITS() in your code. It has been deprecated.\n'
'See http://www.chromium.org/Home/chromium-security/education/security-tips-for-ipc')
_VALID_OS_MACROS = (
# Please keep sorted.
'OS_ANDROID',
'OS_BSD',
'OS_CAT', # For testing.
'OS_CHROMEOS',
'OS_FREEBSD',
'OS_IOS',
'OS_LINUX',
'OS_MACOSX',
'OS_NACL',
'OS_NACL_NONSFI',
'OS_NACL_SFI',
'OS_NETBSD',
'OS_OPENBSD',
'OS_POSIX',
'OS_QNX',
'OS_SOLARIS',
'OS_WIN',
)
_ANDROID_SPECIFIC_PYDEPS_FILES = [
'build/android/test_runner.pydeps',
'net/tools/testserver/testserver.pydeps',
]
_GENERIC_PYDEPS_FILES = [
'build/secondary/tools/swarming_client/isolate.pydeps',
]
_ALL_PYDEPS_FILES = _ANDROID_SPECIFIC_PYDEPS_FILES + _GENERIC_PYDEPS_FILES
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files and the like, as the declaration of
# for-testing functions in header files are hard to distinguish from
# calls to such functions without a proper C++ parser.
file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS
base_function_pattern = r'[ :]test::[^\s]+|ForTest(s|ing)?|for_test(s|ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (inclusion_pattern.search(line) and
not comment_pattern.search(line) and
not exclusion_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
else:
return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
"""Checks to make sure no .h files include <iostream>."""
files = []
pattern = input_api.re.compile(r'^#include\s*<iostream>',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Do not #include <iostream> in header files, since it inserts static '
'initialization into every file including the header. Instead, '
'#include <ostream>. See http://crbug.com/94794',
files) ]
return []
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
"""Checks to make sure no source files use UNIT_TEST."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.mm'))):
continue
for line_num, line in f.ChangedContents():
if 'UNIT_TEST ' in line or line.endswith('UNIT_TEST'):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning('UNIT_TEST is only for headers.\n' +
'\n'.join(problems))]
def _CheckDCHECK_IS_ONHasBraces(input_api, output_api):
"""Checks to make sure DCHECK_IS_ON() does not skip the braces."""
errors = []
pattern = input_api.re.compile(r'DCHECK_IS_ON(?!\(\))',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if (not f.LocalPath().endswith(('.cc', '.mm', '.h'))):
continue
for lnum, line in f.ChangedContents():
if input_api.re.search(pattern, line):
errors.append(output_api.PresubmitError(
('%s:%d: Use of DCHECK_IS_ON() must be written as "#if ' +
'DCHECK_IS_ON()", not forgetting the braces.')
% (f.LocalPath(), lnum)))
return errors
def _FindHistogramNameInLine(histogram_name, line):
"""Tries to find a histogram name or prefix in a line."""
if not "affected-histogram" in line:
return histogram_name in line
# A histogram_suffixes tag type has an affected-histogram name as a prefix of
# the histogram_name.
if not '"' in line:
return False
histogram_prefix = line.split('\"')[1]
return histogram_prefix in histogram_name
def _CheckUmaHistogramChanges(input_api, output_api):
"""Check that UMA histogram names in touched lines can still be found in other
lines of the patch or in histograms.xml. Note that this check would not catch
the reverse: changes in histograms.xml not matched in the code itself."""
touched_histograms = []
histograms_xml_modifications = []
pattern = input_api.re.compile('UMA_HISTOGRAM.*\("(.*)"')
for f in input_api.AffectedFiles():
# If histograms.xml itself is modified, keep the modified lines for later.
if f.LocalPath().endswith(('histograms.xml')):
histograms_xml_modifications = f.ChangedContents()
continue
if not f.LocalPath().endswith(('cc', 'mm', 'cpp')):
continue
for line_num, line in f.ChangedContents():
found = pattern.search(line)
if found:
touched_histograms.append([found.group(1), f, line_num])
# Search for the touched histogram names in the local modifications to
# histograms.xml, and, if not found, on the base histograms.xml file.
unmatched_histograms = []
for histogram_info in touched_histograms:
histogram_name_found = False
for line_num, line in histograms_xml_modifications:
histogram_name_found = _FindHistogramNameInLine(histogram_info[0], line)
if histogram_name_found:
break
if not histogram_name_found:
unmatched_histograms.append(histogram_info)
histograms_xml_path = 'tools/metrics/histograms/histograms.xml'
problems = []
if unmatched_histograms:
with open(histograms_xml_path) as histograms_xml:
for histogram_name, f, line_num in unmatched_histograms:
histograms_xml.seek(0)
histogram_name_found = False
for line in histograms_xml:
histogram_name_found = _FindHistogramNameInLine(histogram_name, line)
if histogram_name_found:
break
if not histogram_name_found:
problems.append(' [%s:%d] %s' %
(f.LocalPath(), line_num, histogram_name))
if not problems:
return []
return [output_api.PresubmitPromptWarning('Some UMA_HISTOGRAM lines have '
'been modified and the associated histogram name has no match in either '
'%s or the modifications of it:' % (histograms_xml_path), problems)]
def _CheckFlakyTestUsage(input_api, output_api):
"""Check that FlakyTest annotation is our own instead of the android one"""
pattern = input_api.re.compile(r'import android.test.FlakyTest;')
files = []
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if f.LocalPath().endswith('Test.java'):
if pattern.search(input_api.ReadFile(f)):
files.append(f)
if len(files):
return [output_api.PresubmitError(
'Use org.chromium.base.test.util.FlakyTest instead of '
'android.test.FlakyTest',
files)]
return []
def _CheckNoNewWStrings(input_api, output_api):
"""Checks to make sure we don't introduce use of wstrings."""
problems = []
for f in input_api.AffectedFiles():
if (not f.LocalPath().endswith(('.cc', '.h')) or
f.LocalPath().endswith(('test.cc', '_win.cc', '_win.h')) or
'/win/' in f.LocalPath() or
'chrome_elf' in f.LocalPath() or
'install_static' in f.LocalPath()):
continue
allowWString = False
for line_num, line in f.ChangedContents():
if 'presubmit: allow wstring' in line:
allowWString = True
elif not allowWString and 'wstring' in line:
problems.append(' %s:%d' % (f.LocalPath(), line_num))
allowWString = False
else:
allowWString = False
if not problems:
return []
return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
' If you are calling a cross-platform API that accepts a wstring, '
'fix the API.\n' +
'\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
"""Make sure .DEPS.git is never modified manually."""
if any(f.LocalPath().endswith('.DEPS.git') for f in
input_api.AffectedFiles()):
return [output_api.PresubmitError(
'Never commit changes to .DEPS.git. This file is maintained by an\n'
'automated system based on what\'s in DEPS and your changes will be\n'
'overwritten.\n'
'See https://sites.google.com/a/chromium.org/dev/developers/how-tos/get-the-code#Rolling_DEPS\n'
'for more information')]
return []
def _CheckValidHostsInDEPS(input_api, output_api):
"""Checks that DEPS file deps are from allowed_hosts."""
# Run only if DEPS file has been modified to annoy fewer bystanders.
if all(f.LocalPath() != 'DEPS' for f in input_api.AffectedFiles()):
return []
# Outsource work to gclient verify
try:
input_api.subprocess.check_output(['gclient', 'verify'])
return []
  except input_api.subprocess.CalledProcessError as error:
return [output_api.PresubmitError(
'DEPS file must have only git dependencies.',
long_text=error.output)]
def _CheckNoBannedFunctions(input_api, output_api):
"""Make sure that banned functions are not used."""
warnings = []
errors = []
def IsBlacklisted(affected_file, blacklist):
local_path = affected_file.LocalPath()
for item in blacklist:
if input_api.re.match(item, local_path):
return True
return False
def CheckForMatch(affected_file, line_num, line, func_name, message, error):
matched = False
if func_name[0:1] == '/':
regex = func_name[1:]
if input_api.re.search(regex, line):
matched = True
elif func_name in line:
matched = True
if matched:
problems = warnings
if error:
problems = errors
problems.append(' %s:%d:' % (affected_file.LocalPath(), line_num))
for message_line in message:
problems.append(' %s' % message_line)
file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
CheckForMatch(f, line_num, line, func_name, message, error)
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
if IsBlacklisted(f, excluded_paths):
continue
CheckForMatch(f, line_num, line, func_name, message, error)
result = []
if (warnings):
result.append(output_api.PresubmitPromptWarning(
'Banned functions were used.\n' + '\n'.join(warnings)))
if (errors):
result.append(output_api.PresubmitError(
'Banned functions were used.\n' + '\n'.join(errors)))
return result
def _CheckNoPragmaOnce(input_api, output_api):
"""Make sure that banned functions are not used."""
files = []
pattern = input_api.re.compile(r'^#pragma\s+once',
input_api.re.MULTILINE)
for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
if not f.LocalPath().endswith('.h'):
continue
contents = input_api.ReadFile(f)
if pattern.search(contents):
files.append(f)
if files:
return [output_api.PresubmitError(
'Do not use #pragma once in header files.\n'
'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
files)]
return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
"""Checks to make sure we don't introduce use of foo ? true : false."""
problems = []
pattern = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
problems.append(' %s:%d' % (f.LocalPath(), line_num))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Please consider avoiding the "? true : false" pattern if possible.\n' +
'\n'.join(problems))]
def _CheckUnwantedDependencies(input_api, output_api):
"""Runs checkdeps on #include statements added in this
change. Breaking - rules is an error, breaking ! rules is a
warning.
"""
import sys
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'buildtools', 'checkdeps')]
import checkdeps
from cpp_checker import CppChecker
from rules import Rule
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
added_includes = []
for f in input_api.AffectedFiles():
if not CppChecker.IsCppFile(f.LocalPath()):
continue
changed_lines = [line for line_num, line in f.ChangedContents()]
added_includes.append([f.LocalPath(), changed_lines])
deps_checker = checkdeps.DepsChecker(input_api.PresubmitLocalPath())
error_descriptions = []
warning_descriptions = []
for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
added_includes):
description_with_path = '%s\n %s' % (path, rule_description)
if rule_type == Rule.DISALLOW:
error_descriptions.append(description_with_path)
else:
warning_descriptions.append(description_with_path)
results = []
if error_descriptions:
results.append(output_api.PresubmitError(
'You added one or more #includes that violate checkdeps rules.',
error_descriptions))
if warning_descriptions:
results.append(output_api.PresubmitPromptOrNotify(
'You added one or more #includes of files that are temporarily\n'
'allowed but being removed. Can you avoid introducing the\n'
'#include? See relevant DEPS file(s) for details and contacts.',
warning_descriptions))
return results
def _CheckFilePermissions(input_api, output_api):
"""Check that all files have their permissions properly set."""
if input_api.platform == 'win32':
return []
checkperms_tool = input_api.os_path.join(
input_api.PresubmitLocalPath(),
'tools', 'checkperms', 'checkperms.py')
args = [input_api.python_executable, checkperms_tool,
'--root', input_api.change.RepositoryRoot()]
for f in input_api.AffectedFiles():
args += ['--file', f.LocalPath()]
try:
input_api.subprocess.check_output(args)
return []
except input_api.subprocess.CalledProcessError as error:
return [output_api.PresubmitError(
'checkperms.py failed:',
long_text=error.output)]
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
"""Makes sure we don't include ui/aura/window_property.h
in header files.
"""
pattern = input_api.re.compile(r'^#include\s*"ui/aura/window_property.h"')
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('.h'):
continue
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d' % (f.LocalPath(), line_num))
results = []
if errors:
results.append(output_api.PresubmitError(
'Header files should not include ui/aura/window_property.h', errors))
return results
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
"""Checks that the lines in scope occur in the right order.
1. C system files in alphabetical order
2. C++ system files in alphabetical order
3. Project's .h files
"""
c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
custom_include_pattern = input_api.re.compile(r'\s*#include ".*')
C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)
state = C_SYSTEM_INCLUDES
previous_line = ''
previous_line_num = 0
problem_linenums = []
out_of_order = " - line belongs before previous line"
for line_num, line in scope:
if c_system_include_pattern.match(line):
if state != C_SYSTEM_INCLUDES:
problem_linenums.append((line_num, previous_line_num,
" - C system include file in wrong block"))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num,
out_of_order))
elif cpp_system_include_pattern.match(line):
if state == C_SYSTEM_INCLUDES:
state = CPP_SYSTEM_INCLUDES
elif state == CUSTOM_INCLUDES:
problem_linenums.append((line_num, previous_line_num,
" - c++ system include file in wrong block"))
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num, out_of_order))
elif custom_include_pattern.match(line):
if state != CUSTOM_INCLUDES:
state = CUSTOM_INCLUDES
elif previous_line and previous_line > line:
problem_linenums.append((line_num, previous_line_num, out_of_order))
else:
problem_linenums.append((line_num, previous_line_num,
"Unknown include type"))
previous_line = line
previous_line_num = line_num
warnings = []
for (line_num, previous_line_num, failure_type) in problem_linenums:
if line_num in changed_linenums or previous_line_num in changed_linenums:
warnings.append(' %s:%d:%s' % (file_path, line_num, failure_type))
return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
"""Checks the #include order for the given file f."""
system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
# Exclude the following includes from the check:
# 1) #include <.../...>, e.g., <sys/...> includes often need to appear in a
# specific order.
# 2) <atlbase.h>, "build/build_config.h"
excluded_include_pattern = input_api.re.compile(
r'\s*#include (\<.*/.*|\<atlbase\.h\>|"build/build_config.h")')
custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
# Match the final or penultimate token if it is xxxtest so we can ignore it
# when considering the special first include.
test_file_tag_pattern = input_api.re.compile(
r'_[a-z]+test(?=(_[a-zA-Z0-9]+)?\.)')
if_pattern = input_api.re.compile(
r'\s*#\s*(if|elif|else|endif|define|undef).*')
# Some files need specialized order of includes; exclude such files from this
# check.
uncheckable_includes_pattern = input_api.re.compile(
r'\s*#include '
'("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')
contents = f.NewContents()
warnings = []
line_num = 0
# Handle the special first include. If the first include file is
# some/path/file.h, the corresponding including file can be some/path/file.cc,
# some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
# etc. It's also possible that no special first include exists.
# If the included file is some/path/file_platform.h the including file could
# also be some/path/file_xxxtest_platform.h.
including_file_base_name = test_file_tag_pattern.sub(
'', input_api.os_path.basename(f.LocalPath()))
for line in contents:
line_num += 1
if system_include_pattern.match(line):
# No special first include -> process the line again along with normal
# includes.
line_num -= 1
break
match = custom_include_pattern.match(line)
if match:
match_dict = match.groupdict()
header_basename = test_file_tag_pattern.sub(
'', input_api.os_path.basename(match_dict['FILE'])).replace('.h', '')
if header_basename not in including_file_base_name:
# No special first include -> process the line again along with normal
# includes.
line_num -= 1
break
# Split into scopes: Each region between #if and #endif is its own scope.
scopes = []
current_scope = []
for line in contents[line_num:]:
line_num += 1
if uncheckable_includes_pattern.match(line):
continue
if if_pattern.match(line):
scopes.append(current_scope)
current_scope = []
elif ((system_include_pattern.match(line) or
custom_include_pattern.match(line)) and
not excluded_include_pattern.match(line)):
current_scope.append((line_num, line))
scopes.append(current_scope)
for scope in scopes:
warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
changed_linenums))
return warnings
def _CheckIncludeOrder(input_api, output_api):
"""Checks that the #include order is correct.
1. The corresponding header for source files.
2. C system files in alphabetical order
3. C++ system files in alphabetical order
4. Project's .h files in alphabetical order
Each region separated by #if, #elif, #else, #endif, #define and #undef follows
these rules separately.
"""
def FileFilterIncludeOrder(affected_file):
black_list = (_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(affected_file, black_list=black_list)
warnings = []
for f in input_api.AffectedFiles(file_filter=FileFilterIncludeOrder):
if f.LocalPath().endswith(('.cc', '.h', '.mm')):
changed_linenums = set(line_num for line_num, _ in f.ChangedContents())
warnings.extend(_CheckIncludeOrderInFile(input_api, f, changed_linenums))
results = []
if warnings:
results.append(output_api.PresubmitPromptOrNotify(_INCLUDE_ORDER_WARNING,
warnings))
return results
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if f.LocalPath().endswith('.md'):
# First-level headers in markdown look a lot like version control
# conflict markers. http://daringfireball.net/projects/markdown/basics
continue
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckForVersionControlConflicts(input_api, output_api):
"""Usually this is not intentional and will cause a compile failure."""
errors = []
for f in input_api.AffectedFiles():
errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
results = []
if errors:
results.append(output_api.PresubmitError(
'Version control conflict markers found, please resolve.', errors))
return results
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
def FilterFile(affected_file):
"""Filter function for use with input_api.AffectedSourceFiles,
below. This filters out everything except non-test files from
top-level directories that generally speaking should not hard-code
service URLs (e.g. src/android_webview/, src/content/ and others).
"""
return input_api.FilterSourceFile(
affected_file,
white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
black_list=(_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST))
base_pattern = ('"[^"]*(google|googleapis|googlezip|googledrive|appspot)'
'\.(com|net)[^"]*"')
comment_pattern = input_api.re.compile('//.*%s' % base_pattern)
pattern = input_api.re.compile(base_pattern)
problems = [] # items are (filename, line_number, line)
for f in input_api.AffectedSourceFiles(FilterFile):
for line_num, line in f.ChangedContents():
if not comment_pattern.search(line) and pattern.search(line):
problems.append((f.LocalPath(), line_num, line))
if problems:
return [output_api.PresubmitPromptOrNotify(
'Most layers below src/chrome/ should not hardcode service URLs.\n'
'Are you sure this is correct?',
[' %s:%d: %s' % (
problem[0], problem[1], problem[2]) for problem in problems])]
else:
return []
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
"""Makes sure there are no abbreviations in the name of PNG files.
The native_client_sdk directory is excluded because it has auto-generated PNG
files for documentation.
"""
errors = []
white_list = (r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$',)
black_list = (r'^native_client_sdk[\\\/]',)
file_filter = lambda f: input_api.FilterSourceFile(
f, white_list=white_list, black_list=black_list)
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
errors.append(' %s' % f.LocalPath())
results = []
if errors:
results.append(output_api.PresubmitError(
'The name of PNG files should not have abbreviations. \n'
'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
'Contact oshima@chromium.org if you have questions.', errors))
return results
def _FilesToCheckForIncomingDeps(re, changed_lines):
"""Helper method for _CheckAddedDepsHaveTargetApprovals. Returns
a set of DEPS entries that we should look up.
For a directory (rather than a specific filename) we fake a path to
a specific filename by adding /DEPS. This is chosen as a file that
will seldom or never be subject to per-file include_rules.
"""
# We ignore deps entries on auto-generated directories.
AUTO_GENERATED_DIRS = ['grit', 'jni']
# This pattern grabs the path without basename in the first
# parentheses, and the basename (if present) in the second. It
# relies on the simple heuristic that if there is a basename it will
# be a header file ending in ".h".
pattern = re.compile(
r"""['"]\+([^'"]+?)(/[a-zA-Z0-9_]+\.h)?['"].*""")
results = set()
for changed_line in changed_lines:
m = pattern.match(changed_line)
if m:
path = m.group(1)
if path.split('/')[0] not in AUTO_GENERATED_DIRS:
if m.group(2):
results.add('%s%s' % (path, m.group(2)))
else:
results.add('%s/DEPS' % path)
return results
def _CheckAddedDepsHaveTargetApprovals(input_api, output_api):
"""When a dependency prefixed with + is added to a DEPS file, we
want to make sure that the change is reviewed by an OWNER of the
target file or directory, to avoid layering violations from being
introduced. This check verifies that this happens.
"""
changed_lines = set()
file_filter = lambda f: not input_api.re.match(
r"^third_party[\\\/]WebKit[\\\/].*", f.LocalPath())
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
filename = input_api.os_path.basename(f.LocalPath())
if filename == 'DEPS':
changed_lines |= set(line.strip()
for line_num, line
in f.ChangedContents())
if not changed_lines:
return []
virtual_depended_on_files = _FilesToCheckForIncomingDeps(input_api.re,
changed_lines)
if not virtual_depended_on_files:
return []
if input_api.is_committing:
if input_api.tbr:
return [output_api.PresubmitNotifyResult(
'--tbr was specified, skipping OWNERS check for DEPS additions')]
if input_api.dry_run:
return [output_api.PresubmitNotifyResult(
'This is a dry run, skipping OWNERS check for DEPS additions')]
if not input_api.change.issue:
return [output_api.PresubmitError(
"DEPS approval by OWNERS check failed: this change has "
"no Rietveld issue number, so we can't check it for approvals.")]
output = output_api.PresubmitError
else:
output = output_api.PresubmitNotifyResult
owners_db = input_api.owners_db
owner_email, reviewers = (
input_api.canned_checks.GetCodereviewOwnerAndReviewers(
input_api,
owners_db.email_regexp,
approval_needed=input_api.is_committing))
owner_email = owner_email or input_api.change.author_email
reviewers_plus_owner = set(reviewers)
if owner_email:
reviewers_plus_owner.add(owner_email)
missing_files = owners_db.files_not_covered_by(virtual_depended_on_files,
reviewers_plus_owner)
# We strip the /DEPS part that was added by
# _FilesToCheckForIncomingDeps to fake a path to a file in a
# directory.
def StripDeps(path):
start_deps = path.rfind('/DEPS')
if start_deps != -1:
return path[:start_deps]
else:
return path
unapproved_dependencies = ["'+%s'," % StripDeps(path)
for path in missing_files]
if unapproved_dependencies:
output_list = [
output('You need LGTM from owners of depends-on paths in DEPS that were '
'modified in this CL:\n %s' %
'\n '.join(sorted(unapproved_dependencies)))]
suggested_owners = owners_db.reviewers_for(missing_files, owner_email)
output_list.append(output(
'Suggested missing target path OWNERS:\n %s' %
'\n '.join(suggested_owners or [])))
return output_list
return []
def _CheckSpamLogging(input_api, output_api):
file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS
black_list = (_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST +
(r"^base[\\\/]logging\.h$",
r"^base[\\\/]logging\.cc$",
r"^chrome[\\\/]app[\\\/]chrome_main_delegate\.cc$",
r"^chrome[\\\/]browser[\\\/]chrome_browser_main\.cc$",
r"^chrome[\\\/]browser[\\\/]ui[\\\/]startup[\\\/]"
r"startup_browser_creator\.cc$",
r"^chrome[\\\/]installer[\\\/]setup[\\\/].*",
r"chrome[\\\/]browser[\\\/]diagnostics[\\\/]" +
r"diagnostics_writer\.cc$",
r"^chrome_elf[\\\/]dll_hash[\\\/]dll_hash_main\.cc$",
r"^chromecast[\\\/]",
r"^cloud_print[\\\/]",
r"^components[\\\/]html_viewer[\\\/]"
r"web_test_delegate_impl\.cc$",
# TODO(peter): Remove this exception. https://crbug.com/534537
r"^content[\\\/]browser[\\\/]notifications[\\\/]"
r"notification_event_dispatcher_impl\.cc$",
r"^content[\\\/]common[\\\/]gpu[\\\/]client[\\\/]"
r"gl_helper_benchmark\.cc$",
r"^courgette[\\\/]courgette_minimal_tool\.cc$",
r"^courgette[\\\/]courgette_tool\.cc$",
r"^extensions[\\\/]renderer[\\\/]logging_native_handler\.cc$",
r"^ipc[\\\/]ipc_logging\.cc$",
r"^native_client_sdk[\\\/]",
r"^remoting[\\\/]base[\\\/]logging\.h$",
r"^remoting[\\\/]host[\\\/].*",
r"^sandbox[\\\/]linux[\\\/].*",
r"^tools[\\\/]",
r"^ui[\\\/]aura[\\\/]bench[\\\/]bench_main\.cc$",
r"^ui[\\\/]ozone[\\\/]platform[\\\/]cast[\\\/]",
r"^storage[\\\/]browser[\\\/]fileapi[\\\/]" +
r"dump_file_system.cc$",))
source_file_filter = lambda x: input_api.FilterSourceFile(
x, white_list=(file_inclusion_pattern,), black_list=black_list)
log_info = []
printf = []
for f in input_api.AffectedSourceFiles(source_file_filter):
contents = input_api.ReadFile(f, 'rb')
if input_api.re.search(r"\bD?LOG\s*\(\s*INFO\s*\)", contents):
log_info.append(f.LocalPath())
elif input_api.re.search(r"\bD?LOG_IF\s*\(\s*INFO\s*,", contents):
log_info.append(f.LocalPath())
if input_api.re.search(r"\bprintf\(", contents):
printf.append(f.LocalPath())
elif input_api.re.search(r"\bfprintf\((stdout|stderr)", contents):
printf.append(f.LocalPath())
if log_info:
return [output_api.PresubmitError(
'These files spam the console log with LOG(INFO):',
items=log_info)]
if printf:
return [output_api.PresubmitError(
'These files spam the console log with printf/fprintf:',
items=printf)]
return []
def _CheckForAnonymousVariables(input_api, output_api):
"""These types are all expected to hold locks while in scope and
so should never be anonymous (which causes them to be immediately
destroyed)."""
they_who_must_be_named = [
'base::AutoLock',
'base::AutoReset',
'base::AutoUnlock',
'SkAutoAlphaRestore',
'SkAutoBitmapShaderInstall',
'SkAutoBlitterChoose',
'SkAutoBounderCommit',
'SkAutoCallProc',
'SkAutoCanvasRestore',
'SkAutoCommentBlock',
'SkAutoDescriptor',
'SkAutoDisableDirectionCheck',
'SkAutoDisableOvalCheck',
'SkAutoFree',
'SkAutoGlyphCache',
'SkAutoHDC',
'SkAutoLockColors',
'SkAutoLockPixels',
'SkAutoMalloc',
'SkAutoMaskFreeImage',
'SkAutoMutexAcquire',
'SkAutoPathBoundsUpdate',
'SkAutoPDFRelease',
'SkAutoRasterClipValidate',
'SkAutoRef',
'SkAutoTime',
'SkAutoTrace',
'SkAutoUnref',
]
anonymous = r'(%s)\s*[({]' % '|'.join(they_who_must_be_named)
# bad: base::AutoLock(lock.get());
# not bad: base::AutoLock lock(lock.get());
bad_pattern = input_api.re.compile(anonymous)
# good: new base::AutoLock(lock.get())
good_pattern = input_api.re.compile(r'\bnew\s*' + anonymous)
errors = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.cc', '.h', '.inl', '.m', '.mm')):
continue
for linenum, line in f.ChangedContents():
if bad_pattern.search(line) and not good_pattern.search(line):
errors.append('%s:%d' % (f.LocalPath(), linenum))
if errors:
return [output_api.PresubmitError(
'These lines create anonymous variables that need to be named:',
items=errors)]
return []
def _CheckCygwinShell(input_api, output_api):
source_file_filter = lambda x: input_api.FilterSourceFile(
x, white_list=(r'.+\.(gyp|gypi)$',))
cygwin_shell = []
for f in input_api.AffectedSourceFiles(source_file_filter):
for linenum, line in f.ChangedContents():
if 'msvs_cygwin_shell' in line:
cygwin_shell.append(f.LocalPath())
break
if cygwin_shell:
return [output_api.PresubmitError(
'These files should not use msvs_cygwin_shell (the default is 0):',
items=cygwin_shell)]
return []
def _CheckUserActionUpdate(input_api, output_api):
"""Checks if any new user action has been added."""
if any('actions.xml' == input_api.os_path.basename(f) for f in
input_api.LocalPaths()):
# If actions.xml is already included in the changelist, the PRESUBMIT
# for actions.xml will do a more complete presubmit check.
return []
file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm'))
action_re = r'[^a-zA-Z]UserMetricsAction\("([^"]*)'
current_actions = None
for f in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in f.ChangedContents():
match = input_api.re.search(action_re, line)
if match:
# Loads contents in tools/metrics/actions/actions.xml to memory. It's
# loaded only once.
if not current_actions:
with open('tools/metrics/actions/actions.xml') as actions_f:
current_actions = actions_f.read()
# Search for the matched user action name in |current_actions|.
for action_name in match.groups():
action = 'name="{0}"'.format(action_name)
if action not in current_actions:
return [output_api.PresubmitPromptWarning(
'File %s line %d: %s is missing in '
'tools/metrics/actions/actions.xml. Please run '
'tools/metrics/actions/extract_actions.py to update.'
% (f.LocalPath(), line_num, action_name))]
return []
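# Illustrative sketch (the action name is made up): a newly added call such as
#   base::RecordAction(base::UserMetricsAction("MobileMenuSettings"));
# is flagged by the check above unless tools/metrics/actions/actions.xml
# already contains a matching entry, e.g. <action name="MobileMenuSettings">.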
def _GetJSONParseError(input_api, filename, eat_comments=True):
try:
contents = input_api.ReadFile(filename)
if eat_comments:
import sys
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(),
'tools', 'json_comment_eater')]
import json_comment_eater
finally:
sys.path = original_sys_path
contents = json_comment_eater.Nom(contents)
input_api.json.loads(contents)
except ValueError as e:
return e
return None
def _GetIDLParseError(input_api, filename):
try:
contents = input_api.ReadFile(filename)
idl_schema = input_api.os_path.join(
input_api.PresubmitLocalPath(),
'tools', 'json_schema_compiler', 'idl_schema.py')
process = input_api.subprocess.Popen(
[input_api.python_executable, idl_schema],
stdin=input_api.subprocess.PIPE,
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE,
universal_newlines=True)
(_, error) = process.communicate(input=contents)
return error or None
except ValueError as e:
return e
def _CheckParseErrors(input_api, output_api):
"""Check that IDL and JSON files do not contain syntax errors."""
actions = {
'.idl': _GetIDLParseError,
'.json': _GetJSONParseError,
}
# These paths contain test data and other known invalid JSON files.
excluded_patterns = [
r'test[\\\/]data[\\\/]',
r'^components[\\\/]policy[\\\/]resources[\\\/]policy_templates\.json$',
]
# Most JSON files are preprocessed and support comments, but these do not.
json_no_comments_patterns = [
r'^testing[\\\/]',
]
# Only run IDL checker on files in these directories.
idl_included_patterns = [
r'^chrome[\\\/]common[\\\/]extensions[\\\/]api[\\\/]',
r'^extensions[\\\/]common[\\\/]api[\\\/]',
]
def get_action(affected_file):
filename = affected_file.LocalPath()
return actions.get(input_api.os_path.splitext(filename)[1])
def MatchesFile(patterns, path):
for pattern in patterns:
if input_api.re.search(pattern, path):
return True
return False
def FilterFile(affected_file):
action = get_action(affected_file)
if not action:
return False
path = affected_file.LocalPath()
if MatchesFile(excluded_patterns, path):
return False
if (action == _GetIDLParseError and
not MatchesFile(idl_included_patterns, path)):
return False
return True
results = []
for affected_file in input_api.AffectedFiles(
file_filter=FilterFile, include_deletes=False):
action = get_action(affected_file)
kwargs = {}
if (action == _GetJSONParseError and
MatchesFile(json_no_comments_patterns, affected_file.LocalPath())):
kwargs['eat_comments'] = False
parse_error = action(input_api,
affected_file.AbsoluteLocalPath(),
**kwargs)
if parse_error:
results.append(output_api.PresubmitError('%s could not be parsed: %s' %
(affected_file.LocalPath(), parse_error)))
return results
def _CheckJavaStyle(input_api, output_api):
"""Runs checkstyle on changed java files and returns errors if any exist."""
import sys
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools', 'android', 'checkstyle')]
import checkstyle
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
return checkstyle.RunCheckstyle(
input_api, output_api, 'tools/android/checkstyle/chromium-style-5.0.xml',
black_list=_EXCLUDED_PATHS + input_api.DEFAULT_BLACK_LIST)
def _CheckIpcOwners(input_api, output_api):
"""Checks that affected files involving IPC have an IPC OWNERS rule.
Whether or not a file affects IPC is determined by a simple whitelist of
filename patterns."""
file_patterns = [
'*_messages.cc',
'*_messages*.h',
'*_param_traits*.*',
'*.mojom',
'*_struct_traits*.*',
'*_type_converter*.*',
# Blink uses a different file naming convention
'*StructTraits*.*',
'*TypeConverter*.*',
]
# Dictionary mapping an OWNERS file path to Patterns.
# Patterns is a dictionary mapping glob patterns (suitable for use in per-file
# rules ) to a PatternEntry.
# PatternEntry is a dictionary with two keys:
# - 'files': the files that are matched by this pattern
# - 'rules': the per-file rules needed for this pattern
# For example, if we expect OWNERS file to contain rules for *.mojom and
# *_struct_traits*.*, Patterns might look like this:
# {
# '*.mojom': {
# 'files': ...,
# 'rules': [
# 'per-file *.mojom=set noparent',
# 'per-file *.mojom=file://ipc/SECURITY_OWNERS',
# ],
# },
# '*_struct_traits*.*': {
# 'files': ...,
# 'rules': [
# 'per-file *_struct_traits*.*=set noparent',
# 'per-file *_struct_traits*.*=file://ipc/SECURITY_OWNERS',
# ],
# },
# }
to_check = {}
# Iterate through the affected files to see what we actually need to check
# for. We should only nag patch authors about per-file rules if a file in that
# directory would match that pattern. If a directory only contains *.mojom
# files and no *_messages*.h files, we should only nag about rules for
# *.mojom files.
for f in input_api.change.AffectedFiles(include_deletes=False):
for pattern in file_patterns:
if input_api.fnmatch.fnmatch(
input_api.os_path.basename(f.LocalPath()), pattern):
owners_file = input_api.os_path.join(
input_api.os_path.dirname(f.LocalPath()), 'OWNERS')
if owners_file not in to_check:
to_check[owners_file] = {}
if pattern not in to_check[owners_file]:
to_check[owners_file][pattern] = {
'files': [],
'rules': [
'per-file %s=set noparent' % pattern,
'per-file %s=file://ipc/SECURITY_OWNERS' % pattern,
]
}
to_check[owners_file][pattern]['files'].append(f)
break
# Now go through the OWNERS files we collected, filtering out rules that are
# already present in that OWNERS file.
for owners_file, patterns in to_check.iteritems():
try:
with file(owners_file) as f:
lines = set(f.read().splitlines())
for entry in patterns.itervalues():
entry['rules'] = [rule for rule in entry['rules'] if rule not in lines
]
except IOError:
# No OWNERS file, so all the rules are definitely missing.
continue
# All the remaining lines weren't found in OWNERS files, so emit an error.
errors = []
for owners_file, patterns in to_check.iteritems():
missing_lines = []
files = []
for pattern, entry in patterns.iteritems():
missing_lines.extend(entry['rules'])
files.extend([' %s' % f.LocalPath() for f in entry['files']])
if missing_lines:
errors.append(
'%s is missing the following lines:\n\n%s\n\nfor changed files:\n%s' %
(owners_file, '\n'.join(missing_lines), '\n'.join(files)))
results = []
if errors:
if input_api.is_committing:
output = output_api.PresubmitError
else:
output = output_api.PresubmitPromptWarning
results.append(output(
'Found changes to IPC files without a security OWNER!',
long_text='\n\n'.join(errors)))
return results
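# Illustrative sketch (the directory is hypothetical): if foo/bar/widget.mojom
# is touched, the check above expects foo/bar/OWNERS to contain lines like
#   per-file *.mojom=set noparent
#   per-file *.mojom=file://ipc/SECURITY_OWNERS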
def _CheckMojoUsesNewWrapperTypes(input_api, output_api):
"""Checks to make sure that all newly added mojom targets map array/map/string
to STL (for chromium) or WTF (for blink) types.
TODO(yzshen): remove this check once crbug.com/624136 is completed.
"""
files = []
pattern = input_api.re.compile(r'use_new_wrapper_types.*false',
input_api.re.MULTILINE)
for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith(('.gyp', '.gypi', '.gn', '.gni')):
continue
for _, line in f.ChangedContents():
if pattern.search(line):
files.append(f)
break
if len(files):
return [output_api.PresubmitError(
'Do not introduce new mojom targets with use_new_wrapper_types set to '
'false. The mode is deprecated and will be removed soon.',
files)]
return []
def _CheckAndroidToastUsage(input_api, output_api):
"""Checks that code uses org.chromium.ui.widget.Toast instead of
android.widget.Toast (Chromium Toast doesn't force hardware
acceleration on low-end devices, saving memory).
"""
toast_import_pattern = input_api.re.compile(
r'^import android\.widget\.Toast;$')
errors = []
sources = lambda affected_file: input_api.FilterSourceFile(
affected_file,
black_list=(_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST +
(r'^chromecast[\\\/].*',
r'^remoting[\\\/].*')),
white_list=(r'.*\.java$',))
for f in input_api.AffectedSourceFiles(sources):
for line_num, line in f.ChangedContents():
if toast_import_pattern.search(line):
errors.append("%s:%d" % (f.LocalPath(), line_num))
results = []
if errors:
results.append(output_api.PresubmitError(
'android.widget.Toast usage is detected. Android toasts use hardware'
' acceleration, and can be\ncostly on low-end devices. Please use'
' org.chromium.ui.widget.Toast instead.\n'
'Contact dskiba@chromium.org if you have any questions.',
errors))
return results
def _CheckAndroidCrLogUsage(input_api, output_api):
"""Checks that new logs using org.chromium.base.Log:
- Are using 'TAG' as variable name for the tags (warn)
- Are using a tag that is shorter than 20 characters (error)
"""
# Do not check format of logs in //chrome/android/webapk because
# //chrome/android/webapk cannot depend on //base
cr_log_check_excluded_paths = [
r"^chrome[\\\/]android[\\\/]webapk[\\\/].*",
]
cr_log_import_pattern = input_api.re.compile(
r'^import org\.chromium\.base\.Log;$', input_api.re.MULTILINE)
class_in_base_pattern = input_api.re.compile(
r'^package org\.chromium\.base;$', input_api.re.MULTILINE)
has_some_log_import_pattern = input_api.re.compile(
r'^import .*\.Log;$', input_api.re.MULTILINE)
# Extract the tag from lines like `Log.d(TAG, "*");` or `Log.d("TAG", "*");`
log_call_pattern = input_api.re.compile(r'^\s*Log\.\w\((?P<tag>\"?\w+\"?)\,')
log_decl_pattern = input_api.re.compile(
r'^\s*private static final String TAG = "(?P<name>(.*))";',
input_api.re.MULTILINE)
REF_MSG = ('See docs/android_logging.md '
'or contact dgn@chromium.org for more info.')
sources = lambda x: input_api.FilterSourceFile(x, white_list=(r'.*\.java$',),
black_list=cr_log_check_excluded_paths)
tag_decl_errors = []
tag_length_errors = []
tag_errors = []
tag_with_dot_errors = []
util_log_errors = []
for f in input_api.AffectedSourceFiles(sources):
file_content = input_api.ReadFile(f)
has_modified_logs = False
# Per line checks
if (cr_log_import_pattern.search(file_content) or
(class_in_base_pattern.search(file_content) and
not has_some_log_import_pattern.search(file_content))):
# Checks to run for files using cr log
for line_num, line in f.ChangedContents():
# Check if the new line is doing some logging
match = log_call_pattern.search(line)
if match:
has_modified_logs = True
# Make sure it uses "TAG"
if not match.group('tag') == 'TAG':
tag_errors.append("%s:%d" % (f.LocalPath(), line_num))
else:
# Report non cr Log function calls in changed lines
for line_num, line in f.ChangedContents():
if log_call_pattern.search(line):
util_log_errors.append("%s:%d" % (f.LocalPath(), line_num))
# Per file checks
if has_modified_logs:
# Make sure the tag is using the "cr" prefix and is not too long
match = log_decl_pattern.search(file_content)
tag_name = match.group('name') if match else None
if not tag_name:
tag_decl_errors.append(f.LocalPath())
elif len(tag_name) > 20:
tag_length_errors.append(f.LocalPath())
elif '.' in tag_name:
tag_with_dot_errors.append(f.LocalPath())
results = []
if tag_decl_errors:
results.append(output_api.PresubmitPromptWarning(
        'Please define your tags using the suggested format:\n'
'"private static final String TAG = "<package tag>".\n'
'They will be prepended with "cr_" automatically.\n' + REF_MSG,
tag_decl_errors))
if tag_length_errors:
results.append(output_api.PresubmitError(
'The tag length is restricted by the system to be at most '
'20 characters.\n' + REF_MSG,
tag_length_errors))
if tag_errors:
results.append(output_api.PresubmitPromptWarning(
'Please use a variable named "TAG" for your log tags.\n' + REF_MSG,
tag_errors))
if util_log_errors:
results.append(output_api.PresubmitPromptWarning(
'Please use org.chromium.base.Log for new logs.\n' + REF_MSG,
util_log_errors))
if tag_with_dot_errors:
results.append(output_api.PresubmitPromptWarning(
        'Dots in log tags cause them to be elided in crash reports.\n' + REF_MSG,
tag_with_dot_errors))
return results
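# Illustrative sketch of Java code that satisfies the checks above (class and
# tag names are made up):
#
#   import org.chromium.base.Log;
#   ...
#   private static final String TAG = "MyFeature";  // <= 20 chars, no dot
#   ...
#   Log.d(TAG, "Something happened");               // tag variable named TAG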
def _CheckAndroidNewMdpiAssetLocation(input_api, output_api):
"""Checks if MDPI assets are placed in a correct directory."""
file_filter = lambda f: (f.LocalPath().endswith('.png') and
('/res/drawable/' in f.LocalPath() or
'/res/drawable-ldrtl/' in f.LocalPath()))
errors = []
for f in input_api.AffectedFiles(include_deletes=False,
file_filter=file_filter):
errors.append(' %s' % f.LocalPath())
results = []
if errors:
results.append(output_api.PresubmitError(
'MDPI assets should be placed in /res/drawable-mdpi/ or '
        '/res/drawable-ldrtl-mdpi/\ninstead of /res/drawable/ and '
'/res/drawable-ldrtl/.\n'
'Contact newt@chromium.org if you have questions.', errors))
return results
class PydepsChecker(object):
def __init__(self, input_api, pydeps_files):
self._file_cache = {}
self._input_api = input_api
self._pydeps_files = pydeps_files
def _LoadFile(self, path):
"""Returns the list of paths within a .pydeps file relative to //."""
if path not in self._file_cache:
with open(path) as f:
self._file_cache[path] = f.read()
return self._file_cache[path]
def _ComputeNormalizedPydepsEntries(self, pydeps_path):
"""Returns an interable of paths within the .pydep, relativized to //."""
os_path = self._input_api.os_path
pydeps_dir = os_path.dirname(pydeps_path)
entries = (l.rstrip() for l in self._LoadFile(pydeps_path).splitlines()
if not l.startswith('*'))
return (os_path.normpath(os_path.join(pydeps_dir, e)) for e in entries)
def _CreateFilesToPydepsMap(self):
"""Returns a map of local_path -> list_of_pydeps."""
ret = {}
for pydep_local_path in self._pydeps_files:
for path in self._ComputeNormalizedPydepsEntries(pydep_local_path):
ret.setdefault(path, []).append(pydep_local_path)
return ret
def ComputeAffectedPydeps(self):
"""Returns an iterable of .pydeps files that might need regenerating."""
affected_pydeps = set()
file_to_pydeps_map = None
for f in self._input_api.AffectedFiles(include_deletes=True):
local_path = f.LocalPath()
if local_path == 'DEPS':
return self._pydeps_files
elif local_path.endswith('.pydeps'):
if local_path in self._pydeps_files:
affected_pydeps.add(local_path)
elif local_path.endswith('.py'):
if file_to_pydeps_map is None:
file_to_pydeps_map = self._CreateFilesToPydepsMap()
affected_pydeps.update(file_to_pydeps_map.get(local_path, ()))
return affected_pydeps
def DetermineIfStale(self, pydeps_path):
"""Runs print_python_deps.py to see if the files is stale."""
old_pydeps_data = self._LoadFile(pydeps_path).splitlines()
cmd = old_pydeps_data[1][1:].strip()
new_pydeps_data = self._input_api.subprocess.check_output(
cmd + ' --output ""', shell=True)
if old_pydeps_data[2:] != new_pydeps_data.splitlines()[2:]:
return cmd
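# Illustrative sketch of the .pydeps layout DetermineIfStale() relies on (file
# names and the command are hypothetical): the second line carries the
# regeneration command, and everything from the third line on is the
# dependency list that gets compared against a fresh run.
#
#   # Generated by running:
#   #   build/print_python_deps.py --root build/android --output build/android/foo.pydeps foo.py
#   ../../third_party/catapult/devil/devil/__init__.py
#   foo.py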
def _CheckPydepsNeedsUpdating(input_api, output_api, checker_for_tests=None):
"""Checks if a .pydeps file needs to be regenerated."""
# This check is mainly for Android, and involves paths not only in the
# PRESUBMIT.py, but also in the .pydeps files. It doesn't work on Windows and
# Mac, so skip it on other platforms.
if input_api.platform != 'linux2':
return []
# TODO(agrieve): Update when there's a better way to detect this: crbug/570091
is_android = input_api.os_path.exists('third_party/android_tools')
pydeps_files = _ALL_PYDEPS_FILES if is_android else _GENERIC_PYDEPS_FILES
results = []
# First, check for new / deleted .pydeps.
for f in input_api.AffectedFiles(include_deletes=True):
if f.LocalPath().endswith('.pydeps'):
if f.Action() == 'D' and f.LocalPath() in _ALL_PYDEPS_FILES:
results.append(output_api.PresubmitError(
'Please update _ALL_PYDEPS_FILES within //PRESUBMIT.py to '
'remove %s' % f.LocalPath()))
elif f.Action() != 'D' and f.LocalPath() not in _ALL_PYDEPS_FILES:
results.append(output_api.PresubmitError(
'Please update _ALL_PYDEPS_FILES within //PRESUBMIT.py to '
'include %s' % f.LocalPath()))
if results:
return results
checker = checker_for_tests or PydepsChecker(input_api, pydeps_files)
for pydep_path in checker.ComputeAffectedPydeps():
try:
cmd = checker.DetermineIfStale(pydep_path)
if cmd:
results.append(output_api.PresubmitError(
'File is stale: %s\nTo regenerate, run:\n\n %s' %
(pydep_path, cmd)))
except input_api.subprocess.CalledProcessError as error:
return [output_api.PresubmitError('Error running: %s' % error.cmd,
long_text=error.output)]
return results
def _CheckForCopyrightedCode(input_api, output_api):
"""Verifies that newly added code doesn't contain copyrighted material
and is properly licensed under the standard Chromium license.
As there can be false positives, we maintain a whitelist file. This check
also verifies that the whitelist file is up to date.
"""
import sys
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools')]
from copyright_scanner import copyright_scanner
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
return copyright_scanner.ScanAtPresubmit(input_api, output_api)
def _CheckSingletonInHeaders(input_api, output_api):
"""Checks to make sure no header files have |Singleton<|."""
def FileFilter(affected_file):
# It's ok for base/memory/singleton.h to have |Singleton<|.
black_list = (_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST +
(r"^base[\\\/]memory[\\\/]singleton\.h$",))
return input_api.FilterSourceFile(affected_file, black_list=black_list)
pattern = input_api.re.compile(r'(?<!class\sbase::)Singleton\s*<')
files = []
for f in input_api.AffectedSourceFiles(FileFilter):
if (f.LocalPath().endswith('.h') or f.LocalPath().endswith('.hxx') or
f.LocalPath().endswith('.hpp') or f.LocalPath().endswith('.inl')):
contents = input_api.ReadFile(f)
for line in contents.splitlines(False):
if (not line.lstrip().startswith('//') and # Strip C++ comment.
pattern.search(line)):
files.append(f)
break
if files:
return [output_api.PresubmitError(
'Found base::Singleton<T> in the following header files.\n' +
'Please move them to an appropriate source file so that the ' +
'template gets instantiated in a single compilation unit.',
files) ]
return []
def _CheckNoDeprecatedCompiledResourcesGYP(input_api, output_api):
"""Checks for old style compiled_resources.gyp files."""
is_compiled_resource = lambda fp: fp.endswith('compiled_resources.gyp')
added_compiled_resources = filter(is_compiled_resource, [
f.LocalPath() for f in input_api.AffectedFiles() if f.Action() == 'A'
])
if not added_compiled_resources:
return []
return [output_api.PresubmitError(
"Found new compiled_resources.gyp files:\n%s\n\n"
"compiled_resources.gyp files are deprecated,\n"
"please use compiled_resources2.gyp instead:\n"
"https://chromium.googlesource.com/chromium/src/+/master/docs/closure_compilation.md"
%
"\n".join(added_compiled_resources))]
_DEPRECATED_CSS = [
# Values
( "-webkit-box", "flex" ),
( "-webkit-inline-box", "inline-flex" ),
( "-webkit-flex", "flex" ),
( "-webkit-inline-flex", "inline-flex" ),
( "-webkit-min-content", "min-content" ),
( "-webkit-max-content", "max-content" ),
# Properties
( "-webkit-background-clip", "background-clip" ),
( "-webkit-background-origin", "background-origin" ),
( "-webkit-background-size", "background-size" ),
( "-webkit-box-shadow", "box-shadow" ),
# Functions
( "-webkit-gradient", "gradient" ),
( "-webkit-repeating-gradient", "repeating-gradient" ),
( "-webkit-linear-gradient", "linear-gradient" ),
( "-webkit-repeating-linear-gradient", "repeating-linear-gradient" ),
( "-webkit-radial-gradient", "radial-gradient" ),
( "-webkit-repeating-radial-gradient", "repeating-radial-gradient" ),
]
def _CheckNoDeprecatedCSS(input_api, output_api):
""" Make sure that we don't use deprecated CSS
properties, functions or values. Our external
documentation and iOS CSS for dom distiller
      (reader mode) are ignored by the hooks as they
      need to be consumed by WebKit. """
results = []
file_inclusion_pattern = (r".+\.css$",)
black_list = (_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST +
(r"^chrome/common/extensions/docs",
r"^chrome/docs",
r"^components/dom_distiller/core/css/distilledpage_ios.css",
r"^components/flags_ui/resources/apple_flags.css",
r"^components/neterror/resources/neterror.css",
r"^native_client_sdk"))
file_filter = lambda f: input_api.FilterSourceFile(
f, white_list=file_inclusion_pattern, black_list=black_list)
for fpath in input_api.AffectedFiles(file_filter=file_filter):
for line_num, line in fpath.ChangedContents():
for (deprecated_value, value) in _DEPRECATED_CSS:
if deprecated_value in line:
results.append(output_api.PresubmitError(
"%s:%d: Use of deprecated CSS %s, use %s instead" %
(fpath.LocalPath(), line_num, deprecated_value, value)))
return results
_DEPRECATED_JS = [
( "__lookupGetter__", "Object.getOwnPropertyDescriptor" ),
( "__defineGetter__", "Object.defineProperty" ),
( "__defineSetter__", "Object.defineProperty" ),
]
def _CheckNoDeprecatedJS(input_api, output_api):
"""Make sure that we don't use deprecated JS in Chrome code."""
results = []
file_inclusion_pattern = (r".+\.js$",) # TODO(dbeam): .html?
black_list = (_EXCLUDED_PATHS + _TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
file_filter = lambda f: input_api.FilterSourceFile(
f, white_list=file_inclusion_pattern, black_list=black_list)
for fpath in input_api.AffectedFiles(file_filter=file_filter):
for lnum, line in fpath.ChangedContents():
for (deprecated, replacement) in _DEPRECATED_JS:
if deprecated in line:
results.append(output_api.PresubmitError(
"%s:%d: Use of deprecated JS %s, use %s instead" %
(fpath.LocalPath(), lnum, deprecated, replacement)))
return results
def _AndroidSpecificOnUploadChecks(input_api, output_api):
"""Groups checks that target android code."""
results = []
results.extend(_CheckAndroidCrLogUsage(input_api, output_api))
results.extend(_CheckAndroidNewMdpiAssetLocation(input_api, output_api))
results.extend(_CheckAndroidToastUsage(input_api, output_api))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api,
excluded_paths=_EXCLUDED_PATHS + _TESTRUNNER_PATHS))
results.extend(_CheckAuthorizedAuthor(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
results.extend(_CheckDCHECK_IS_ONHasBraces(input_api, output_api))
results.extend(_CheckNoNewWStrings(input_api, output_api))
results.extend(_CheckNoDEPSGIT(input_api, output_api))
results.extend(_CheckNoBannedFunctions(input_api, output_api))
results.extend(_CheckNoPragmaOnce(input_api, output_api))
results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(_CheckFilePermissions(input_api, output_api))
results.extend(_CheckNoAuraWindowPropertyHInHeaders(input_api, output_api))
results.extend(_CheckIncludeOrder(input_api, output_api))
results.extend(_CheckForVersionControlConflicts(input_api, output_api))
results.extend(_CheckPatchFiles(input_api, output_api))
results.extend(_CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api))
results.extend(_CheckNoAbbreviationInPngFileName(input_api, output_api))
results.extend(_CheckForInvalidOSMacros(input_api, output_api))
results.extend(_CheckForInvalidIfDefinedMacros(input_api, output_api))
results.extend(_CheckFlakyTestUsage(input_api, output_api))
results.extend(_CheckAddedDepsHaveTargetApprovals(input_api, output_api))
results.extend(
input_api.canned_checks.CheckChangeHasNoTabs(
input_api,
output_api,
source_file_filter=lambda x: x.LocalPath().endswith('.grd')))
results.extend(_CheckSpamLogging(input_api, output_api))
results.extend(_CheckForAnonymousVariables(input_api, output_api))
results.extend(_CheckCygwinShell(input_api, output_api))
results.extend(_CheckUserActionUpdate(input_api, output_api))
results.extend(_CheckNoDeprecatedCSS(input_api, output_api))
results.extend(_CheckNoDeprecatedJS(input_api, output_api))
results.extend(_CheckParseErrors(input_api, output_api))
results.extend(_CheckForIPCRules(input_api, output_api))
results.extend(_CheckForCopyrightedCode(input_api, output_api))
results.extend(_CheckForWindowsLineEndings(input_api, output_api))
results.extend(_CheckSingletonInHeaders(input_api, output_api))
results.extend(_CheckNoDeprecatedCompiledResourcesGYP(input_api, output_api))
results.extend(_CheckPydepsNeedsUpdating(input_api, output_api))
results.extend(_CheckJavaStyle(input_api, output_api))
results.extend(_CheckIpcOwners(input_api, output_api))
results.extend(_CheckMojoUsesNewWrapperTypes(input_api, output_api))
if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
input_api, output_api,
input_api.PresubmitLocalPath(),
whitelist=[r'^PRESUBMIT_test\.py$']))
return results
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if not any(input_api.fnmatch.fnmatch(author.lower(), valid)
for valid in valid_authors):
input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _DidYouMeanOSMacro(bad_macro):
try:
return {'A': 'OS_ANDROID',
'B': 'OS_BSD',
'C': 'OS_CHROMEOS',
'F': 'OS_FREEBSD',
'L': 'OS_LINUX',
'M': 'OS_MACOSX',
'N': 'OS_NACL',
'O': 'OS_OPENBSD',
'P': 'OS_POSIX',
'S': 'OS_SOLARIS',
'W': 'OS_WIN'}[bad_macro[3].upper()]
except KeyError:
return ''
def _CheckForInvalidOSMacrosInFile(input_api, f):
"""Check for sensible looking, totally invalid OS macros."""
preprocessor_statement = input_api.re.compile(r'^\s*#')
os_macro = input_api.re.compile(r'defined\((OS_[^)]+)\)')
results = []
for lnum, line in f.ChangedContents():
if preprocessor_statement.search(line):
for match in os_macro.finditer(line):
if not match.group(1) in _VALID_OS_MACROS:
good = _DidYouMeanOSMacro(match.group(1))
did_you_mean = ' (did you mean %s?)' % good if good else ''
results.append(' %s:%d %s%s' % (f.LocalPath(),
lnum,
match.group(1),
did_you_mean))
return results
def _CheckForInvalidOSMacros(input_api, output_api):
"""Check all affected files for invalid OS macros."""
bad_macros = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith(('.py', '.js', '.html', '.css', '.md')):
bad_macros.extend(_CheckForInvalidOSMacrosInFile(input_api, f))
if not bad_macros:
return []
return [output_api.PresubmitError(
'Possibly invalid OS macro[s] found. Please fix your code\n'
'or add your macro to src/PRESUBMIT.py.', bad_macros)]
def _CheckForInvalidIfDefinedMacrosInFile(input_api, f):
"""Check all affected files for invalid "if defined" macros."""
ALWAYS_DEFINED_MACROS = (
"TARGET_CPU_PPC",
"TARGET_CPU_PPC64",
"TARGET_CPU_68K",
"TARGET_CPU_X86",
"TARGET_CPU_ARM",
"TARGET_CPU_MIPS",
"TARGET_CPU_SPARC",
"TARGET_CPU_ALPHA",
"TARGET_IPHONE_SIMULATOR",
"TARGET_OS_EMBEDDED",
"TARGET_OS_IPHONE",
"TARGET_OS_MAC",
"TARGET_OS_UNIX",
"TARGET_OS_WIN32",
)
ifdef_macro = input_api.re.compile(r'^\s*#.*(?:ifdef\s|defined\()([^\s\)]+)')
results = []
for lnum, line in f.ChangedContents():
for match in ifdef_macro.finditer(line):
if match.group(1) in ALWAYS_DEFINED_MACROS:
always_defined = ' %s is always defined. ' % match.group(1)
did_you_mean = 'Did you mean \'#if %s\'?' % match.group(1)
results.append(' %s:%d %s\n\t%s' % (f.LocalPath(),
lnum,
always_defined,
did_you_mean))
return results
def _CheckForInvalidIfDefinedMacros(input_api, output_api):
"""Check all affected files for invalid "if defined" macros."""
bad_macros = []
for f in input_api.AffectedFiles():
if f.LocalPath().endswith(('.h', '.c', '.cc', '.m', '.mm')):
bad_macros.extend(_CheckForInvalidIfDefinedMacrosInFile(input_api, f))
if not bad_macros:
return []
return [output_api.PresubmitError(
'Found ifdef check on always-defined macro[s]. Please fix your code\n'
'or check the list of ALWAYS_DEFINED_MACROS in src/PRESUBMIT.py.',
bad_macros)]
def _CheckForIPCRules(input_api, output_api):
"""Check for same IPC rules described in
http://www.chromium.org/Home/chromium-security/education/security-tips-for-ipc
"""
base_pattern = r'IPC_ENUM_TRAITS\('
inclusion_pattern = input_api.re.compile(r'(%s)' % base_pattern)
comment_pattern = input_api.re.compile(r'//.*(%s)' % base_pattern)
problems = []
for f in input_api.AffectedSourceFiles(None):
local_path = f.LocalPath()
if not local_path.endswith('.h'):
continue
for line_number, line in f.ChangedContents():
if inclusion_pattern.search(line) and not comment_pattern.search(line):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitPromptWarning(
_IPC_ENUM_TRAITS_DEPRECATED, problems)]
else:
return []
def _CheckForWindowsLineEndings(input_api, output_api):
"""Check source code and known ascii text files for Windows style line
endings.
"""
known_text_files = r'.*\.(txt|html|htm|mhtml|py|gyp|gypi|gn|isolate)$'
file_inclusion_pattern = (
known_text_files,
r'.+%s' % _IMPLEMENTATION_EXTENSIONS
)
filter = lambda f: input_api.FilterSourceFile(
f, white_list=file_inclusion_pattern, black_list=None)
files = [f.LocalPath() for f in
input_api.AffectedSourceFiles(filter)]
problems = []
for file in files:
fp = open(file, 'r')
for line in fp:
if line.endswith('\r\n'):
problems.append(file)
break
fp.close()
if problems:
return [output_api.PresubmitPromptWarning('Are you sure that you want '
'these files to contain Windows style line endings?\n' +
'\n'.join(problems))]
return []
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckValidHostsInDEPS(input_api, output_api))
results.extend(
input_api.canned_checks.CheckGNFormatted(input_api, output_api))
results.extend(_CheckUmaHistogramChanges(input_api, output_api))
results.extend(_AndroidSpecificOnUploadChecks(input_api, output_api))
return results
def GetTryServerMasterForBot(bot):
"""Returns the Try Server master for the given bot.
It tries to guess the master from the bot name, but may still fail
and return None. There is no longer a default master.
"""
# Potentially ambiguous bot names are listed explicitly.
master_map = {
'chromium_presubmit': 'master.tryserver.chromium.linux',
'tools_build_presubmit': 'master.tryserver.chromium.linux',
}
master = master_map.get(bot)
if not master:
if 'android' in bot:
master = 'master.tryserver.chromium.android'
elif 'linux' in bot or 'presubmit' in bot:
master = 'master.tryserver.chromium.linux'
elif 'win' in bot:
master = 'master.tryserver.chromium.win'
elif 'mac' in bot or 'ios' in bot:
master = 'master.tryserver.chromium.mac'
return master
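# Illustrative examples of the guessing above (bot names are hypothetical):
#   GetTryServerMasterForBot('android_compile_dbg')  -> 'master.tryserver.chromium.android'
#   GetTryServerMasterForBot('win8_chromium_rel_ng') -> 'master.tryserver.chromium.win'
#   GetTryServerMasterForBot('some_other_bot')       -> None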
def GetDefaultTryConfigs(bots):
"""Returns a list of ('bot', set(['tests']), filtered by [bots].
"""
builders_and_tests = dict((bot, set(['defaulttests'])) for bot in bots)
# Build up the mapping from tryserver master to bot/test.
out = dict()
for bot, tests in builders_and_tests.iteritems():
out.setdefault(GetTryServerMasterForBot(bot), {})[bot] = tests
return out
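# Illustrative sketch (hypothetical bot names):
#   GetDefaultTryConfigs(['linux_chromium_rel_ng', 'mac_chromium_rel_ng'])
#   # -> {'master.tryserver.chromium.linux':
#   #         {'linux_chromium_rel_ng': set(['defaulttests'])},
#   #     'master.tryserver.chromium.mac':
#   #         {'mac_chromium_rel_ng': set(['defaulttests'])}}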
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
# Make sure the tree is 'open'.
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api,
output_api,
json_url='http://chromium-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckChangeHasBugField(
input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
return results
|
danakj/chromium
|
PRESUBMIT.py
|
Python
|
bsd-3-clause
| 83,575
|
[
"VisIt"
] |
468f6f82e1645ed2389335e749bf7abee24801e72461ef7039a94841303ef886
|
# coding: utf-8
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import random
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_integer_types,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
compat_xml_parse_error,
)
from ..downloader.f4m import (
get_base_url,
remove_encrypted_media,
)
from ..utils import (
NO_DEFAULT,
age_restricted,
base_url,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
determine_protocol,
error_to_compat_str,
ExtractorError,
extract_attributes,
fix_xml_ampersands,
float_or_none,
GeoRestrictedError,
GeoUtils,
int_or_none,
js_to_json,
JSON_LD_RE,
mimetype2ext,
orderedSet,
parse_codecs,
parse_duration,
parse_iso8601,
parse_m3u8_attributes,
RegexNotFoundError,
sanitized_Request,
sanitize_filename,
unescapeHTML,
unified_strdate,
unified_timestamp,
update_Request,
update_url_query,
urljoin,
url_basename,
url_or_none,
xpath_element,
xpath_text,
xpath_with_ns,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* manifest_url
The URL of the manifest file in case of
fragmented media (DASH, hls, hds)
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* fragment_base_url
Base URL for fragments. Each fragment's path
value (if present) will be relative to
this URL.
* fragments A list of fragments of a fragmented media.
Each fragment entry must contain either an url
or a path. If an url is present it should be
considered by a client. Otherwise both path and
fragment_base_url must be present. Here is
the list of all potential fields:
* "url" - fragment's URL
* "path" - fragment's path relative to
fragment_base_url
* "duration" (optional, int or float)
* "filesize" (optional, int)
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
* downloader_options A dictionary of downloader options as
described in FileDownloader
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
channel: Full name of the channel the video is uploaded on.
Note that channel fields may or may not repeat uploader
fields. This depends on a particular extractor.
channel_id: Id of the channel.
channel_url: Full URL to a channel webpage.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{tag: subformats}. "tag" is usually a language code, and
"subformats" is a list sorted from lower to higher
preference, each element is a dictionary with the "ext"
entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users, the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is a
live stream that goes on instead of a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
chapters: A list of dictionaries, with the following entries:
* "start_time" - The start time of the chapter in seconds
* "end_time" - The end time of the chapter in seconds
* "title" (optional, string)
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series, programme or podcast:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "id", "title", "description", "uploader",
"uploader_id", "uploader_url" attributes with the same semantics as videos
(see above).
_type "multi_video" indicates that there are multiple videos that
    form a single show, for example multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
Probably, they should also be added to the list of extractors.
_GEO_BYPASS attribute may be set to False in order to disable
geo restriction bypass mechanisms for a particular extractor.
Though it won't disable explicit geo restriction bypass based on
country code provided with geo_bypass_country.
_GEO_COUNTRIES attribute may contain a list of presumably geo unrestricted
countries for this extractor. One of these countries will be used by
geo restriction bypass mechanism right away in order to bypass
geo restriction, of course, if the mechanism is not disabled.
_GEO_IP_BLOCKS attribute may contain a list of presumably geo unrestricted
IP blocks in CIDR notation for this extractor. One of these IP blocks
will be used by geo restriction bypass mechanism similarly
to _GEO_COUNTRIES.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
"""
_ready = False
_downloader = None
_x_forwarded_for_ip = None
_GEO_BYPASS = True
_GEO_COUNTRIES = None
_GEO_IP_BLOCKS = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self._x_forwarded_for_ip = None
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return compat_str(m.group('id'))
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
self._initialize_geo_bypass({
'countries': self._GEO_COUNTRIES,
'ip_blocks': self._GEO_IP_BLOCKS,
})
if not self._ready:
self._real_initialize()
self._ready = True
def _initialize_geo_bypass(self, geo_bypass_context):
"""
Initialize geo restriction bypass mechanism.
This method is used to initialize geo bypass mechanism based on faking
X-Forwarded-For HTTP header. A random country from provided country list
is selected and a random IP belonging to this country is generated. This
IP will be passed as X-Forwarded-For HTTP header in all subsequent
HTTP requests.
This method will be used for initial geo bypass mechanism initialization
during the instance initialization with _GEO_COUNTRIES and
_GEO_IP_BLOCKS.
You may also manually call it from extractor's code if geo bypass
information is not available beforehand (e.g. obtained during
extraction) or due to some other reason. In this case you should pass
this information in geo bypass context passed as first argument. It may
contain following fields:
countries: List of geo unrestricted countries (similar
to _GEO_COUNTRIES)
ip_blocks: List of geo unrestricted IP blocks in CIDR notation
(similar to _GEO_IP_BLOCKS)
"""
if not self._x_forwarded_for_ip:
# Geo bypass mechanism is explicitly disabled by user
if not self._downloader.params.get('geo_bypass', True):
return
if not geo_bypass_context:
geo_bypass_context = {}
# Backward compatibility: previously _initialize_geo_bypass
# expected a list of countries, some 3rd party code may still use
# it this way
if isinstance(geo_bypass_context, (list, tuple)):
geo_bypass_context = {
'countries': geo_bypass_context,
}
# The whole point of geo bypass mechanism is to fake IP
# as X-Forwarded-For HTTP header based on some IP block or
# country code.
# Path 1: bypassing based on IP block in CIDR notation
# Explicit IP block specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
# Otherwise use random IP block from geo bypass context but only
# if extractor is known as geo bypassable
if not ip_block:
ip_blocks = geo_bypass_context.get('ip_blocks')
if self._GEO_BYPASS and ip_blocks:
ip_block = random.choice(ip_blocks)
if ip_block:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s as X-Forwarded-For.'
% self._x_forwarded_for_ip)
return
# Path 2: bypassing based on country code
# Explicit country code specified by user, use it right away
# regardless of whether extractor is geo bypassable or not
country = self._downloader.params.get('geo_bypass_country', None)
# Otherwise use random country code from geo bypass context but
# only if extractor is known as geo bypassable
if not country:
countries = geo_bypass_context.get('countries')
if self._GEO_BYPASS and countries:
country = random.choice(countries)
if country:
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
if self._downloader.params.get('verbose', False):
self._downloader.to_screen(
'[debug] Using fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country.upper()))
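    # Illustrative sketch (the values are made up): an extractor that learns
    # about geo restrictions only during extraction could call the method
    # above manually, e.g.:
    #
    #   self._initialize_geo_bypass({
    #       'countries': ['US', 'DE'],
    #       'ip_blocks': ['203.0.113.0/24'],
    #   })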
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
for _ in range(2):
try:
self.initialize()
ie_result = self._real_extract(url)
if self._x_forwarded_for_ip:
ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
return ie_result
except GeoRestrictedError as e:
if self.__maybe_fake_ip_and_retry(e.countries):
continue
raise
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def __maybe_fake_ip_and_retry(self, countries):
if (not self._downloader.params.get('geo_bypass_country', None) and
self._GEO_BYPASS and
self._downloader.params.get('geo_bypass', True) and
not self._x_forwarded_for_ip and
countries):
country_code = random.choice(countries)
self._x_forwarded_for_ip = GeoUtils.random_ipv4(country_code)
if self._x_forwarded_for_ip:
self.report_warning(
'Video is geo restricted. Retrying extraction with fake IP %s (%s) as X-Forwarded-For.'
% (self._x_forwarded_for_ip, country_code.upper()))
return True
return False
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
@staticmethod
def __can_accept_status_code(err, expected_status):
assert isinstance(err, compat_urllib_error.HTTPError)
if expected_status is None:
return False
if isinstance(expected_status, compat_integer_types):
return err.code == expected_status
elif isinstance(expected_status, (list, tuple)):
return err.code in expected_status
elif callable(expected_status):
return expected_status(err.code) is True
else:
assert False
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={},
query={}, expected_status=None):
"""
Return the response handle.
See _download_webpage docstring for arguments specification.
"""
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
# Some sites check the X-Forwarded-For HTTP header in order to figure out
# the origin of the client behind a proxy. This allows bypassing geo
# restriction by faking this header's value to an IP that belongs to some
# geo unrestricted country. We will do so once we encounter any
# geo restriction error.
if self._x_forwarded_for_ip:
if 'X-Forwarded-For' not in headers:
headers['X-Forwarded-For'] = self._x_forwarded_for_ip
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if isinstance(err, compat_urllib_error.HTTPError):
if self.__can_accept_status_code(err, expected_status):
# Retain reference to error to prevent file object from
# being closed before it can be read. Works around the
# effects of <https://bugs.python.org/issue15002>
# introduced in Python 3.4.1.
err.fp._error = err
return err.fp
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return a tuple (page content as string, URL handle).
See _download_webpage docstring for arguments specification.
"""
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers,
query=query, expected_status=expected_status)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def __check_blocked(self, content):
first_block = content[:512]
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in first_block):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in first_block:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
if ('<title>TTK :: Доступ к ресурсу ограничен</title>' in content and
'blocklist.rkn.gov.ru' in content):
raise ExtractorError(
'Access to this webpage has been blocked by decision of the Russian government. '
'Visit http://blocklist.rkn.gov.ru/ for a block reason.',
expected=True)
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None,
encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
self.to_screen('Dumping request to ' + urlh.geturl())
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
basen = '%s_%s' % (video_id, urlh.geturl())
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
self.__check_blocked(content)
return content
def _download_webpage(
self, url_or_request, video_id, note=None, errnote=None,
fatal=True, tries=1, timeout=5, encoding=None, data=None,
headers={}, query={}, expected_status=None):
"""
Return the data of the page as a string.
Arguments:
url_or_request -- plain text URL as a string or
a compat_urllib_request.Request object
video_id -- Video/playlist/item identifier (string)
Keyword arguments:
note -- note printed before downloading (string)
errnote -- note printed in case of an error (string)
fatal -- flag denoting whether the error should be considered fatal,
i.e. whether it should cause ExtractorError to be raised;
otherwise a warning will be reported and extraction continued
tries -- number of tries
timeout -- sleep interval between tries
encoding -- encoding for a page content decoding, guessed automatically
when not explicitly specified
data -- POST data (bytes)
headers -- HTTP headers (dict)
query -- URL query (dict)
expected_status -- allows accepting failed HTTP requests (non-2xx
status code) by explicitly specifying a set of accepted status
codes. Can be any of the following entities:
- an integer type specifying an exact failed status code to
accept
- a list or a tuple of integer types specifying a list of
failed status codes to accept
- a callable accepting an actual failed status code and
returning True if it should be accepted
Note that this argument does not affect success status codes (2xx)
which are always accepted.
"""
success = False
try_count = 0
while success is False:
try:
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
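# Usage sketch (illustrative only): expected_status accepts an int, a
# list/tuple of ints or a callable, e.g. to keep the body of a 404
# response instead of failing:
#
#   webpage = self._download_webpage(
#       url, video_id, expected_status=404)
#
#   webpage = self._download_webpage(
#       url, video_id, expected_status=lambda status: 400 <= status < 500)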
def _download_xml_handle(
self, url_or_request, video_id, note='Downloading XML',
errnote='Unable to download XML', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (xml as an xml.etree.ElementTree.Element, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
xml_string, urlh = res
return self._parse_xml(
xml_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_xml(
self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None,
data=None, headers={}, query={}, expected_status=None):
"""
Return the xml as an xml.etree.ElementTree.Element.
See _download_webpage docstring for arguments specification.
"""
res = self._download_xml_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
def _parse_xml(self, xml_string, video_id, transform_source=None, fatal=True):
if transform_source:
xml_string = transform_source(xml_string)
try:
return compat_etree_fromstring(xml_string.encode('utf-8'))
except compat_xml_parse_error as ve:
errmsg = '%s: Failed to parse XML ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def _download_json_handle(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return a tuple (JSON object, URL handle).
See _download_webpage docstring for arguments specification.
"""
res = self._download_webpage_handle(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query,
expected_status=expected_status)
if res is False:
return res
json_string, urlh = res
return self._parse_json(
json_string, video_id, transform_source=transform_source,
fatal=fatal), urlh
def _download_json(
self, url_or_request, video_id, note='Downloading JSON metadata',
errnote='Unable to download JSON metadata', transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={},
expected_status=None):
"""
Return the JSON object as a dict.
See _download_webpage docstring for arguments specification.
"""
res = self._download_json_handle(
url_or_request, video_id, note=note, errnote=errnote,
transform_source=transform_source, fatal=fatal, encoding=encoding,
data=data, headers=headers, query=query,
expected_status=expected_status)
return res if res is False else res[0]
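# Illustrative sketch: a typical metadata fetch with extra query parameters
# and headers (the URL and parameter names are hypothetical):
#
#   data = self._download_json(
#       'https://example.com/api/video', video_id,
#       query={'id': video_id}, headers={'Referer': url},
#       note='Downloading video metadata', fatal=False) or {}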
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None):
raise GeoRestrictedError(msg, countries=countries)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
def playlist_from_matches(self, matches, playlist_id=None, playlist_title=None, getter=None, ie=None):
urls = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urls, playlist_id=playlist_id, playlist_title=playlist_title)
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
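# Illustrative sketch: url_result and playlist_result are commonly combined
# to defer entries to other extractors (URL, ie key and entry_ids below are
# hypothetical):
#
#   entries = [
#       self.url_result('https://example.com/watch/%s' % eid, ie='Example')
#       for eid in entry_ids]
#   return self.playlist_result(entries, playlist_id, playlist_title)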
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single pattern or a
list of patterns, returning the first matching group.
In case of failure return a default value, report a warning or raise a
RegexNotFoundError, depending on fatal, specifying the field name.
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
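# Usage sketch (illustrative only): a list of patterns is tried in order,
# and a named group can be selected explicitly:
#
#   title = self._search_regex(
#       [r'<h1[^>]*>([^<]+)</h1>', r'"title"\s*:\s*"([^"]+)"'],
#       webpage, 'title')
#   video_id = self._search_regex(
#       r'data-video-id=["\'](?P<id>\d+)', webpage, 'video id', group='id')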
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError(
'No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning(
'parsing .netrc: %s' % error_to_compat_str(err))
return username, password
def _get_login_info(self, username_option='username', password_option='password', netrc_machine=None):
"""
Get the login info as (username, password)
First look for manually specified credentials using username_option
and password_option as keys in the params dictionary. If no such credentials
are available, look in the netrc file using the netrc_machine or
_NETRC_MACHINE value.
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get(username_option) is not None:
username = downloader_params[username_option]
password = downloader_params[password_option]
else:
username, password = self._get_netrc_login_info(netrc_machine)
return username, password
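# Illustrative sketch: extractors typically call this from a _login helper
# invoked by _real_initialize and bail out when no credentials are set:
#
#   username, password = self._get_login_info()
#   if username is None:
#       return
#   # ... submit the login form using username/password ...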
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO - asking the user will be required for sms/phone verification;
currently this just uses the command line option
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
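# Usage sketch (illustrative only): the OpenGraph and <meta> helpers are
# usually combined with fallbacks:
#
#   title = (self._og_search_title(webpage, default=None)
#            or self._html_search_meta('twitter:title', webpage, 'title'))
#   thumbnail = self._og_search_thumbnail(webpage)
#   description = self._og_search_description(webpage)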
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta(
'isFamilyFriendly', html, default=None)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
JSON_LD_RE, html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
INTERACTION_TYPE_MAP = {
'CommentAction': 'comment',
'AgreeAction': 'like',
'DisagreeAction': 'dislike',
'LikeAction': 'like',
'DislikeAction': 'dislike',
'ListenAction': 'view',
'WatchAction': 'view',
'ViewAction': 'view',
}
def extract_interaction_statistic(e):
interaction_statistic = e.get('interactionStatistic')
if not isinstance(interaction_statistic, list):
return
for is_e in interaction_statistic:
if not isinstance(is_e, dict):
continue
if is_e.get('@type') != 'InteractionCounter':
continue
interaction_type = is_e.get('interactionType')
if not isinstance(interaction_type, compat_str):
continue
interaction_count = int_or_none(is_e.get('userInteractionCount'))
if interaction_count is None:
continue
count_kind = INTERACTION_TYPE_MAP.get(interaction_type.split('/')[-1])
if not count_kind:
continue
count_key = '%s_count' % count_kind
if info.get(count_key) is not None:
continue
info[count_key] = interaction_count
def extract_video_object(e):
assert e['@type'] == 'VideoObject'
info.update({
'url': url_or_none(e.get('contentUrl')),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': url_or_none(e.get('thumbnailUrl') or e.get('thumbnailURL')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
'view_count': int_or_none(e.get('interactionCount')),
})
extract_interaction_statistic(e)
for e in json_ld:
if isinstance(e.get('@context'), compat_str) and re.match(r'^https?://schema.org/?$', e.get('@context')):
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type in ('TVEpisode', 'Episode'):
episode_name = unescapeHTML(e.get('name'))
info.update({
'episode': episode_name,
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
if not info.get('title') and episode_name:
info['title'] = episode_name
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') in (
'TVSeason', 'Season', 'CreativeWorkSeason'):
info.update({
'season': unescapeHTML(part_of_season.get('name')),
'season_number': int_or_none(part_of_season.get('seasonNumber')),
})
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') in (
'TVSeries', 'Series', 'CreativeWorkSeries'):
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Movie':
info.update({
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('dateCreated')),
})
elif item_type in ('Article', 'NewsArticle'):
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
extract_video_object(e)
continue
video = e.get('video')
if isinstance(video, dict) and video.get('@type') == 'VideoObject':
extract_video_object(video)
break
return dict((k, v) for k, v in info.items() if v is not None)
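# Illustrative sketch: JSON-LD metadata is usually merged into an info dict
# built by other means:
#
#   info = self._search_json_ld(
#       webpage, video_id, expected_type='VideoObject', default={})
#   info.update({'id': video_id, 'formats': formats})
#   return info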
@staticmethod
def _hidden_inputs(html):
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)(<input[^>]+>)', html):
attrs = extract_attributes(input)
if not attrs:
continue
if attrs.get('type') not in ('hidden', 'submit'):
continue
name = attrs.get('name') or attrs.get('id')
value = attrs.get('value')
if name and value is not None:
hidden_inputs[name] = value
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
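# Usage sketch (illustrative only): hidden form inputs are typically reused
# when submitting a login form (the form id, field names and login_url are
# hypothetical; urlencode_postdata comes from youtube_dl.utils):
#
#   login_form = self._form_hidden_inputs('login-form', webpage)
#   login_form.update({'username': username, 'password': password})
#   response = self._download_webpage(
#       login_url, None, 'Logging in', data=urlencode_postdata(login_form))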
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
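# Illustrative sketch: field_preference overrides the default ordering, e.g.
# to rank formats by height first and then by total bitrate:
#
#   self._sort_formats(formats, field_preference=('height', 'tbr', 'format_id'))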
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video', headers={}):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs are always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item, headers=headers)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
manifest_base_url = get_base_url(manifest)
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
vcodec = None
mime_type = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}mimeType', '{http://ns.adobe.com/f4m/2.0}mimeType'],
'base URL', default=None)
if mime_type and mime_type.startswith('audio/'):
vcodec = 'none'
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((manifest_base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself an f4m manifest do the recursive extraction
# since bitrates in the parent manifest (this one) and the media_url
# manifest may differ, leading to an inability to resolve the format
# by the requested bitrate in the f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
'vcodec': vcodec,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'manifest_url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'protocol': 'f4m',
'tbr': tbr,
'width': width,
'height': height,
'vcodec': vcodec,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
return self._parse_m3u8_formats(
m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
preference=preference, m3u8_id=m3u8_id, live=live)
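# Usage sketch (illustrative only; the master playlist URL is hypothetical):
#
#   formats = self._extract_m3u8_formats(
#       'https://example.com/master.m3u8', video_id, 'mp4',
#       entry_protocol='m3u8_native', m3u8_id='hls', fatal=False)
#   self._sort_formats(formats)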
def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, live=False):
if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access
return []
if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay
return []
formats = []
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
# References:
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21
# 2. https://github.com/rg3/youtube-dl/issues/12211
# 3. https://github.com/rg3/youtube-dl/issues/18923
# We should try extracting formats only from master playlists [1, 4.3.4],
# i.e. playlists that describe available qualities. On the other hand
# media playlists [1, 4.3.3] should be returned as is since they contain
# just the media without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 4.3.3, 4.3.4]
# master playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [1, 4.3.3.1] #EXT-X-TARGETDURATION tag is REQUIRED for every
# media playlist and MUST NOT appear in master playlist thus we can
# clearly detect media playlist with this criterion.
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
groups = {}
last_stream_inf = {}
def extract_media(x_media_line):
media = parse_m3u8_attributes(x_media_line)
# As per [1, 4.3.4.1] TYPE, GROUP-ID and NAME are REQUIRED
media_type, group_id, name = media.get('TYPE'), media.get('GROUP-ID'), media.get('NAME')
if not (media_type and group_id and name):
return
groups.setdefault(group_id, []).append(media)
if media_type not in ('VIDEO', 'AUDIO'):
return
media_url = media.get('URI')
if media_url:
format_id = []
for v in (m3u8_id, group_id, name):
if v:
format_id.append(v)
f = {
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'manifest_url': m3u8_url,
'language': media.get('LANGUAGE'),
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}
if media_type == 'AUDIO':
f['vcodec'] = 'none'
formats.append(f)
def build_stream_name():
# Despite the specification not mentioning the NAME attribute for
# the EXT-X-STREAM-INF tag, it may still sometimes be present (see [1]
# or the vidio test in TestInfoExtractor.test_parse_m3u8_formats)
# 1. http://www.vidio.com/watch/165683-dj_ambred-booyah-live-2015
stream_name = last_stream_inf.get('NAME')
if stream_name:
return stream_name
# If there is no NAME in EXT-X-STREAM-INF it will be obtained
# from corresponding rendition group
stream_group_id = last_stream_inf.get('VIDEO')
if not stream_group_id:
return
stream_group = groups.get(stream_group_id)
if not stream_group:
return stream_group_id
rendition = stream_group[0]
return rendition.get('NAME') or stream_group_id
# parse EXT-X-MEDIA tags before EXT-X-STREAM-INF in order to have the
# chance to detect video only formats when EXT-X-STREAM-INF tags
# precede EXT-X-MEDIA tags in HLS manifest such as [3].
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-MEDIA:'):
extract_media(line)
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_stream_inf = parse_m3u8_attributes(line)
elif line.startswith('#') or not line.strip():
continue
else:
tbr = float_or_none(
last_stream_inf.get('AVERAGE-BANDWIDTH') or
last_stream_inf.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
stream_name = build_stream_name()
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
manifest_url = format_url(line.strip())
f = {
'format_id': '-'.join(format_id),
'url': manifest_url,
'manifest_url': m3u8_url,
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_stream_inf.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_stream_inf.get('RESOLUTION')
if resolution:
mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution)
if mobj:
f['width'] = int(mobj.group('width'))
f['height'] = int(mobj.group('height'))
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
codecs = parse_codecs(last_stream_inf.get('CODECS'))
f.update(codecs)
audio_group_id = last_stream_inf.get('AUDIO')
# As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which
# references a rendition group MUST have a CODECS attribute.
# However, this is not always respected, for example, [2]
# contains EXT-X-STREAM-INF tag which references AUDIO
# rendition group but does not have CODECS and despite
# referencing an audio group it represents a complete
# (with audio and video) format. So, for such cases we will
# ignore references to rendition groups and treat them
# as complete formats.
if audio_group_id and codecs and f.get('vcodec') != 'none':
audio_group = groups.get(audio_group_id)
if audio_group and audio_group[0].get('URI'):
# TODO: update acodec for audio only formats with
# the same GROUP-ID
f['acodec'] = 'none'
formats.append(f)
last_stream_inf = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
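# Illustrative sketch: _xpath_ns expands each path component with the given
# namespace so the result can be used with ElementTree's find/findall, e.g.
#
#   self._xpath_ns('./head/meta', 'http://www.w3.org/ns/SMIL')
#   # -> './{http://www.w3.org/ns/SMIL}head/{http://www.w3.org/ns/SMIL}meta'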
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
elif src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
elif src_ext == 'mpd':
formats.extend(self._extract_mpd_formats(
src_url, video_id, mpd_id='dash', fatal=False))
elif re.search(r'\.ism/[Mm]anifest', src_url):
formats.extend(self._extract_ism_formats(
src_url, video_id, ism_id='mss', fatal=False))
elif src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get(
'lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, xspf_url, playlist_id, fatal=True):
xspf = self._download_xml(
xspf_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(
xspf, playlist_id, xspf_url=xspf_url,
xspf_base_url=base_url(xspf_url))
def _parse_xspf(self, xspf_doc, playlist_id, xspf_url=None, xspf_base_url=None):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in xspf_doc.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = []
for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP)):
format_url = urljoin(xspf_base_url, location.text)
if not format_url:
continue
formats.append({
'url': format_url,
'manifest_url': xspf_url,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
})
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_xml_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd_doc, urlh = res
mpd_base_url = base_url(urlh.geturl())
return self._parse_mpd_formats(
mpd_doc, mpd_id=mpd_id, mpd_base_url=mpd_base_url,
formats_dict=formats_dict, mpd_url=mpd_url)
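# Usage sketch (illustrative only; the manifest URL is hypothetical):
#
#   formats.extend(self._extract_mpd_formats(
#       'https://example.com/manifest.mpd', video_id,
#       mpd_id='dash', fatal=False))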
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}, mpd_url=None):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
# As per [1, 5.3.9.2.2] SegmentList and SegmentTemplate share some
# common attributes and elements. We will only extract what is
# relevant for us.
def extract_common(source):
segment_timeline = source.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
start_number = source.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
timescale = source.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = source.get('duration')
if segment_duration:
ms_info['segment_duration'] = float(segment_duration)
def extract_Initialization(source):
initialization = source.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
extract_common(segment_list)
extract_Initialization(segment_list)
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
extract_common(segment_template)
media = segment_template.get('media')
if media:
ms_info['media'] = media
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization'] = initialization
else:
extract_Initialization(segment_template)
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type in ('video', 'audio'):
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(
url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
bandwidth = int_or_none(representation_attrib.get('bandwidth'))
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'manifest_url': mpd_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': float_or_none(bandwidth, 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
'container': mimetype2ext(mime_type) + '_dash',
}
f.update(parse_codecs(representation_attrib.get('codecs')))
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
def prepare_template(template_name, identifiers):
tmpl = representation_ms_info[template_name]
# First off, % characters outside $...$ templates
# must be escaped by doubling for proper processing
# by the % operator string formatting used further on (see
# https://github.com/rg3/youtube-dl/issues/16867).
t = ''
in_template = False
for c in tmpl:
t += c
if c == '$':
in_template = not in_template
elif c == '%' and not in_template:
t += c
# Next, $...$ templates are translated to their
# %(...) counterparts to be used with % operator
t = t.replace('$RepresentationID$', representation_id)
t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
t = t.replace('$$', '$')
return t
# @initialization is a regular template like @media one
# so it should be handled just the same way (see
# https://github.com/rg3/youtube-dl/issues/11605)
if 'initialization' in representation_ms_info:
initialization_template = prepare_template(
'initialization',
# As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
# $Time$ shall not be included for @initialization thus
# only $Bandwidth$ remains
('Bandwidth',))
representation_ms_info['initialization_url'] = initialization_template % {
'Bandwidth': bandwidth,
}
def location_key(location):
return 'url' if re.match(r'^https?://', location) else 'path'
if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
media_location_key = location_key(media_template)
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template and 's' not in representation_ms_info:
segment_duration = None
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float_or_none(representation_ms_info['segment_duration'],
representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
representation_ms_info['fragments'] = [{
media_location_key: media_template % {
'Number': segment_number,
'Bandwidth': bandwidth,
},
'duration': segment_duration,
} for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
# $Number*$ or $Time$ in media template with S list available
# Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
# Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
representation_ms_info['fragments'] = []
segment_time = 0
segment_d = None
segment_number = representation_ms_info['start_number']
def add_segment_url():
segment_url = media_template % {
'Time': segment_time,
'Bandwidth': bandwidth,
'Number': segment_number,
}
representation_ms_info['fragments'].append({
media_location_key: segment_url,
'duration': float_or_none(segment_d, representation_ms_info['timescale']),
})
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
segment_d = s['d']
add_segment_url()
segment_number += 1
for r in range(s.get('r', 0)):
segment_time += segment_d
add_segment_url()
segment_number += 1
segment_time += segment_d
elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
# No media template
# Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
# or any YouTube dashsegments video
fragments = []
segment_index = 0
timescale = representation_ms_info['timescale']
for s in representation_ms_info['s']:
duration = float_or_none(s['d'], timescale)
for r in range(s.get('r', 0) + 1):
segment_uri = representation_ms_info['segment_urls'][segment_index]
fragments.append({
location_key(segment_uri): segment_uri,
'duration': duration,
})
segment_index += 1
representation_ms_info['fragments'] = fragments
elif 'segment_urls' in representation_ms_info:
# Segment URLs with no SegmentTimeline
# Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
# https://github.com/rg3/youtube-dl/pull/14844
fragments = []
segment_duration = float_or_none(
representation_ms_info['segment_duration'],
representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
for segment_url in representation_ms_info['segment_urls']:
fragment = {
location_key(segment_url): segment_url,
}
if segment_duration:
fragment['duration'] = segment_duration
fragments.append(fragment)
representation_ms_info['fragments'] = fragments
# NB: MPD manifest may contain direct URLs to unfragmented media.
# No fragments key is present in this case.
if 'fragments' in representation_ms_info:
f.update({
'fragment_base_url': base_url,
'fragments': [],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url']
if not f.get('url'):
f['url'] = initialization_url
f['fragments'].append({location_key(initialization_url): initialization_url})
f['fragments'].extend(representation_ms_info['fragments'])
# According to [1, 5.3.5.2, Table 7, page 35] @id of Representation
# is not necessarily unique within a Period thus formats with
# the same `format_id` are quite possible. There are numerous examples
# of such manifests (see https://github.com/rg3/youtube-dl/issues/15111,
# https://github.com/rg3/youtube-dl/issues/13919)
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True):
res = self._download_xml_handle(
ism_url, video_id,
note=note or 'Downloading ISM manifest',
errnote=errnote or 'Failed to download ISM manifest',
fatal=fatal)
if res is False:
return []
ism_doc, urlh = res
return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
"""
Parse formats from ISM manifest.
References:
1. [MS-SSTR]: Smooth Streaming Protocol,
https://msdn.microsoft.com/en-us/library/ff469518.aspx
"""
if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
return []
duration = int(ism_doc.attrib['Duration'])
timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000
formats = []
for stream in ism_doc.findall('StreamIndex'):
stream_type = stream.get('Type')
if stream_type not in ('video', 'audio'):
continue
url_pattern = stream.attrib['Url']
stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
stream_name = stream.get('Name')
for track in stream.findall('QualityLevel'):
fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
# TODO: add support for WVC1 and WMAP
if fourcc not in ('H264', 'AVC1', 'AACL'):
self.report_warning('%s is not a supported codec' % fourcc)
continue
tbr = int(track.attrib['Bitrate']) // 1000
# [1] does not mention Width and Height attributes. However,
# they're often present while MaxWidth and MaxHeight are
# missing, so should be used as fallbacks
width = int_or_none(track.get('MaxWidth') or track.get('Width'))
height = int_or_none(track.get('MaxHeight') or track.get('Height'))
sampling_rate = int_or_none(track.get('SamplingRate'))
track_url_pattern = re.sub(r'{[Bb]itrate}', track.attrib['Bitrate'], url_pattern)
track_url_pattern = compat_urlparse.urljoin(ism_url, track_url_pattern)
fragments = []
fragment_ctx = {
'time': 0,
}
stream_fragments = stream.findall('c')
for stream_fragment_index, stream_fragment in enumerate(stream_fragments):
fragment_ctx['time'] = int_or_none(stream_fragment.get('t')) or fragment_ctx['time']
fragment_repeat = int_or_none(stream_fragment.get('r')) or 1
fragment_ctx['duration'] = int_or_none(stream_fragment.get('d'))
if not fragment_ctx['duration']:
try:
next_fragment_time = int(stream_fragment[stream_fragment_index + 1].attrib['t'])
except IndexError:
next_fragment_time = duration
fragment_ctx['duration'] = (next_fragment_time - fragment_ctx['time']) / fragment_repeat
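# i.e. when @d is absent, the duration is taken as the gap to the next fragment's
# start time (or to the stream end for the last fragment), spread evenly over the @r repeats.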
for _ in range(fragment_repeat):
fragments.append({
'url': re.sub(r'{start[ _]time}', compat_str(fragment_ctx['time']), track_url_pattern),
'duration': fragment_ctx['duration'] / stream_timescale,
})
fragment_ctx['time'] += fragment_ctx['duration']
format_id = []
if ism_id:
format_id.append(ism_id)
if stream_name:
format_id.append(stream_name)
format_id.append(compat_str(tbr))
formats.append({
'format_id': '-'.join(format_id),
'url': ism_url,
'manifest_url': ism_url,
'ext': 'ismv' if stream_type == 'video' else 'isma',
'width': width,
'height': height,
'tbr': tbr,
'asr': sampling_rate,
'vcodec': 'none' if stream_type == 'audio' else fourcc,
'acodec': 'none' if stream_type == 'video' else fourcc,
'protocol': 'ism',
'fragments': fragments,
'_download_params': {
'duration': duration,
'timescale': stream_timescale,
'width': width or 0,
'height': height or 0,
'fourcc': fourcc,
'codec_private_data': track.get('CodecPrivateData'),
'sampling_rate': sampling_rate,
'channels': int_or_none(track.get('Channels', 2)),
'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
},
})
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8',
mpd_id=None, preference=None):
def absolute_url(item_url):
return urljoin(base_url, item_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
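# Illustrative: parse_content_type('video/mp4; codecs="avc1.4d401e, mp4a.40.2"') yields
# roughly {'ext': 'mp4', 'vcodec': 'avc1.4d401e', 'acodec': 'mp4a.40.2'} (exact keys depend on parse_codecs).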
def _media_formats(src, cur_media_type, type_info={}):
full_url = absolute_url(src)
ext = type_info.get('ext') or determine_ext(full_url)
if ext == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
preference=preference, fatal=False)
elif ext == 'mpd':
is_plain_url = False
formats = self._extract_mpd_formats(
full_url, video_id, mpd_id=mpd_id, fatal=False)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
# amp-video and amp-audio are very similar to their HTML5 counterparts
# so we will include them right here (see
# https://www.ampproject.org/docs/reference/components/amp-video)
media_tags = [(media_tag, media_type, '')
for media_tag, media_type
in re.findall(r'(?s)(<(?:amp-)?(video|audio)[^>]*/>)', webpage)]
media_tags.extend(re.findall(
# We only allow video|audio followed by a whitespace or '>'.
# Allowing more characters may end up in significant slow down (see
# https://github.com/rg3/youtube-dl/issues/11979, example URL:
# http://www.porntrex.com/maps/videositemap.xml).
r'(?s)(<(?P<tag>(?:amp-)?(?:video|audio))(?:\s+[^>]*)?>)(.*?)</(?P=tag)>', webpage))
for media_tag, media_type, media_content in media_tags:
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = absolute_url(media_attributes.get('poster'))
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
f = parse_content_type(source_attributes.get('type'))
is_plain_url, formats = _media_formats(src, media_type, f)
if is_plain_url:
# res attribute is not standard but seen several times
# in the wild
f.update({
'height': int_or_none(source_attributes.get('res')),
'format_id': source_attributes.get('label'),
})
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind in ('subtitles', 'captions'):
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
for f in media_info['formats']:
f.setdefault('http_headers', {})['Referer'] = base_url
if media_info['formats'] or media_info['subtitles']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
formats = []
hdcore_sign = 'hdcore=3.7.0'
f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
hds_host = hosts.get('hds')
if hds_host:
f4m_url = re.sub(r'(https?://)[^/]+', r'\1' + hds_host, f4m_url)
if 'hdcore=' not in f4m_url:
f4m_url += ('&' if '?' in f4m_url else '?') + hdcore_sign
f4m_formats = self._extract_f4m_formats(
f4m_url, video_id, f4m_id='hds', fatal=False)
for entry in f4m_formats:
entry.update({'extra_param_to_segment_url': hdcore_sign})
formats.extend(f4m_formats)
m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
hls_host = hosts.get('hls')
if hls_host:
m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
query = compat_urlparse.urlparse(url).query
url = re.sub(r'/(?:manifest|playlist|jwplayer)\.(?:m3u8|f4m|mpd|smil)', '', url)
mobj = re.search(
r'(?:(?:http|rtmp|rtsp)(?P<s>s)?:)?(?P<url>//[^?]+)', url)
url_base = mobj.group('url')
http_base_url = '%s%s:%s' % ('http', mobj.group('s') or '', url_base)
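# Illustrative: for 'rtmp://example.com/vod/mp4:clip' this gives url_base = '//example.com/vod/mp4:clip'
# and http_base_url = 'http://example.com/vod/mp4:clip'.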
formats = []
def manifest_url(manifest):
m_url = '%s/%s' % (http_base_url, manifest)
if query:
m_url += '?%s' % query
return m_url
if 'm3u8' not in skip_protocols:
formats.extend(self._extract_m3u8_formats(
manifest_url('playlist.m3u8'), video_id, 'mp4',
m3u8_entry_protocol, m3u8_id='hls', fatal=False))
if 'f4m' not in skip_protocols:
formats.extend(self._extract_f4m_formats(
manifest_url('manifest.f4m'),
video_id, f4m_id='hds', fatal=False))
if 'dash' not in skip_protocols:
formats.extend(self._extract_mpd_formats(
manifest_url('manifest.mpd'),
video_id, mpd_id='dash', fatal=False))
if re.search(r'(?:/smil:|\.smil)', url_base):
if 'smil' not in skip_protocols:
rtmp_formats = self._extract_smil_formats(
manifest_url('jwplayer.smil'),
video_id, fatal=False)
for rtmp_format in rtmp_formats:
rtsp_format = rtmp_format.copy()
rtsp_format['url'] = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'url': rtsp_format['url'].replace('rtmp://', 'rtsp://'),
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'protocol': 'rtsp',
})
formats.extend([rtmp_format, rtsp_format])
else:
for protocol in ('rtmp', 'rtsp'):
if protocol not in skip_protocols:
formats.append({
'url': '%s:%s' % (protocol, url_base),
'format_id': protocol,
'protocol': protocol,
})
return formats
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
mobj = re.search(
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
webpage)
if mobj:
try:
jwplayer_data = self._parse_json(mobj.group('options'),
video_id=video_id,
transform_source=transform_source)
except ExtractorError:
pass
else:
if isinstance(jwplayer_data, dict):
return jwplayer_data
def _extract_jwplayer_data(self, webpage, video_id, *args, **kwargs):
jwplayer_data = self._find_jwplayer_data(
webpage, video_id, transform_source=js_to_json)
return self._parse_jwplayer_data(
jwplayer_data, video_id, *args, **kwargs)
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
# JWPlayer backward compatibility: flattened playlists
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
if 'playlist' not in jwplayer_data:
jwplayer_data = {'playlist': [jwplayer_data]}
entries = []
# JWPlayer backward compatibility: single playlist item
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
if not isinstance(jwplayer_data['playlist'], list):
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
for video_data in jwplayer_data['playlist']:
# JWPlayer backward compatibility: flattened sources
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
if 'sources' not in video_data:
video_data['sources'] = [video_data]
this_video_id = video_id or video_data['mediaid']
formats = self._parse_jwplayer_formats(
video_data['sources'], video_id=this_video_id, m3u8_id=m3u8_id,
mpd_id=mpd_id, rtmp_params=rtmp_params, base_url=base_url)
subtitles = {}
tracks = video_data.get('tracks')
if tracks and isinstance(tracks, list):
for track in tracks:
if not isinstance(track, dict):
continue
track_kind = track.get('kind')
if not track_kind or not isinstance(track_kind, compat_str):
continue
if track_kind.lower() not in ('captions', 'subtitles'):
continue
track_url = urljoin(base_url, track.get('file'))
if not track_url:
continue
subtitles.setdefault(track.get('label') or 'en', []).append({
'url': self._proto_relative_url(track_url)
})
entry = {
'id': this_video_id,
'title': unescapeHTML(video_data['title'] if require_title else video_data.get('title')),
'description': video_data.get('description'),
'thumbnail': urljoin(base_url, self._proto_relative_url(video_data.get('image'))),
'timestamp': int_or_none(video_data.get('pubdate')),
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
'subtitles': subtitles,
}
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
entry.update({
'_type': 'url_transparent',
'url': formats[0]['url'],
})
else:
self._sort_formats(formats)
entry['formats'] = formats
entries.append(entry)
if len(entries) == 1:
return entries[0]
else:
return self.playlist_result(entries)
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
urls = []
formats = []
for source in jwplayer_sources_data:
if not isinstance(source, dict):
continue
source_url = urljoin(
base_url, self._proto_relative_url(source.get('file')))
if not source_url or source_url in urls:
continue
urls.append(source_url)
source_type = source.get('type') or ''
ext = mimetype2ext(source_type) or determine_ext(source_url)
if source_type == 'hls' or ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=m3u8_id, fatal=False))
elif source_type == 'dash' or ext == 'mpd':
formats.extend(self._extract_mpd_formats(
source_url, video_id, mpd_id=mpd_id, fatal=False))
elif ext == 'smil':
formats.extend(self._extract_smil_formats(
source_url, video_id, fatal=False))
# https://github.com/jwplayer/jwplayer/blob/master/src/js/providers/default.js#L67
elif source_type.startswith('audio') or ext in (
'oga', 'aac', 'mp3', 'mpeg', 'vorbis'):
formats.append({
'url': source_url,
'vcodec': 'none',
'ext': ext,
})
else:
height = int_or_none(source.get('height'))
if height is None:
# Often no height is provided but there is a label in
# format like "1080p", "720p SD", or 1080.
height = int_or_none(self._search_regex(
r'^(\d{3,4})[pP]?(?:\b|$)', compat_str(source.get('label') or ''),
'height', default=None))
a_format = {
'url': source_url,
'width': int_or_none(source.get('width')),
'height': height,
'tbr': int_or_none(source.get('bitrate')),
'ext': ext,
}
if source_url.startswith('rtmp'):
a_format['ext'] = 'flv'
# See com/longtailvideo/jwplayer/media/RTMPMediaProvider.as
# of jwplayer.flash.swf
rtmp_url_parts = re.split(
r'((?:mp4|mp3|flv):)', source_url, 1)
if len(rtmp_url_parts) == 3:
rtmp_url, prefix, play_path = rtmp_url_parts
a_format.update({
'url': rtmp_url,
'play_path': prefix + play_path,
})
if rtmp_params:
a_format.update(rtmp_params)
formats.append(a_format)
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
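# Illustrative: _live_title('Some stream') -> 'Some stream 2018-06-01 12:34' (suffix is the current local time).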
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None, port=None,
path='/', secure=False, discard=False, rest={}, **kwargs):
cookie = compat_cookiejar.Cookie(
0, name, value, port, port is not None, domain, True,
domain.startswith('.'), path, True, secure, expire_time,
discard, None, None, rest)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
def _generic_id(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
def _generic_title(self, url):
return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
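# Illustrative: with _SEARCH_KEY = 'ytsearch' this matches 'ytsearch:cats' (one result),
# 'ytsearch5:cats' (five results) and 'ytsearchall:cats' (up to _MAX_RESULTS).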
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning(
'%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
|
valmynd/MediaFetcher
|
src/plugins/youtube_dl/youtube_dl/extractor/common.py
|
Python
|
gpl-3.0
| 110,711
|
[
"VisIt"
] |
3220d5906779de864bfc77601946983ab0c52d90297b46e8bbbdd3aa633b3606
|
import os, sys
from shutil import copyfile
# Adds an allele to the DB
# Inputfile: (single or multiple) FASTA file with the gene and the allele sequence
# Source: site or origin the alleles came from. Note: must match the same name when adding ST data!
class AddAllele:
def __init__(self, inputfile, stfile, source):
workingdir = os.getcwd() + '/'
copyfile('/home/jonas/galaxy/tools/straintracer/GetMlst.class', workingdir + "/GetMlst.class")
os.system('java GetMlst -i %s %s' % (inputfile, source))
os.system('java GetMlst -ist %s %s' % (stfile, source))
AddAllele(sys.argv[1], sys.argv[2], sys.argv[3])
|
jeezes/Straintracer
|
AddAllele.py
|
Python
|
bsd-3-clause
| 623
|
[
"Galaxy"
] |
029b8789c2b47eb2ef83ae93d1cc30875a9dc8277129f00c751879b6cbef3c34
|
#!/usr/bin/env python
import os
import os.path
from sys import exit, argv
import json
import oauth2 as oauth
from StringIO import StringIO
from urlparse import parse_qsl
# Please don't use this key and secret if you create a new version of this script.
# You can request your own API key at https://dev.twitter.com/apps/new
# (If you fork my repo to merely submit a pull request then you don't need to change this.)
consumer_key = 'I5Qy02p5CrIXw8Sa9ohw'
consumer_secret = 'ubG7dkIS6g2cjYshXM6gtN6dSZEekKTRZMKgjYIv4'
max_tweets_per_request = 200
access_token_filepath = '~/.config/twitter-backup.py/access-token.json'
def get_access_token_from_twitter():
# Taken from https://github.com/simplegeo/python-oauth2#twitter-three-legged-oauth-example
request_token_url = 'https://api.twitter.com/oauth/request_token'
access_token_url = 'https://api.twitter.com/oauth/access_token'
authorize_url = 'https://api.twitter.com/oauth/authorize'
client = oauth.Client(consumer)
# Step 1: Get a request token. This is a temporary token that is used for
# having the user authorize an access token and to sign the request to obtain
# said access token.
resp, content = client.request(request_token_url, "GET")
if resp['status'] != '200':
raise Exception("Invalid response %s." % resp['status'])
request_token = dict(parse_qsl(content))
# Step 2: Redirect to the provider. Since this is a CLI script we do not
# redirect. In a web application you would redirect the user to the URL
# below.
print "Visit %s?oauth_token=%s" % (authorize_url, request_token['oauth_token'])
# After the user has granted access to you, the consumer, the provider will
# redirect you to whatever URL you have told them to redirect to. You can
# usually define this in the oauth_callback argument as well.
oauth_verifier = raw_input('What is the PIN? ')
# Step 3: Once the consumer has redirected the user back to the oauth_callback
# URL you can request the access token the user has approved. You use the
# request token to sign this request. After this is done you throw away the
# request token and use the access token returned. You should store this
# access token somewhere safe, like a database, for future use.
token = oauth.Token(request_token['oauth_token'], request_token['oauth_token_secret'])
token.set_verifier(oauth_verifier)
client = oauth.Client(consumer, token)
resp, content = client.request(access_token_url, "POST")
access_token = dict(parse_qsl(content))
if access_token == {}:
print 'Invalid PIN was given'
exit(1)
return access_token
def fetch_tweets(access_token, screen_name, max_id=None):
token = oauth.Token(access_token['oauth_token'], access_token['oauth_token_secret'])
client = oauth.Client(consumer, token)
screen_name = '' if screen_name==None else '&screen_name='+screen_name
max_id = '' if max_id==None else '&max_id='+str(max_id)
request_url = 'https://api.twitter.com/1.1/statuses/user_timeline.json?count=%d%s%s' % \
(max_tweets_per_request, screen_name, max_id)
response = client.request(request_url)
response_headers, response_body = response
tweets = json.load(StringIO(response_body))
return tweets
def get_earliest_tweet_id(tweets):
id = None
for tweet in tweets:
id = tweet['id']
return id
def save_tweets(json_object, filepath):
json_string = json.dumps(json_object, indent=4)
with open(filepath, 'w') as file:
file.write(json_string)
def save_access_token(token):
token_directory = os.path.dirname(get_access_token_file_path())
if not os.path.exists(token_directory):
os.makedirs(token_directory)
dumped_token = json.dumps(token)
with open(get_access_token_file_path(), 'w') as file:
file.write(dumped_token)
def load_access_token():
try:
with open(get_access_token_file_path(), 'r') as file:
access_token = json.load(file)
return access_token
except IOError:
return None
def get_access_token_file_path():
return os.path.expanduser(access_token_filepath)
def print_help():
print 'Usage: %s [SCREEN-NAME] | -h | --help' % (argv[0])
print 'Fetch the tweets of SCREEN-NAME'
print 'SCREEN-NAME is optional and defaults to the screen name of the authorizing user'
# Main program
if len(argv) >= 2:
if argv[1] in ['-h', '--help']:
print_help()
exit(0)
else:
screen_name = argv[1]
else:
screen_name = None
consumer = oauth.Consumer(consumer_key, consumer_secret)
access_token = load_access_token()
if access_token == None:
access_token = get_access_token_from_twitter()
save_access_token(access_token)
earliest_tweet_id = None
page_number = 1
tweet_index = 0
while True:
tweets = fetch_tweets(access_token, screen_name, earliest_tweet_id)
if len(tweets) > 0:
dest_filename = '%02d.json' % (page_number)
print 'Saving tweet %d to %d as %s' % (tweet_index, tweet_index+len(tweets), dest_filename)
save_tweets(tweets, dest_filename)
earliest_tweet_id = get_earliest_tweet_id(tweets)
page_number += 1
tweet_index += len(tweets)
if len(tweets) < max_tweets_per_request:
break
|
mondalaci/twitter-backup.py
|
twitter-backup.py
|
Python
|
gpl-3.0
| 5,347
|
[
"VisIt"
] |
06228e58145f49107858d00681319442e872d02ead5d4e0c9a14a32927f67bfd
|
##
# Copyright 2016-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing ADF, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
import easybuild.tools.environment as env
from easybuild.framework.easyblock import EasyBlock
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.run import run_cmd
class EB_ADF(EasyBlock):
"""Support for building/installing ADF."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for ADF."""
super(EB_ADF, self).__init__(*args, **kwargs)
self.build_in_installdir = True
def extract_step(self):
"""Extract sources."""
# strip off 'adf<version>' part to avoid having everything in a subdirectory
self.cfg['unpack_options'] = "--strip-components=1"
super(EB_ADF, self).extract_step()
def configure_step(self):
"""Custom configuration procedure for ADF."""
env.setvar('ADFHOME', self.installdir)
env.setvar('ADFBIN', os.path.join(self.installdir, 'bin'))
env.setvar('ADFRESOURCES', os.path.join(self.installdir, 'atomicdata'))
if self.cfg['license_file'] and os.path.exists(self.cfg['license_file']):
env.setvar('SCMLICENSE', self.cfg['license_file'])
else:
raise EasyBuildError("No or non-existing license file specified: %s", self.cfg['license_file'])
cmd = './Install/configure'
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def build_step(self):
"""No separate custom build procedure for ADF, see install step."""
pass
def install_step(self):
"""Custom install procedure for ADF."""
# bin/init.sh is required to build, so copy it from Install/init.sh
src_init_path = os.path.join('Install', 'init.sh')
target_init_path = os.path.join('bin', 'init.sh')
try:
shutil.copy2(src_init_path, target_init_path)
except OSError as err:
raise EasyBuildError("Failed to copy %s to %s: %s", src_init_path, target_init_path, err)
cmd = "./bin/foray -j %d" % self.cfg['parallel']
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def sanity_check_step(self):
"""Custom sanity check for ADF."""
custom_paths = {
'files': ['bin/adf'],
'dirs': ['atomicdata', 'examples'],
}
super(EB_ADF, self).sanity_check_step(custom_paths=custom_paths)
def make_module_extra(self):
"""Custom extra module file entries for ADF."""
txt = super(EB_ADF, self).make_module_extra()
txt += self.module_generator.set_environment('ADFHOME', self.installdir)
txt += self.module_generator.set_environment('ADFBIN', os.path.join(self.installdir, 'bin'))
txt += self.module_generator.set_environment('ADFRESOURCES', os.path.join(self.installdir, 'atomicdata'))
return txt
|
ULHPC/easybuild-easyblocks
|
easybuild/easyblocks/a/adf.py
|
Python
|
gpl-2.0
| 3,992
|
[
"ADF"
] |
82da38304ff24f8bdc44cf303e6869dc90ae94576fa96741ba5ec82515983484
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a number.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# NumberOption class
#
#-------------------------------------------------------------------------
class NumberOption(Option):
"""
This class describes an option that is a simple number with defined maximum
and minimum values.
"""
def __init__(self, label, value, min_val, max_val, step = 1):
"""
:param label: A friendly label to be applied to this option.
Example: "Number of generations to include"
:type label: string
:param value: An initial value for this option.
Example: 5
:type value: int
:param min_val: The minimum value for this option.
Example: 1
:type min_val: int
:param max_val: The maximum value for this option.
Example: 10
:type max_val: int
:param step: The step size for this option.
Example: 0.01
:type step: int or float
:return: nothing
"""
Option.__init__(self, label, value)
self.__min = min_val
self.__max = max_val
self.__step = step
def get_min(self):
"""
Get the minimum value for this option.
:return: an int that represents the minimum value for this option.
"""
return self.__min
def get_max(self):
"""
Get the maximum value for this option.
:return: an int that represents the maximum value for this option.
"""
return self.__max
def get_step(self):
"""
Get the step size for this option.
:return: an int that represents the step size for this option.
"""
return self.__step
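# Illustrative usage (not part of the original module):
# opt = NumberOption("Number of generations to include", 5, 1, 10)
# opt.get_min() -> 1, opt.get_max() -> 10, opt.get_step() -> 1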
|
sam-m888/gprime
|
gprime/plug/menu/_number.py
|
Python
|
gpl-2.0
| 2,802
|
[
"Brian"
] |
8be8c764e7b75fb32f81aaff49309f0a10a0df759e712f907ed94aeb281733af
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAffy(RPackage):
"""Methods for Affymetrix Oligonucleotide Arrays
The package contains functions for exploratory oligonucleotide array
analysis. The dependence on tkWidgets only concerns a few convenience
functions. 'affy' is fully functional without it."""
homepage = "https://bioconductor.org/packages/affy"
git = "https://git.bioconductor.org/packages/affy.git"
version('1.68.0', commit='1664399610c9aa519399445a2ef8bb9ea2233eac')
version('1.62.0', commit='097ab4aa98a1700c5fae65d07bed44a477714605')
version('1.60.0', commit='fcae363e58b322ad53584d9e15e80fa2f9d17206')
version('1.58.0', commit='4698231f45f225228f56c0708cd477ad450b4ee6')
version('1.56.0', commit='d36a7b8f05b1ef60162d94e75037d45c48f88871')
version('1.54.0', commit='a815f02906fcf491b28ed0a356d6fce95a6bd20e')
depends_on('r@2.8.0:4.0', type=('build', 'run'), when='@:1.68.0')
depends_on('r-biocgenerics@0.1.12:', type=('build', 'run'))
depends_on('r-biobase@2.5.5:', type=('build', 'run'))
depends_on('r-affyio@1.13.3:', type=('build', 'run'))
depends_on('r-biocmanager', when='@1.60.0:', type=('build', 'run'))
depends_on('r-preprocesscore', type=('build', 'run'))
depends_on('r-zlibbioc', type=('build', 'run'))
depends_on('r-biocinstaller', when='@1.54.0:1.58.0', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-affy/package.py
|
Python
|
lgpl-2.1
| 1,581
|
[
"Bioconductor"
] |
8e24abf28de9484693f959fe2241f0ea28327af9643ff811e22c72569c336c5c
|
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing WRF, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import os
import re
import sys
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.netcdf import set_netcdf_env_vars # @UnresolvedImport
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import apply_regex_substitutions, patch_perl_script_autoflush
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd, run_cmd_qa
class EB_WRF(EasyBlock):
"""Support for building/installing WRF."""
def __init__(self, *args, **kwargs):
"""Add extra config options specific to WRF."""
super(EB_WRF, self).__init__(*args, **kwargs)
self.build_in_installdir = True
self.wrfsubdir = None
self.comp_fam = None
@staticmethod
def extra_options():
extra_vars = {
'buildtype': [None, "Specify the type of build (serial, smpar (OpenMP), " \
"dmpar (MPI), dm+sm (hybrid OpenMP/MPI)).", MANDATORY],
'rewriteopts': [True, "Replace -O3 with CFLAGS/FFLAGS", CUSTOM],
'runtest': [True, "Build and run WRF tests", CUSTOM],
}
return EasyBlock.extra_options(extra_vars)
def configure_step(self):
"""Configure build:
- set some magic environment variables
- run configure script
- adjust configure.wrf file if needed
"""
# define $NETCDF* for netCDF dependency (used when creating WRF module file)
set_netcdf_env_vars(self.log)
# HDF5 (optional) dependency
hdf5 = get_software_root('HDF5')
if hdf5:
# check if this is parallel HDF5
phdf5_bins = ['h5pcc', 'ph5diff']
parallel_hdf5 = True
for f in phdf5_bins:
if not os.path.exists(os.path.join(hdf5, 'bin', f)):
parallel_hdf5 = False
break
if not (hdf5 and parallel_hdf5):
raise EasyBuildError("Parallel HDF5 module not loaded?")
else:
env.setvar('PHDF5', hdf5)
else:
self.log.info("HDF5 module not loaded, assuming that's OK...")
# JasPer dependency check + setting env vars
jasper = get_software_root('JasPer')
if jasper:
jasperlibdir = os.path.join(jasper, "lib")
env.setvar('JASPERINC', os.path.join(jasper, "include"))
env.setvar('JASPERLIB', jasperlibdir)
else:
if os.getenv('JASPERINC') or os.getenv('JASPERLIB'):
raise EasyBuildError("JasPer module not loaded, but JASPERINC and/or JASPERLIB still set?")
else:
self.log.info("JasPer module not loaded, assuming that's OK...")
# enable support for large file support in netCDF
env.setvar('WRFIO_NCD_LARGE_FILE_SUPPORT', '1')
# patch arch/Config_new.pl script, so that run_cmd_qa receives all output to answer questions
patch_perl_script_autoflush(os.path.join("arch", "Config_new.pl"))
# determine build type option to look for
build_type_option = None
self.comp_fam = self.toolchain.comp_family()
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
build_type_option = "Linux x86_64 i486 i586 i686, ifort compiler with icc"
elif self.comp_fam == toolchain.GCC: #@UndefinedVariable
build_type_option = "x86_64 Linux, gfortran compiler with gcc"
else:
raise EasyBuildError("Don't know how to figure out build type to select.")
# fetch selected build type (and make sure it makes sense)
known_build_types = ['serial', 'smpar', 'dmpar', 'dm+sm']
self.parallel_build_types = ["dmpar", "smpar", "dm+sm"]
bt = self.cfg['buildtype']
if not bt in known_build_types:
raise EasyBuildError("Unknown build type: '%s'. Supported build types: %s", bt, known_build_types)
# fetch option number based on build type option and selected build type
build_type_question = "\s*(?P<nr>[0-9]+).\s*%s\s*\(%s\)" % (build_type_option, bt)
# run configure script
cmd = "./configure"
qa = {
# named group in match will be used to construct answer
"Compile for nesting? (1=basic, 2=preset moves, 3=vortex following) [default 1]:": "1",
"Compile for nesting? (0=no nesting, 1=basic, 2=preset moves, 3=vortex following) [default 0]:": "0"
}
no_qa = [
"testing for fseeko and fseeko64",
r"If you wish to change the default options, edit the file:[\s\n]*arch/configure_new.defaults"
]
std_qa = {
# named group in match will be used to construct answer
r"%s.*\n(.*\n)*Enter selection\s*\[[0-9]+-[0-9]+\]\s*:" % build_type_question: "%(nr)s",
}
run_cmd_qa(cmd, qa, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
cfgfile = 'configure.wrf'
# make sure correct compilers are being used
comps = {
'SCC': os.getenv('CC'),
'SFC': os.getenv('F90'),
'CCOMP': os.getenv('CC'),
'DM_FC': os.getenv('MPIF90'),
'DM_CC': "%s -DMPI2_SUPPORT" % os.getenv('MPICC'),
}
regex_subs = [(r"^(%s\s*=\s*).*$" % k, r"\1 %s" % v) for (k, v) in comps.items()]
apply_regex_substitutions(cfgfile, regex_subs)
# rewrite optimization options if desired
if self.cfg['rewriteopts']:
# replace default -O3 option in configure.wrf with CFLAGS/FFLAGS from environment
self.log.info("Rewriting optimization options in %s" % cfgfile)
# set extra flags for Intel compilers
# see http://software.intel.com/en-us/forums/showthread.php?t=72109&p=1#146748
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
# -O3 -heap-arrays is required to resolve compilation error
for envvar in ['CFLAGS', 'FFLAGS']:
val = os.getenv(envvar)
if '-O3' in val:
env.setvar(envvar, '%s -heap-arrays' % val)
self.log.info("Updated %s to '%s'" % (envvar, os.getenv(envvar)))
# replace -O3 with desired optimization options
regex_subs = [
(r"^(FCOPTIM.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('FFLAGS')),
(r"^(CFLAGS_LOCAL.*)(\s-O3)(\s.*)$", r"\1 %s \3" % os.getenv('CFLAGS')),
]
apply_regex_substitutions(cfgfile, regex_subs)
def build_step(self):
"""Build and install WRF and testcases using provided compile script."""
# enable parallel build
p = self.cfg['parallel']
self.par = ""
if p:
self.par = "-j %s" % p
# build wrf (compile script uses /bin/csh )
cmd = "tcsh ./compile %s wrf" % self.par
run_cmd(cmd, log_all=True, simple=True, log_output=True)
# build two testcases to produce ideal.exe and real.exe
for test in ["em_real", "em_b_wave"]:
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True, log_output=True)
def test_step(self):
"""Build and run tests included in the WRF distribution."""
if self.cfg['runtest']:
# get list of WRF test cases
self.testcases = []
if os.path.exists('test'):
self.testcases = os.listdir('test')
elif not self.dry_run:
raise EasyBuildError("Test directory not found, failed to determine list of test cases")
# exclude 2d testcases in non-parallel WRF builds
if self.cfg['buildtype'] in self.parallel_build_types:
self.testcases = [test for test in self.testcases if not "2d_" in test]
# exclude real testcases
self.testcases = [test for test in self.testcases if not test.endswith("_real")]
self.log.debug("intermediate list of testcases: %s" % self.testcases)
# exclude tests that should not be run
for test in ["em_esmf_exp", "em_scm_xy", "nmm_tropical_cyclone"]:
if test in self.testcases:
self.testcases.remove(test)
# some tests hang when WRF is built with Intel compilers
if self.comp_fam == toolchain.INTELCOMP: #@UndefinedVariable
for test in ["em_heldsuarez"]:
if test in self.testcases:
self.testcases.remove(test)
# determine parallel setting (1/2 of available processors + 1)
n = self.cfg['parallel'] / 2 + 1
# prepare run command
# stack limit needs to be set to unlimited for WRF to work well
if self.cfg['buildtype'] in self.parallel_build_types:
test_cmd = "ulimit -s unlimited && %s && %s" % (self.toolchain.mpi_cmd_for("./ideal.exe", 1),
self.toolchain.mpi_cmd_for("./wrf.exe", n))
else:
test_cmd = "ulimit -s unlimited && ./ideal.exe && ./wrf.exe" % n
def run_test():
"""Run a single test and check for success."""
# regex to check for successful test run
re_success = re.compile("SUCCESS COMPLETE WRF")
# run test
run_cmd(test_cmd, log_all=True, simple=True)
# check for success
fn = "rsl.error.0000"
try:
f = open(fn, "r")
txt = f.read()
f.close()
except IOError, err:
raise EasyBuildError("Failed to read output file %s: %s", fn, err)
if re_success.search(txt):
self.log.info("Test %s ran successfully." % test)
else:
raise EasyBuildError("Test %s failed, pattern '%s' not found.", test, re_success.pattern)
# clean up stuff that gets in the way
fn_prefs = ["wrfinput_", "namelist.output", "wrfout_", "rsl.out.", "rsl.error."]
for f in os.listdir('.'):
for p in fn_prefs:
if f.startswith(p):
os.remove(f)
self.log.debug("Cleaned up file %s." % f)
# build and run each test case individually
for test in self.testcases:
self.log.debug("Building and running test %s" % test)
#build_and_install
cmd = "tcsh ./compile %s %s" % (self.par, test)
run_cmd(cmd, log_all=True, simple=True)
# run test
try:
os.chdir('run')
if test in ["em_fire"]:
# handle tests with subtests separately
testdir = os.path.join("..", "test", test)
for subtest in [x for x in os.listdir(testdir) if os.path.isdir(x)]:
subtestdir = os.path.join(testdir, subtest)
# link required files
for f in os.listdir(subtestdir):
if os.path.exists(f):
os.remove(f)
os.symlink(os.path.join(subtestdir, f), f)
# run test
run_test()
else:
# run test
run_test()
os.chdir('..')
except OSError, err:
raise EasyBuildError("An error occured when running test %s: %s", test, err)
# building/installing is done in build_step, so we can run tests
def install_step(self):
"""Building was done in install dir, so nothing to do in install_step."""
pass
def sanity_check_step(self):
"""Custom sanity check for WRF."""
mainver = self.version.split('.')[0]
self.wrfsubdir = "WRFV%s" % mainver
fs = ["libwrflib.a", "wrf.exe", "ideal.exe", "real.exe", "ndown.exe", "nup.exe", "tc.exe"]
ds = ["main", "run"]
custom_paths = {
'files': [os.path.join(self.wrfsubdir, "main", x) for x in fs],
'dirs': [os.path.join(self.wrfsubdir, x) for x in ds]
}
super(EB_WRF, self).sanity_check_step(custom_paths=custom_paths)
def make_module_req_guess(self):
mainver = self.version.split('.')[0]
self.wrfsubdir = "WRFV%s"%mainver
maindir = os.path.join(self.wrfsubdir, "main")
return {
'PATH': [maindir],
'LD_LIBRARY_PATH': [maindir],
'MANPATH': [],
}
def make_module_extra(self):
"""Add netCDF environment variables to module file."""
txt = super(EB_WRF, self).make_module_extra()
for netcdf_var in ['NETCDF', 'NETCDFF']:
if os.getenv(netcdf_var) is not None:
txt += self.module_generator.set_environment(netcdf_var, os.getenv(netcdf_var))
return txt
|
valtandor/easybuild-easyblocks
|
easybuild/easyblocks/w/wrf.py
|
Python
|
gpl-2.0
| 14,991
|
[
"NetCDF"
] |
36230f3f111da17defce539e6144c37af4a12b6b7dd456afefc3a0871aae0286
|
from aces.materials.POSCAR import structure as Material
class structure(Material):
def getPOSCAR(self):
return """ACES POSCAR
1.00000000000000
4.3715553598745540 0.0000000000000000 0.0000000000000000
0.0000000000000000 4.2918241965252291 0.0000000000000000
0.0000000000000000 0.0000000000000000 22.3387621007543906
Sn Se
2 2
Direct
0.0214617708073111 0.2500000000000000 0.4384455815054507
0.5214617708069064 0.7500000000000000 0.5615544184945497
0.4785382291930940 0.7500000000000000 0.4399051558263891
0.9785382291930936 0.2500000000000000 0.5600948441736112
"""
def csetup(self):
from ase.dft.kpoints import ibz_points
self.bandpoints=ibz_points['orthorhombic']
self.bandpoints['T']=self.bandpoints['S']
self.bandpath=['Gamma','Y','T','X','Gamma']
|
vanceeasleaf/aces
|
aces/materials/SnSe_monolayer_minimize.py
|
Python
|
gpl-2.0
| 878
|
[
"ASE"
] |
315380143e6d450c1a6fe07a40b7594ce47a0898b669cbf45e746abb617aa656
|
"""
Stack RAVE spectra from repeat visits.
"""
import cPickle as pickle
import os
import numpy as np
from astropy.table import Table
parent_spectrum_dir = "/data/gaia-eso/arc/rave/pre-normalized-spectra-with-correct-errors"
stacked_spectrum_dir = os.path.join(parent_spectrum_dir, "stacked-spectra")
if not os.path.exists(stacked_spectrum_dir):
os.mkdir(stacked_spectrum_dir)
dr5 = Table.read("/data/gaia-eso/arc/rave-data-files/rave-dr5-positions.fits")
dr5 = dr5.filled()
def get_spectrum_path(rave_obs_id):
date, field, fibre = rave_obs_id.split("_")
year = date[:4]
return os.path.join(parent_spectrum_dir, year, date,
"{0}.rvsun.{1}.pkl".format(field, fibre.strip()))
for group in dr5.group_by("GroupID").groups:
if group["GroupID"][0] < 0 or group["GroupSize"][0] < 2: continue
group_id = group["GroupID"][0]
flux = []
ivar = []
subset = np.ones(len(group), dtype=bool)
for i, visit in enumerate(group):
spectrum_path = get_spectrum_path(visit["RAVE_OBS_ID"])
if not os.path.exists(spectrum_path):
print("Could not find {} in group {}".format(spectrum_path, group_id))
subset[i] = False
raise WTFError
continue
with open(spectrum_path, "rb") as fp:
visit_flux, visit_ivar = pickle.load(fp)
flux.append(visit_flux)
ivar.append(visit_ivar)
flux = np.array(flux)
ivar = np.array(ivar)
if flux.shape[0] < 2:
print("Skipping group {} because only not enough spectra found".format(
group_id))
continue
# Produce a stacked spectrum.
stacked_ivar = np.sum(ivar, axis=0)
stacked_flux = np.sum(flux * ivar, axis=0)/stacked_ivar
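# Inverse-variance weighted mean: stacked_flux = sum_i(flux_i * ivar_i) / sum_i(ivar_i),
# with the stacked inverse variance given by sum_i(ivar_i).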
assert np.any(np.isfinite(stacked_flux))
if not np.all(np.isfinite(stacked_ivar)):
print("Warning: {} pixels in {} had non-finite inverse variance".format(
np.sum(~np.isfinite(stacked_ivar)), group["RAVEID"][0]))
stacked_ivar[~np.isfinite(stacked_ivar)] = 0
assert np.all(np.isfinite(stacked_ivar))
stacked_spectrum_path = os.path.join(
stacked_spectrum_dir, "{}.pkl".format(group["RAVEID"][0].strip()))
with open(stacked_spectrum_path, "wb") as fp:
pickle.dump((stacked_flux, stacked_ivar), fp, -1)
print("Created {}".format(stacked_spectrum_path))
|
AnnieJumpCannon/RAVE
|
stack_repeat_visit_spectra.py
|
Python
|
mit
| 2,369
|
[
"VisIt"
] |
4654ef5afa450d13e888b0ddb8809476d6c692d087adaa5c044c390c68e86315
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from ..gloo import Texture2D, VertexBuffer
from ..color import get_colormap
from .shaders import Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..ext.six import string_types
from ..io import load_spatial_filters
VERT_SHADER = """
uniform int method; // 0=subdivide, 1=impostor
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform vec2 image_size;
uniform int method; // 0=subdivide, 1=impostor
uniform sampler2D u_texture;
varying vec2 v_texcoord;
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
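// p3 is the point where the ray through p1 and p2 crosses the z == 0 plane
// of the image (p1 and p2 come from mapping x and x offset along z).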
// finally map local to texture coords
return vec4(p3.xy / image_size, 0, 1);
}
void main()
{
vec2 texcoord;
if( method == 0 ) {
texcoord = v_texcoord;
}
else {
// vertex shader outputs clip coordinates;
// fragment shader maps to texture coordinates
texcoord = map_local_to_tex(vec4(v_texcoord, 0, 1)).xy;
}
gl_FragColor = $color_transform($get_data(texcoord));
}
""" # noqa
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l = 'float cmap(vec4 color) { return (color.r + color.g + color.b) / 3.; }'
_interpolation_template = """
#include "misc/spatial-filters.frag"
vec4 texture_lookup_filtered(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return %s($texture, $shape, texcoord);
}"""
_texture_lookup = """
vec4 texture_lookup(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return texture2D($texture, texcoord);
}"""
class ImageVisual(Visual):
"""Visual subclass displaying an image.
Parameters
----------
data : ndarray
ImageVisual data. Can be shape (M, N), (M, N, 3), or (M, N, 4).
method : str
Selects method of rendering image in case of non-linear transforms.
Each method produces similar results, but may trade efficiency
and accuracy. If the transform is linear, this parameter is ignored
and a single quad is drawn around the area of the image.
* 'auto': Automatically select 'impostor' if the image is drawn
with a nonlinear transform; otherwise select 'subdivide'.
* 'subdivide': ImageVisual is represented as a grid of triangles
with texture coordinates linearly mapped.
* 'impostor': ImageVisual is represented as a quad covering the
entire view, with texture coordinates determined by the
transform. This produces the best transformation results, but may
be slow.
grid: tuple (rows, cols)
If method='subdivide', this tuple determines the number of rows and
columns in the image grid.
cmap : str | ColorMap
Colormap to use for luminance images.
clim : str | tuple
Limits to use for the colormap. Can be 'auto' to auto-set bounds to
the min and max of the data.
interpolation : str
Selects method of image interpolation. Makes use of the two Texture2D
interpolation methods and the available interpolation methods defined
in vispy/gloo/glsl/misc/spatial_filters.frag
* 'nearest': Default, uses 'nearest' with Texture2D interpolation.
* 'bilinear': uses 'linear' with Texture2D interpolation.
* 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'bicubic',
'catrom', 'mitchell', 'spline16', 'spline36', 'gaussian',
'bessel', 'sinc', 'lanczos', 'blackman'
**kwargs : dict
Keyword arguments to pass to `Visual`.
Notes
-----
The colormap functionality through ``cmap`` and ``clim`` are only used
if the data are 2D.
"""
def __init__(self, data=None, method='auto', grid=(1, 1),
cmap='viridis', clim='auto',
interpolation='nearest', **kwargs):
self._data = None
# load 'float packed rgba8' interpolation kernel
# to load float interpolation kernel use
# `load_spatial_filters(packed=False)`
kernel, self._interpolation_names = load_spatial_filters()
self._kerneltex = Texture2D(kernel, interpolation='nearest')
# The unpacking can be debugged by changing "spatial-filters.frag"
# to have the "unpack" function just return the .r component. That
# combined with using the below as the _kerneltex allows debugging
# of the pipeline
# self._kerneltex = Texture2D(kernel, interpolation='linear',
# internalformat='r32f')
# create interpolation shader functions for available
# interpolations
fun = [Function(_interpolation_template % n)
for n in self._interpolation_names]
self._interpolation_names = [n.lower()
for n in self._interpolation_names]
self._interpolation_fun = dict(zip(self._interpolation_names, fun))
self._interpolation_names.sort()
self._interpolation_names = tuple(self._interpolation_names)
# overwrite "nearest" and "bilinear" spatial-filters
# with "hardware" interpolation _data_lookup_fn
self._interpolation_fun['nearest'] = Function(_texture_lookup)
self._interpolation_fun['bilinear'] = Function(_texture_lookup)
if interpolation not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
self._interpolation = interpolation
# check texture interpolation
if self._interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
texture_interpolation = 'nearest'
self._method = method
self._grid = grid
self._need_texture_upload = True
self._need_vertex_update = True
self._need_colortransform_update = True
self._need_interpolation_update = True
self._texture = Texture2D(np.zeros((1, 1, 4)),
interpolation=texture_interpolation)
self._subdiv_position = VertexBuffer()
self._subdiv_texcoord = VertexBuffer()
# impostor quad covers entire viewport
vertices = np.array([[-1, -1], [1, -1], [1, 1],
[-1, -1], [1, 1], [-1, 1]],
dtype=np.float32)
self._impostor_coords = VertexBuffer(vertices)
self._null_tr = NullTransform()
self._init_view(self)
super(ImageVisual, self).__init__(vcode=VERT_SHADER, fcode=FRAG_SHADER)
self.set_gl_state('translucent', cull_face=False)
self._draw_mode = 'triangles'
# define _data_lookup_fn as None, will be setup in
# self._build_interpolation()
self._data_lookup_fn = None
self.clim = clim
self.cmap = cmap
if data is not None:
self.set_data(data)
self.freeze()
def set_data(self, image):
"""Set the data
Parameters
----------
image : array-like
The image data.
"""
data = np.asarray(image)
if self._data is None or self._data.shape != data.shape:
self._need_vertex_update = True
self._data = data
self._need_texture_upload = True
def view(self):
v = Visual.view(self)
self._init_view(v)
return v
def _init_view(self, view):
# Store some extra variables per-view
view._need_method_update = True
view._method_used = None
@property
def clim(self):
return (self._clim if isinstance(self._clim, string_types) else
tuple(self._clim))
@clim.setter
def clim(self, clim):
if isinstance(clim, string_types):
if clim != 'auto':
raise ValueError('clim must be "auto" if a string')
else:
clim = np.array(clim, float)
if clim.shape != (2,):
raise ValueError('clim must have two elements')
self._clim = clim
self._need_vertex_update = True
self.update()
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self._need_colortransform_update = True
self.update()
@property
def method(self):
return self._method
@method.setter
def method(self, m):
if self._method != m:
self._method = m
self._need_vertex_update = True
self.update()
@property
def size(self):
return self._data.shape[:2][::-1]
@property
def interpolation(self):
return self._interpolation
@interpolation.setter
def interpolation(self, i):
if i not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
if self._interpolation != i:
self._interpolation = i
self._need_interpolation_update = True
self.update()
@property
def interpolation_functions(self):
return self._interpolation_names
# The interpolation code could be transferred to a dedicated filter
# function in visuals/filters as discussed in #1051
def _build_interpolation(self):
"""Rebuild the _data_lookup_fn using different interpolations within
the shader
"""
interpolation = self._interpolation
self._data_lookup_fn = self._interpolation_fun[interpolation]
self.shared_program.frag['get_data'] = self._data_lookup_fn
# only 'bilinear' uses 'linear' texture interpolation
if interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
# 'nearest' (and also 'bilinear') doesn't use spatial_filters.frag
# so u_kernel and shape setting is skipped
texture_interpolation = 'nearest'
if interpolation != 'nearest':
self.shared_program['u_kernel'] = self._kerneltex
self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]
if self._texture.interpolation != texture_interpolation:
self._texture.interpolation = texture_interpolation
self._data_lookup_fn['texture'] = self._texture
self._need_interpolation_update = False
def _build_vertex_data(self):
"""Rebuild the vertex buffers used for rendering the image when using
the subdivide method.
"""
grid = self._grid
w = 1.0 / grid[1]
h = 1.0 / grid[0]
quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
[0, 0, 0], [w, h, 0], [0, h, 0]],
dtype=np.float32)
quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
quads[:] = quad
mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
mgrid = mgrid[:, :, np.newaxis, :]
mgrid[..., 0] *= w
mgrid[..., 1] *= h
quads[..., :2] += mgrid
tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
tex_coords = np.ascontiguousarray(tex_coords[:, :2])
vertices = tex_coords * self.size
self._subdiv_position.set_data(vertices.astype('float32'))
self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
def _update_method(self, view):
"""Decide which method to use for *view* and configure it accordingly.
"""
method = self._method
if method == 'auto':
if view.transforms.get_transform().Linear:
method = 'subdivide'
else:
method = 'impostor'
view._method_used = method
if method == 'subdivide':
view.view_program['method'] = 0
view.view_program['a_position'] = self._subdiv_position
view.view_program['a_texcoord'] = self._subdiv_texcoord
elif method == 'impostor':
view.view_program['method'] = 1
view.view_program['a_position'] = self._impostor_coords
view.view_program['a_texcoord'] = self._impostor_coords
else:
raise ValueError("Unknown image draw method '%s'" % method)
self.shared_program['image_size'] = self.size
view._need_method_update = False
self._prepare_transforms(view)
def _build_color_transform(self):
data = self._data
if data.ndim == 2 or data.shape[2] == 1:
fun = FunctionChain(None, [Function(_c2l),
Function(self._cmap.glsl_map)])
else:
fun = Function(_null_color_transform)
self.shared_program.frag['color_transform'] = fun
self._need_colortransform_update = False
def _build_texture(self):
data = self._data
if data.dtype == np.float64:
data = data.astype(np.float32)
if data.ndim == 2 or data.shape[2] == 1:
# deal with clim on CPU b/c of texture depth limits :(
# can eventually do this by simulating 32-bit float... maybe
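            # e.g. with clim=(10, 30) a raw value of 20 is uploaded as
            # (20 - 10) / (30 - 10) = 0.5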
clim = self._clim
if isinstance(clim, string_types) and clim == 'auto':
clim = np.min(data), np.max(data)
clim = np.asarray(clim, dtype=np.float32)
data = data - clim[0] # not inplace so we don't modify orig data
if clim[1] - clim[0] > 0:
data /= clim[1] - clim[0]
else:
data[:] = 1 if data[0, 0] != 0 else 0
self._clim = np.array(clim)
self._texture.set_data(data)
self._need_texture_upload = False
def _compute_bounds(self, axis, view):
if axis > 1:
return (0, 0)
else:
return (0, self.size[axis])
def _prepare_transforms(self, view):
trs = view.transforms
prg = view.view_program
method = view._method_used
if method == 'subdivide':
prg.vert['transform'] = trs.get_transform()
prg.frag['transform'] = self._null_tr
else:
prg.vert['transform'] = self._null_tr
prg.frag['transform'] = trs.get_transform().inverse
def _prepare_draw(self, view):
if self._data is None:
return False
if self._need_interpolation_update:
self._build_interpolation()
if self._need_texture_upload:
self._build_texture()
if self._need_colortransform_update:
self._build_color_transform()
if self._need_vertex_update:
self._build_vertex_data()
if view._need_method_update:
self._update_method(view)
|
kkuunnddaannkk/vispy
|
vispy/visuals/image.py
|
Python
|
bsd-3-clause
| 15,670
|
[
"Gaussian"
] |
ed6807ab4cd5578cff4e60900e3bdc66dd918cd50d2ecbf898b7c4d89365bb2e
|
#!/usr/bin/python3
import re
import os
import copy
import time
import yaml
import logging
import numpy as np
import pandas as pd
import camoco as co
import networkx as nx
from math import isinf
from itertools import chain
from flask import Flask, url_for, jsonify, request, send_from_directory, abort
print('Loading Camoco...')
# Take a huge swig from the flask
app = Flask(__name__, static_folder=None)
# Try Importing GWS
try:
from genewordsearch.Classes import WordFreq
from genewordsearch.GeneWordSearch import geneWords
from genewordsearch.DBBuilder import geneWordBuilder
from genewordsearch.GeneWordSearch import geneWordSearch
hasGWS = True
except ImportError:
hasGWS = False
# ----------------------------------------
# Parse configuration from environment
# ----------------------------------------
# Get the config object
conf = yaml.safe_load(os.getenv('COB_CONF'))
dflt = conf['defaults']
# Folder with annotation files
os.makedirs(conf['scratch'], exist_ok=True)
if hasGWS:
os.environ['GWS_STORE'] = conf['scratch']
# Folder for bundle files
static_bundle_dir = os.path.join(conf['scratch'], 'static')
os.makedirs(static_bundle_dir, exist_ok=True)
# Max number of genes for custom queries
geneLimit = {'min': 1, 'max': 150}
# Option Limits
opts = {
'nodeCutoff': {
'title': 'Min Node Degree',
'default': dflt['nodeCutoff'],
'min': 0,
'max': 20,
'int': True
},
'edgeCutoff': {
'title': 'Min Edge Score',
'default': dflt['edgeCutoff'],
'min': 1.0,
'max': 20.0,
'int': False
},
'fdrCutoff': {
'title': 'FDR Filter (Term)',
'default': dflt['fdrCutoff'],
'min': 0.0,
'max': 5.0,
'int': False
},
'windowSize': {
'title': 'Window Size (Term)',
'default': dflt['windowSize'],
'min': 0,
'max': 1000000,
'int': True
},
'flankLimit': {
'title': 'Flank Limit (Term)',
'default': dflt['flankLimit'],
'min': 0,
'max': 20,
'int': True
},
'visNeighbors': {
'title': 'Vis Neighbors (Custom)',
'default': dflt['visNeighbors'],
'min': 0,
'max': 150,
'int': True
},
'nodeSize': {
'title': 'Node Size',
'default': dflt['nodeSize'],
'min': 5,
'max': 50,
'int': True
},
'pCutoff': {
'title': 'Probability Cutoff',
'default': dflt['pCutoff'],
'min': 0.0,
'max': 1.0,
'int': False
},
'minTerm': {
'title': 'Min Genes (GO)',
'default': dflt['minTerm'],
'min': 1,
'max': 99,
'int': True
},
'maxTerm': {
'title': 'Max Genes (GO)',
'default': dflt['maxTerm'],
'min': 100,
'max': 1000,
'int': True
},
}
binOpts = {
'overlapMethod': {
'default': dflt['overlapMethod'],
'state': dflt['overlapMethod'],
'isBool': False
},
'overlapSNPs': {
'default': dflt['overlapSNPs'],
'state': dflt['overlapSNPs'],
'isBool': False
},
'logSpacing': {
'default': dflt['logSpacing'],
'state': dflt['logSpacing'],
'isBool': True
},
'hpo': {
'default': dflt['hpo'],
'state': dflt['hpo'],
'isBool': True
},
'visEnrich': {
'default': dflt['visEnrich'],
'state': dflt['visEnrich'],
'isBool': True
},
}
# Enumerate the JS files
js_files = [
# load external libraries
'lib/jquery-3.3.1.min.js', 'lib/jquery.textcomplete-1.8.1.min.js',
'lib/bootstrap-3.3.7.min.js', 'lib/datatables-1.10.18.min.js',
'lib/qtip-3.0.3.min.js', 'lib/download-1.4.5.min.js',
'lib/cytoscape-3.4.0.min.js', 'lib/cytoscape-qtip-2.7.1.js',
'lib/cytoscape-graphml-1.0.5.js', 'lib/filepond.js',
# load cob libraries
'core.js', 'genes.js', 'graph.js',
'polywas-layout.js', 'enrichment.js', 'tools.js', 'tables.js', 'cob.js'
]
# Enumerate the CSS files
css_files = [
'lib/bootstrap-3.3.7.min.css', 'lib/datatables-1.10.18.min.css',
'lib/qtip-3.0.3.min.css', 'lib/filepond.css',
'cob.css'
]
# Function to handle bundling the files
def bundle_files(files, type, static_bundle_dir=static_bundle_dir):
os.makedirs(os.path.join(static_bundle_dir, type), exist_ok=True)
with open(os.path.join(static_bundle_dir, type, 'bundle.' + type),
'w') as bundle:
for fn in files:
with open(os.path.join(app.root_path, 'static', type, fn)) as fd:
bundle.write(fd.read())
bundle.write('\n')
# Actually bundle them
bundle_files(js_files, 'js')
bundle_files(css_files, 'css')
# ----------------------------------------
# Load things into memory to prepare
# ----------------------------------------
# Generate network list based on allowed list
print('Preloading networks into memory...')
if len(conf['networks']) < 1:
conf['networks'] = list(co.Tools.available_datasets('Expr')['Name'].values)
networks = {x: co.COB(x) for x in conf['networks']}
network_info = []
refLinks = {}
for name, net in networks.items():
network_info.append({
'name': net.name,
'refgen': net._global('parent_refgen'),
'desc': net.description,
})
if net._global('parent_refgen') in conf['refLinks']:
refLinks[net.name] = conf['refLinks'][net._global('parent_refgen')]
print('Available Networks: ' + str(networks))
# Generate ontology list based on allowed list and load them into memory
print('Preloading GWASes into Memory...')
if len(conf['gwas']) < 1:
conf['gwas'] = list(co.Tools.available_datasets('GWAS')['Name'].values)
onts = {x: co.GWAS(x) for x in conf['gwas']}
onts_info = {}
for m, net in networks.items():
ref = net._global('parent_refgen')
onts_info[net.name] = []
for n, ont in onts.items():
if ont.refgen.name == ref:
onts_info[net.name].append({
'name': ont.name,
'refgen': ont.refgen.name,
'desc': ont.description
})
print('Available GWASes: ' + str(onts_info))
# Prefetch the gene names for all the networks
print('Fetching gene names for networks...')
network_genes = {}
for name, net in networks.items():
ids = list(net._expr.index.values)
als = co.RefGen(net._global('parent_refgen')).aliases(ids)
for k, v in als.items():
ids += v
network_genes[name] = list(set(ids))
print('Found gene names')
# Find all of the GWAS data we have available
print('Finding GWAS Data...')
gwas_data_db = {}
for gwas in co.Tools.available_datasets('Overlap')['Name']:
print("Loading {}".format(gwas))
gwas_data_db[gwas] = co.Overlap(gwas)
# Find the available window sizes and flank limits for each GWAS/COB combo
print('Finding GWAS Metadata...')
gwas_meta_db = {}
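# Resulting structure (illustrative):
#   gwas_meta_db[ontology][network] = {'windowSize': [...], 'flankLimit': [...],
#                                      'overlapSNPs': [...], 'overlapMethod': [...]}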
for ont in gwas_data_db.keys():
gwas_meta_db[ont] = {}
for net in gwas_data_db[ont].results['COB'].unique():
gwas_meta_db[ont][net] = {}
gwas = gwas_data_db[ont].results[gwas_data_db[ont].results['COB'] ==
net]
gwas_meta_db[ont][net]['windowSize'] = []
gwas_meta_db[ont][net]['flankLimit'] = []
gwas_meta_db[ont][net]['overlapSNPs'] = []
gwas_meta_db[ont][net]['overlapMethod'] = []
for x in gwas['WindowSize'].unique():
gwas_meta_db[ont][net]['windowSize'].append(int(x))
for x in gwas['FlankLimit'].unique():
gwas_meta_db[ont][net]['flankLimit'].append(int(x))
for x in gwas['SNP2Gene'].unique():
gwas_meta_db[ont][net]['overlapSNPs'].append(
str(x).strip().lower())
for x in gwas['Method'].unique():
gwas_meta_db[ont][net]['overlapMethod'].append(
str(x).strip().lower())
# Find any functional annotations we have
print('Finding functional annotations...')
func_data_db = {}
for ref in co.Tools.available_datasets('RefGen')['Name']:
refgen = co.RefGen(ref)
if refgen.has_annotations():
print('Processing annotations for {}...'.format(ref))
func_data_db[ref] = refgen
func_data_db[ref].export_annotations(
os.path.join(conf['scratch'], (ref + '.tsv')))
if hasGWS:
geneWordBuilder(ref,
[os.path.join(conf['scratch'], (ref + '.tsv'))],
[1], ['2 end'], ['tab'], [True])
# Find any GO ontologies we have for the networks we have
print('Finding applicable GO Ontologies...')
GOnt_db = {}
for name in co.Tools.available_datasets('GOnt')['Name']:
gont = co.GOnt(name)
if gont.refgen.name not in GOnt_db:
GOnt_db[gont.refgen.name] = gont
# Generate in memory term lists
print('Finding all available terms...')
terms = {}
for name, ont in onts.items():
terms[name] = []
for term in ont.iter_terms():
        terms[name].append({
            'name': term.id,
            'desc': term.desc,
            'snps': len(term.loci),
            'genes': len(ont.refgen.candidate_genes(
                term.effective_loci(window_size=50000))),
        })
# ---------------------------------------------
# Final Setup
# ---------------------------------------------
handler = logging.FileHandler(os.path.join(conf['scratch'], 'COBErrors.log'))
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.logger.setLevel(logging.INFO)
print('All Ready!')
# ---------------------------------------------
# Routes
# ---------------------------------------------
@app.route('/')
# Sends off the homepage
def index():
return send_from_directory('templates', 'index.html')
@app.route('/defaults')
# Sends the default values in JSON format
def defaults():
return jsonify({
'opts': opts,
'fdrFilter': conf['defaults']['fdrFilter'],
'hpo': conf['defaults']['hpo'],
'logSpacing': conf['defaults']['logSpacing'],
'visEnrich': conf['defaults']['visEnrich'],
'refLinks': refLinks,
'binOpts': binOpts,
})
@app.route('/static/<path:path>')
# Sends off the js and such when needed
def send_static(path):
extension = path.split('.')[-1].strip()
if (extension == 'js'):
if conf['dev']:
print('Rebundling js files')
bundle_files(js_files, 'js')
return send_from_directory(static_bundle_dir, path)
elif (extension == 'css'):
if conf['dev']:
print('Rebundling css files')
bundle_files(css_files, 'css')
return send_from_directory(static_bundle_dir, path)
else:
return send_from_directory('static', path)
@app.route("/available_datasets/<path:type>")
# Route for sending the available datasets in a general fashion
def available_datasets(type=None, *args):
# Find the datasets
if (type == None):
datasets = co.Tools.available_datasets()
else:
datasets = co.Tools.available_datasets(type)
# Return the results in a table friendly format
return jsonify({
"data":
list(datasets[['Name', 'Description']].itertuples(index=False))
})
@app.route("/available_networks")
# Route for sending the available networks
def available_networks():
return jsonify({'data': network_info})
@app.route("/available_ontologies/<path:network>")
# Route for sending the available ontologies relevant to a network
def available_ontologies(network):
return jsonify({'data': onts_info[network]})
@app.route("/available_terms/<path:network>/<path:ontology>")
# Route for sending the available terms
def available_terms(network, ontology):
return jsonify({'data': terms[ontology]})
@app.route("/available_genes/<path:network>")
# Route for sending available gene names in the network
def available_genes(network):
return jsonify({'geneIDs': network_genes[network]})
@app.route("/fdr_options/<path:network>/<path:ontology>")
# Route for getting FDR availability data
def fdr_options(network, ontology):
# Default to empty list
ans = {
'windowSize': [],
'flankLimit': [],
'overlapSNPs': [],
'overlapMethod': []
}
# If the combo is in the db, use that as answer
if ontology in gwas_meta_db:
if network in gwas_meta_db[ontology]:
ans = gwas_meta_db[ontology][network]
# Return it in JSON
return jsonify(ans)
@app.route("/term_network_stats",methods=['POST'])
def term_network_stats():
'''
Mimics the term_network method and returns
subnetwork stats as calculated by networkx
'''
return '{"node_conn": "{\"0\":{\"avg_node_connectivity\":0.0096599814}}", "pageranks:": "{\"0\":{\"GRMZM2G032214\":0.0076241322,\"GRMZM2G105770\":0.0076241322,\"GRMZM2G027821\":0.0083732336,\"GRMZM2G113626\":0.0098714363,\"GRMZM2G032209\":0.0106205377,\"GRMZM2G100639\":0.0106205377,\"GRMZM2G059865\":0.011369639,\"GRMZM2G009265\":0.0136169431,\"GRMZM2G086934\":0.0143660445,\"GRMZM2G026346\":0.0188606526}}"}'
#import time
#start = time.time()
## Get data from the form and derive some stuff
#cob = networks[str(request.form['network'])]
#ontology = onts[str(request.form['ontology'])]
#term = str(request.form['term'])
#nodeCutoff = safeOpts('nodeCutoff', request.form['nodeCutoff'])
#edgeCutoff = safeOpts('edgeCutoff', request.form['edgeCutoff'])
#windowSize = safeOpts('windowSize', request.form['windowSize'])
#flankLimit = safeOpts('flankLimit', request.form['flankLimit'])
#hpo = (request.form['hpo'].lower().strip() == 'true')
#strongestSNPs = (
# request.form['overlapSNPs'].lower().strip() == 'strongest')
#overlapDensity = (
# request.form['overlapMethod'].lower().strip() == 'density')
## Detrmine if there is a FDR cutoff or not
#try:
# float(request.form['fdrCutoff'])
#except ValueError:
# fdrCutoff = None
#else:
# fdrCutoff = safeOpts('fdrCutoff', float(request.form['fdrCutoff']))
## Get the candidates
#cob.set_sig_edge_zscore(edgeCutoff)
## Check to see if Genes are HPO
#if hpo:
# genes = cob.refgen[gwas_data_db[
# ontology.name].high_priority_candidates().query(
# 'COB=="{}" and Ontology == "{}" and Term == "{}"'.format(
# cob.name, ontology.name, term)).gene.unique()]
#else:
# # Get candidates based on options
# if (strongestSNPs):
# try:
# loci = ontology[term].strongest_loci(
# window_size=windowSize,
# attr=ontology.get_strongest_attr(),
# lowest=ontology.get_strongest_higher())
# except KeyError:
# loci = ontology[term].effective_loci(window_size=windowSize)
# else:
# loci = ontology[term].effective_loci(window_size=windowSize)
# # Find the genes
# genes = cob.refgen.candidate_genes(
# loci,
# window_size=windowSize,
# flank_limit=flankLimit,
# chain=True,
# include_parent_locus=True,
# #include_parent_attrs=['numIterations', 'avgEffectSize'],
# include_num_intervening=True,
# include_rank_intervening=True,
# include_num_siblings=True
# )
#edges = cob.subnetwork(genes).reset_index()
#def df_to_list(df):
# edgelist = []
# for k,v in df.iterrows():
# str_rep = v['gene_a'] + ' ' + v['gene_b'] + "{'score':" + str(v['score']) + "}"
# edgelist.append(str_rep)
# return edgelist
#edgelist = df_to_list(edges)
#edge_graph = nx.parse_edgelist(edgelist)
#print("I'm Running")
#print(nx.info(edge_graph))
#components = nx.connected_components(edge_graph)
#triadic_closure = nx.transitivity(edge_graph)
#print("Triadic closure: (transensitivity)", triadic_closure)
#print(str("largest Compoent: "), max(components, key=len))
#import operator
##g is nx graph object
#x=nx.pagerank(edge_graph, weight='score')
##sort pageranks
#sorted_x = sorted(x.items(), key=operator.itemgetter(1))
##Get avg node connectivity
#node_conn=pd.DataFrame(pd.Series(nx.average_node_connectivity(edge_graph), index=["avg_node_connectivity",]))
#print(node_conn)
##get top ten
#Sub_Net_stats = pd.DataFrame([i[1] for i in sorted_x[-10:]], index=[j[0] for j in sorted_x[-10:]])
#print(str("Page Rank:") + str(Sub_Net_stats))
##Avg_Connectivity = pd.DataFrame(nx.average_node_connectivity(edge_graph))
##return jsonify({"AvgConnectivity":str(nx.average_node_connectivity(edge_graph))})
#datatbl = {"node_conn":node_conn.to_json(), "pageranks:":Sub_Net_stats.to_json()}
#print(datatbl)
#print(time.time() - start)
#print(str("Done with Stats for Now!"))
#import json
#return json.dumps(datatbl)
@app.route("/term_network", methods=['POST'])
# Route for sending the CoEx Network Data for graphing from prebuilt term
def term_network():
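    """Build the co-expression subnetwork for a prebuilt GWAS/ontology term.
    Reads the POST form fields used below (network, ontology, term,
    nodeCutoff, edgeCutoff, windowSize, flankLimit, hpo, overlapSNPs,
    overlapMethod, fdrCutoff) and returns a JSON object containing
    'nodes', 'edges', 'hasGO' and 'hasGWS'.
    """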
# Get data from the form and derive some stuff
cob = networks[str(request.form['network'])]
ontology = onts[str(request.form['ontology'])]
term = str(request.form['term'])
nodeCutoff = safeOpts('nodeCutoff', request.form['nodeCutoff'])
edgeCutoff = safeOpts('edgeCutoff', request.form['edgeCutoff'])
windowSize = safeOpts('windowSize', request.form['windowSize'])
flankLimit = safeOpts('flankLimit', request.form['flankLimit'])
hpo = (request.form['hpo'].lower().strip() == 'true')
strongestSNPs = (
request.form['overlapSNPs'].lower().strip() == 'strongest')
overlapDensity = (
request.form['overlapMethod'].lower().strip() == 'density')
    # Determine if there is an FDR cutoff or not
try:
float(request.form['fdrCutoff'])
except ValueError:
fdrCutoff = None
else:
fdrCutoff = safeOpts('fdrCutoff', float(request.form['fdrCutoff']))
# Get the candidates
cob.set_sig_edge_zscore(edgeCutoff)
# Check to see if Genes are HPO
if hpo:
genes = cob.refgen[gwas_data_db[
ontology.name].high_priority_candidates().query(
'COB=="{}" and Ontology == "{}" and Term == "{}"'.format(
cob.name, ontology.name, term)).gene.unique()]
else:
# Get candidates based on options
if (strongestSNPs):
try:
loci = ontology[term].strongest_loci(
window_size=windowSize,
attr=ontology.get_strongest_attr(),
lowest=ontology.get_strongest_higher())
except KeyError:
loci = ontology[term].effective_loci(window_size=windowSize)
else:
loci = ontology[term].effective_loci(window_size=windowSize)
# Find the genes
genes = cob.refgen.candidate_genes(
loci,
window_size=windowSize,
flank_limit=flankLimit,
chain=True,
include_parent_locus=True,
#include_parent_attrs=['numIterations', 'avgEffectSize'],
include_num_intervening=True,
include_rank_intervening=True,
include_num_siblings=True
)
# always have genes
genes
cob.log('Found {} candidate genes', len(genes))
# Base of the result dict
net = {}
# If there are GWAS results, and a FDR Cutoff
if fdrCutoff and ontology.name in gwas_data_db and not (hpo):
cob.log('Fetching genes with FDR < {}', fdrCutoff)
gwas_data = gwas_data_db[ontology.name].results
gwas_data = gwas_data[gwas_data['COB'] == cob.name]
gwas_data = gwas_data[gwas_data['Term'] == term]
gwas_data = gwas_data[gwas_data['WindowSize'] == windowSize]
gwas_data = gwas_data[gwas_data['FlankLimit'] == flankLimit]
gwas_data = gwas_data[gwas_data['SNP2Gene'] == (
'strongest' if strongestSNPs else 'effective')]
gwas_data = gwas_data[gwas_data['Method'] == (
'density' if overlapDensity else 'locality')]
net['nodes'] = getNodes(
genes,
cob,
term,
gwasData=gwas_data,
nodeCutoff=nodeCutoff,
windowSize=windowSize,
flankLimit=flankLimit,
fdrCutoff=fdrCutoff
)
else:
# Otherwise just run it without GWAS Data
net['nodes'] = getNodes(
genes,
cob,
term,
nodeCutoff=nodeCutoff,
windowSize=windowSize,
flankLimit=flankLimit,
hpo=hpo)
# Get the edges of the nodes that will be rendered
render_list = []
for node in net['nodes'].values():
if node['data']['render']:
render_list.append(node['data']['id'])
net['edges'] = getEdges(render_list, cob)
# Tell what enrichment options are available
net['hasGO'] = cob._global('parent_refgen') in GOnt_db
net['hasGWS'] = hasGWS and (cob._global('parent_refgen') in func_data_db)
# Log Data Point to COB Log
cob.log(term + ': Found ' + str(len(net['nodes'])) + ' nodes, ' +
str(len(net['edges'])) + ' edges')
# Return it as a JSON object
return jsonify(net)
@app.route("/custom_network", methods=['POST'])
def custom_network():
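    """Build a co-expression network from a user-supplied gene list.
    Reads the POST form fields network, nodeCutoff, edgeCutoff, geneList
    and visNeighbors, and returns a JSON object with 'nodes', 'edges',
    'rejected', 'hasGO' and 'hasGWS'.
    """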
# Get data from the form
cob = networks[str(request.form['network'])]
nodeCutoff = safeOpts('nodeCutoff', int(request.form['nodeCutoff']))
edgeCutoff = safeOpts('edgeCutoff', float(request.form['edgeCutoff']))
geneList = str(request.form['geneList'])
    # Determine if we want neighbors or not
try:
int(request.form['visNeighbors'])
except ValueError:
visNeighbors = None
else:
visNeighbors = safeOpts('visNeighbors',
int(request.form['visNeighbors']))
# Make sure there aren't too many genes
geneList = list(
filter((lambda x: x != ''), re.split('\r| |,|;|\t|\n', geneList)))
if len(geneList) < geneLimit['min']:
abort(400)
elif len(geneList) > geneLimit['max']:
geneList = geneList[:geneLimit['max']]
# Set the edge score
cob.set_sig_edge_zscore(edgeCutoff)
# Get the genes
cob.log("Getting Neighbors")
primary = set()
neighbors = set()
render = set()
rejected = set(geneList)
for name in copy.copy(rejected):
# Find all the neighbors, sort by score
try:
gene = cob.refgen.from_id(name)
except ValueError:
continue
# Add this gene to the requisite lists
rejected.remove(name)
primary.add(gene.id)
render.add(gene.id)
if visNeighbors is not None:
# Get the neighbors from Camoco
nbs = cob.neighbors(
gene, names_as_index=False,
names_as_cols=True).sort_values('score')
# Strip everything except the gene IDs and add to the grand neighbor list
new_genes = list(set(nbs['gene_a']).union(set(nbs['gene_b'])))
# Build the set of genes that should be rendered
nbs = nbs[:visNeighbors]
render = render.union(set(nbs.gene_a).union(set(nbs.gene_b)))
# Remove the query gene if it's present
if gene.id in new_genes:
new_genes.remove(gene.id)
# Add to the set of neighbor genes
neighbors = neighbors.union(set(new_genes))
    # Get gene objects from IDs, but save both lists for later
genes_set = primary.union(neighbors)
genes = cob.refgen.from_ids(genes_set)
# Get the candidates
genes = cob.refgen.candidate_genes(
genes,
window_size=0,
flank_limit=0,
chain=True,
include_parent_locus=True,
#include_parent_attrs=['numIterations', 'avgEffectSize'],
include_num_intervening=True,
include_rank_intervening=True,
include_num_siblings=True)
# Filter the candidates down to the provided list of genes
genes = list(filter((lambda x: x.id in genes_set), genes))
# If there are no good genes, error out
if (len(genes) <= 0):
abort(400)
# Build up the objects
net = {}
net['nodes'] = getNodes(
genes,
cob,
'custom',
primary=primary,
render=render,
nodeCutoff=nodeCutoff)
net['rejected'] = list(rejected)
# Get the edges of the nodes that will be rendered
render_list = []
for node in net['nodes'].values():
if node['data']['render']:
render_list.append(node['data']['id'])
net['edges'] = getEdges(render_list, cob)
# Tell what enrichment options are available
net['hasGO'] = cob._global('parent_refgen') in GOnt_db
net['hasGWS'] = hasGWS and (cob._global('parent_refgen') in func_data_db)
# Log Data Point to COB Log
cob.log('Custom Term: Found ' + str(len(net['nodes'])) + ' nodes, ' +
str(len(net['edges'])) + ' edges')
return jsonify(net)
@app.route("/gene_connections", methods=['POST'])
def gene_connections():
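    """Return the significant edges among *allGenes*, filtered (when
    *newGenes* is non-empty) to edges touching at least one new gene.
    """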
# Get data from the form
cob = networks[str(request.form['network'])]
edgeCutoff = safeOpts('edgeCutoff', float(request.form['edgeCutoff']))
allGenes = str(request.form['allGenes'])
newGenes = str(request.form['newGenes'])
allGenes = list(
filter((lambda x: x != ''), re.split('\r| |,|;|\t|\n', allGenes)))
newGenes = set(
filter((lambda x: x != ''), re.split('\r| |,|;|\t|\n', newGenes)))
# Set the Significant Edge Score
cob.set_sig_edge_zscore(edgeCutoff)
# Get the edges!
edges = getEdges(allGenes, cob)
# Filter the ones that are not attached to the new one
if (len(newGenes) > 0):
edges = list(
filter(
lambda x: ((x['data']['source'] in newGenes) or (x['data']['target'] in newGenes)),
edges))
# Return it as a JSON object
return jsonify({'edges': edges})
@app.route("/gene_word_search", methods=['POST'])
def gene_word_search():
cob = networks[str(request.form['network'])]
pCutoff = safeOpts('pCutoff', float(request.form['pCutoff']))
geneList = str(request.form['geneList'])
geneList = list(
filter((lambda x: x != ''), re.split('\r| |,|;|\t|\n', geneList)))
# Run the analysis and return the JSONified results
if hasGWS and (cob._global('parent_refgen') in func_data_db):
results = geneWordSearch(
geneList, cob._global('parent_refgen'), minChance=pCutoff)
else:
abort(405)
if len(results[0]) == 0:
abort(400)
results = WordFreq.to_JSON_array(results[0])
return jsonify(result=results)
@app.route("/go_enrichment", methods=['POST'])
def go_enrichment():
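    """Run GO-term enrichment on a POSTed gene list against the GO ontology
    matching the network's reference genome; returns the enriched terms
    (id, name, desc, pval) as JSON.
    """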
cob = networks[str(request.form['network'])]
pCutoff = safeOpts('pCutoff', float(request.form['pCutoff']))
minTerm = safeOpts('minTerm', int(request.form['minTerm']))
maxTerm = safeOpts('maxTerm', int(request.form['maxTerm']))
geneList = str(request.form['geneList'])
# Parse the genes
geneList = list(
filter((lambda x: x != ''), re.split('\r| |,|;|\t|\n', geneList)))
# Get the things for enrichment
genes = cob.refgen.from_ids(geneList)
if cob._global('parent_refgen') in GOnt_db:
gont = GOnt_db[cob._global('parent_refgen')]
else:
abort(405)
# Run the enrichment
cob.log('Running GO Enrichment...')
enr = gont.enrichment(
genes,
pval_cutoff=pCutoff,
min_term_size=minTerm,
max_term_size=maxTerm)
if len(enr) == 0:
abort(400)
# Extract the results for returning
terms = []
for term in enr:
terms.append({
'id': term.id,
'pval': term.attrs['pval'],
'name': term.name,
'desc': term.desc
})
df = pd.DataFrame(terms).drop_duplicates(subset='id')
cob.log('Found {} enriched terms.', str(df.shape[0]))
return jsonify(df.to_json(orient='index'))
@app.route("/add_term",methods=['POST'])
def add_term():
    # Term creation is not implemented yet; reply 501 (Not Implemented).
    ontology = onts[str(request.form['ontology'])]
    abort(501)
# --------------------------------------------
# Function to Make Input Safe Again
# --------------------------------------------
def safeOpts(name, val):
# Get the parameters into range
val = int(val) if opts[name]['int'] else float(val)
val = min(val, opts[name]['max'])
val = max(val, opts[name]['min'])
return val
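# Illustrative: safeOpts('nodeCutoff', '25') -> 20 (cast to int, clamped to
# the 0-20 range); safeOpts('edgeCutoff', 0.5) -> 1.0 (clamped to >= 1.0)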
# --------------------------------------------
# Functions to get the nodes and edges
# --------------------------------------------
def getNodes(genes,
cob,
term,
primary=None,
render=None,
gwasData=pd.DataFrame(),
nodeCutoff=0,
windowSize=None,
flankLimit=None,
fdrCutoff=None,
hpo=False):
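    """Build the node objects for *genes* in network *cob*.
    Returns a dict mapping gene id -> cytoscape-style node dict
    ({'group': 'nodes', 'data': {...}}); 'render' is set to True only for
    genes that pass the degree/FDR filters.
    """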
# Cache the locality
locality = cob.locality(genes)
# Containers for the node info
nodes = {}
parent_set = set()
    # Look for aliases
aliases = co.RefGen(cob._global('parent_refgen')).aliases(
[gene.id for gene in genes])
# Look for annotations
if cob._global('parent_refgen') in func_data_db:
func_data = func_data_db[cob._global('parent_refgen')].get_annotations(
[gene.id for gene in genes])
else:
func_data = {}
# Pre cache a list of the contained genes
gwasDataGenes = set()
if not gwasData.empty:
gwasDataGenes = set(gwasData['gene'])
for gene in genes:
        # Catch for translating the way camoco works to the way we need for COB
try:
ldegree = locality.ix[gene.id]['local']
gdegree = locality.ix[gene.id]['global']
except KeyError as e:
ldegree = gdegree = 'nan'
# Catch for bug in camoco
try:
numInterv = str(gene.attr['num_intervening'])
rankIntervening = str(gene.attr['intervening_rank'])
numSiblings = str(gene.attr['num_siblings'])
except KeyError as e:
#print('Num Attr fail on gene: ' + str(gene.id))
numInterv = '-'
rankIntervening = '-'
numSiblings = '-'
# Pull any aliases from our database
alias = ''
if gene.id in aliases:
for a in aliases[gene.id]:
alias += a + ' '
# Fetch the FDR if we can
fdr = np.nan
if gene.id in gwasDataGenes:
fdr = gwasData[gwasData['gene'] == gene.id]['fdr'].min()
# Pull any annotations from our databases
anote = ''
if gene.id in func_data:
for a in func_data[gene.id]:
anote += a + ' '
# Fetch parent locus if we can
if 'parent_locus' not in gene.attr:
gene.attr['parent_locus'] = '[Unknown]{}:{}-{}'.format(
gene.chrom, gene.start, gene.end)
# Build the data object from our data
        node = {
            'group': 'nodes',
            'data': {
                'id': gene.id,
                'type': 'gene',
                'render': False,
                'term': term,
                'snp': gene.attr['parent_locus'].replace('<', '[').replace('>', ']'),
                'alias': alias,
                'origin': 'N/A',
                'chrom': str(gene.chrom),
                'start': str(gene.start),
                'end': str(gene.end),
                'cur_ldegree': str(0),
                'ldegree': str(ldegree),
                'gdegree': str(gdegree),
                'fdr': 'HPO' if hpo else str(fdr),
                'windowSize': str(windowSize),
                'flankLimit': str(flankLimit),
                'numIntervening': numInterv,
                'rankIntervening': rankIntervening,
                'numSiblings': numSiblings,
                # 'parentNumIterations': str(gene.attr['parent_numIterations']),
                # 'parentAvgEffectSize': str(gene.attr['parent_avgEffectSize']),
                'annotations': anote,
            }
        }
# Denote the query genes
if primary:
if gene.id in primary:
node['data']['origin'] = 'query'
else:
node['data']['origin'] = 'neighbor'
# Denote whether or not to render it
if ldegree >= nodeCutoff:
if (not fdrCutoff) or gwasData.empty or fdr <= fdrCutoff:
if (not render) or (gene.id in render):
node['data']['render'] = True
# Save the node to the list
nodes[gene.id] = node
return nodes
def getEdges(geneList, cob):
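    """Return the significant edges between *geneList* genes in *cob* as a
    list of cytoscape-style edge dicts with 'source', 'target' and 'weight'.
    """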
# Find the Edges for the genes we will render
subnet = cob.subnetwork(
cob.refgen.from_ids(geneList),
names_as_index=False,
names_as_cols=True)
# "Loop" to build the edge objects
edges = [{
'group': 'edges',
'data': {
'source': source,
'target': target,
'weight': str(weight)
}
} for source, target, weight, significant, distance in subnet.itertuples(
index=False)]
return edges
|
schae234/cob
|
cob/server.py
|
Python
|
mit
| 33,713
|
[
"Cytoscape"
] |
850475e05a5b663f546e229e25e7308947fcdb3974f1902a902fc3ff69d15007
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Provides interfaces to various commands provided by FreeSurfer
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../../testing/data'))
>>> os.chdir(datadir)
"""
from __future__ import print_function, division, unicode_literals, absolute_import
from builtins import range
import os
import os.path as op
from glob import glob
import shutil
import numpy as np
from nibabel import load
from ... import logging, LooseVersion
from ...utils.filemanip import fname_presuffix, check_depends
from ..io import FreeSurferSource
from ..base import (TraitedSpec, File, traits,
Directory, InputMultiPath,
OutputMultiPath, CommandLine,
CommandLineInputSpec, isdefined)
from .base import (FSCommand, FSTraitedSpec,
FSTraitedSpecOpenMP,
FSCommandOpenMP, Info)
from .utils import copy2subjdir
__docformat__ = 'restructuredtext'
iflogger = logging.getLogger('interface')
# Keeping this to avoid breaking external programs that depend on it, but
# this should not be used internally
FSVersion = Info.looseversion().vstring
class ParseDICOMDirInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, argstr='--d %s', mandatory=True,
desc='path to siemens dicom directory')
dicom_info_file = File('dicominfo.txt', argstr='--o %s', usedefault=True,
desc='file to which results are written')
sortbyrun = traits.Bool(argstr='--sortbyrun', desc='assign run numbers')
summarize = traits.Bool(argstr='--summarize',
desc='only print out info for run leaders')
class ParseDICOMDirOutputSpec(TraitedSpec):
dicom_info_file = File(exists=True,
desc='text file containing dicom information')
class ParseDICOMDir(FSCommand):
"""Uses mri_parse_sdcmdir to get information from dicom directories
Examples
--------
>>> from nipype.interfaces.freesurfer import ParseDICOMDir
>>> dcminfo = ParseDICOMDir()
>>> dcminfo.inputs.dicom_dir = '.'
>>> dcminfo.inputs.sortbyrun = True
>>> dcminfo.inputs.summarize = True
>>> dcminfo.cmdline # doctest: +ALLOW_UNICODE
'mri_parse_sdcmdir --d . --o dicominfo.txt --sortbyrun --summarize'
"""
_cmd = 'mri_parse_sdcmdir'
input_spec = ParseDICOMDirInputSpec
output_spec = ParseDICOMDirOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.dicom_info_file):
outputs['dicom_info_file'] = os.path.join(os.getcwd(), self.inputs.dicom_info_file)
return outputs
class UnpackSDICOMDirInputSpec(FSTraitedSpec):
source_dir = Directory(exists=True, argstr='-src %s',
mandatory=True,
desc='directory with the DICOM files')
output_dir = Directory(argstr='-targ %s',
desc='top directory into which the files will be unpacked')
run_info = traits.Tuple(traits.Int, traits.Str, traits.Str, traits.Str,
mandatory=True,
argstr='-run %d %s %s %s',
xor=('run_info', 'config', 'seq_config'),
desc='runno subdir format name : spec unpacking rules on cmdline')
config = File(exists=True, argstr='-cfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules in file')
seq_config = File(exists=True, argstr='-seqcfg %s',
mandatory=True,
xor=('run_info', 'config', 'seq_config'),
desc='specify unpacking rules based on sequence')
dir_structure = traits.Enum('fsfast', 'generic', argstr='-%s',
desc='unpack to specified directory structures')
no_info_dump = traits.Bool(argstr='-noinfodump',
desc='do not create infodump file')
scan_only = File(exists=True, argstr='-scanonly %s',
desc='only scan the directory and put result in file')
log_file = File(exists=True, argstr='-log %s',
                    desc='explicitly set log file')
spm_zeropad = traits.Int(argstr='-nspmzeropad %d',
desc='set frame number zero padding width for SPM')
no_unpack_err = traits.Bool(argstr='-no-unpackerr',
desc='do not try to unpack runs with errors')
class UnpackSDICOMDir(FSCommand):
"""Use unpacksdcmdir to convert dicom files
Call unpacksdcmdir -help from the command line to see more information on
using this command.
Examples
--------
>>> from nipype.interfaces.freesurfer import UnpackSDICOMDir
>>> unpack = UnpackSDICOMDir()
>>> unpack.inputs.source_dir = '.'
>>> unpack.inputs.output_dir = '.'
>>> unpack.inputs.run_info = (5, 'mprage', 'nii', 'struct')
>>> unpack.inputs.dir_structure = 'generic'
>>> unpack.cmdline # doctest: +ALLOW_UNICODE
'unpacksdcmdir -generic -targ . -run 5 mprage nii struct -src .'
"""
_cmd = 'unpacksdcmdir'
input_spec = UnpackSDICOMDirInputSpec
class MRIConvertInputSpec(FSTraitedSpec):
read_only = traits.Bool(argstr='--read_only',
desc='read the input volume')
no_write = traits.Bool(argstr='--no_write',
desc='do not write output')
in_info = traits.Bool(argstr='--in_info',
desc='display input info')
out_info = traits.Bool(argstr='--out_info',
desc='display output info')
in_stats = traits.Bool(argstr='--in_stats',
desc='display input stats')
out_stats = traits.Bool(argstr='--out_stats',
desc='display output stats')
in_matrix = traits.Bool(argstr='--in_matrix',
desc='display input matrix')
out_matrix = traits.Bool(argstr='--out_matrix',
desc='display output matrix')
in_i_size = traits.Int(argstr='--in_i_size %d',
desc='input i size')
in_j_size = traits.Int(argstr='--in_j_size %d',
desc='input j size')
in_k_size = traits.Int(argstr='--in_k_size %d',
desc='input k size')
force_ras = traits.Bool(argstr='--force_ras_good',
desc='use default when orientation info absent')
in_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
in_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--in_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
_orientations = ['LAI', 'LIA', 'ALI', 'AIL', 'ILA', 'IAL', 'LAS', 'LSA', 'ALS', 'ASL', 'SLA', 'SAL', 'LPI', 'LIP', 'PLI', 'PIL', 'ILP', 'IPL', 'LPS', 'LSP', 'PLS', 'PSL', 'SLP', 'SPL', 'RAI', 'RIA', 'ARI', 'AIR', 'IRA', 'IAR', 'RAS', 'RSA', 'ARS', 'ASR', 'SRA', 'SAR', 'RPI', 'RIP', 'PRI', 'PIR', 'IRP', 'IPR', 'RPS', 'RSP', 'PRS', 'PSR', 'SRP', 'SPR']
# _orientations = [comb for comb in itertools.chain(*[[''.join(c) for c in itertools.permutations(s)] for s in [a+b+c for a in 'LR' for b in 'AP' for c in 'IS']])]
in_orientation = traits.Enum(_orientations,
argstr='--in_orientation %s',
desc='specify the input orientation')
in_center = traits.List(traits.Float, maxlen=3,
argstr='--in_center %s',
desc='<R coordinate> <A coordinate> <S coordinate>')
sphinx = traits.Bool(argstr='--sphinx',
desc='change orientation info to sphinx')
out_i_count = traits.Int(argstr='--out_i_count %d',
desc='some count ?? in i direction')
out_j_count = traits.Int(argstr='--out_j_count %d',
desc='some count ?? in j direction')
out_k_count = traits.Int(argstr='--out_k_count %d',
desc='some count ?? in k direction')
vox_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-voxsize %f %f %f',
desc='<size_x> <size_y> <size_z> specify the size (mm) - useful for upsampling or downsampling')
out_i_size = traits.Int(argstr='--out_i_size %d',
desc='output i size')
out_j_size = traits.Int(argstr='--out_j_size %d',
desc='output j size')
out_k_size = traits.Int(argstr='--out_k_size %d',
desc='output k size')
out_i_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_i_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_j_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_j_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_k_dir = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_k_direction %f %f %f',
desc='<R direction> <A direction> <S direction>')
out_orientation = traits.Enum(_orientations,
argstr='--out_orientation %s',
desc='specify the output orientation')
out_center = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='--out_center %f %f %f',
desc='<R coordinate> <A coordinate> <S coordinate>')
out_datatype = traits.Enum('uchar', 'short', 'int', 'float',
argstr='--out_data_type %s',
desc='output data type <uchar|short|int|float>')
resample_type = traits.Enum('interpolate', 'weighted', 'nearest', 'sinc', 'cubic',
argstr='--resample_type %s',
desc='<interpolate|weighted|nearest|sinc|cubic> (default is interpolate)')
no_scale = traits.Bool(argstr='--no_scale 1',
                           desc='do not rescale values for COR')
no_change = traits.Bool(argstr='--nochange',
desc="don't change type of input to that of template")
tr = traits.Int(argstr='-tr %d',
desc='TR in msec')
te = traits.Int(argstr='-te %d',
desc='TE in msec')
ti = traits.Int(argstr='-ti %d',
desc='TI in msec (note upper case flag)')
autoalign_matrix = File(exists=True, argstr='--autoalign %s',
desc='text file with autoalign matrix')
unwarp_gradient = traits.Bool(argstr='--unwarp_gradient_nonlinearity',
desc='unwarp gradient nonlinearity')
apply_transform = File(exists=True, argstr='--apply_transform %s',
desc='apply xfm file')
apply_inv_transform = File(exists=True, argstr='--apply_inverse_transform %s',
desc='apply inverse transformation xfm file')
devolve_transform = traits.Str(argstr='--devolvexfm %s',
desc='subject id')
crop_center = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--crop %d %d %d',
desc='<x> <y> <z> crop to 256 around center (x, y, z)')
crop_size = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--cropsize %d %d %d',
desc='<dx> <dy> <dz> crop to size <dx, dy, dz>')
cut_ends = traits.Int(argstr='--cutends %d',
desc='remove ncut slices from the ends')
slice_crop = traits.Tuple(traits.Int, traits.Int,
argstr='--slice-crop %d %d',
desc='s_start s_end : keep slices s_start to s_end')
slice_reverse = traits.Bool(argstr='--slice-reverse',
desc='reverse order of slices, update vox2ras')
slice_bias = traits.Float(argstr='--slice-bias %f',
desc='apply half-cosine bias field')
fwhm = traits.Float(argstr='--fwhm %f',
desc='smooth input volume by fwhm mm')
_filetypes = ['cor', 'mgh', 'mgz', 'minc', 'analyze',
'analyze4d', 'spm', 'afni', 'brik', 'bshort',
'bfloat', 'sdt', 'outline', 'otl', 'gdf',
'nifti1', 'nii', 'niigz']
_infiletypes = ['ge', 'gelx', 'lx', 'ximg', 'siemens', 'dicom', 'siemens_dicom']
in_type = traits.Enum(_filetypes + _infiletypes, argstr='--in_type %s',
desc='input file type')
out_type = traits.Enum(_filetypes, argstr='--out_type %s',
desc='output file type')
ascii = traits.Bool(argstr='--ascii',
desc='save output as ascii col>row>slice>frame')
reorder = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--reorder %d %d %d',
desc='olddim1 olddim2 olddim3')
invert_contrast = traits.Float(argstr='--invert_contrast %f',
                                   desc='threshold for inverting contrast')
in_file = File(exists=True, mandatory=True,
position=-2,
argstr='--input_volume %s',
desc='File to read/convert')
out_file = File(argstr='--output_volume %s',
position=-1, genfile=True,
desc='output filename or True to generate one')
conform = traits.Bool(argstr='--conform',
desc='conform to 1mm voxel size in coronal slice direction with 256^3 or more')
conform_min = traits.Bool(argstr='--conform_min',
desc='conform to smallest size')
conform_size = traits.Float(argstr='--conform_size %s',
desc='conform to size_in_mm')
cw256 = traits.Bool(argstr='--cw256',
                        desc='conform to dimensions of 256^3')
parse_only = traits.Bool(argstr='--parse_only',
desc='parse input only')
subject_name = traits.Str(argstr='--subject_name %s',
desc='subject name ???')
reslice_like = File(exists=True, argstr='--reslice_like %s',
desc='reslice output to match file')
template_type = traits.Enum(_filetypes + _infiletypes,
argstr='--template_type %s',
desc='template file type')
split = traits.Bool(argstr='--split',
desc='split output frames into separate output files.')
frame = traits.Int(argstr='--frame %d',
desc='keep only 0-based frame number')
midframe = traits.Bool(argstr='--mid-frame',
desc='keep only the middle frame')
skip_n = traits.Int(argstr='--nskip %d',
desc='skip the first n frames')
drop_n = traits.Int(argstr='--ndrop %d',
desc='drop the last n frames')
frame_subsample = traits.Tuple(traits.Int, traits.Int, traits.Int,
argstr='--fsubsample %d %d %d',
desc='start delta end : frame subsampling (end = -1 for end)')
in_scale = traits.Float(argstr='--scale %f',
desc='input intensity scale factor')
out_scale = traits.Float(argstr='--out-scale %d',
desc='output intensity scale factor')
in_like = File(exists=True, argstr='--in_like %s',
desc='input looks like')
fill_parcellation = traits.Bool(argstr='--fill_parcellation',
desc='fill parcellation')
smooth_parcellation = traits.Bool(argstr='--smooth_parcellation',
desc='smooth parcellation')
zero_outlines = traits.Bool(argstr='--zero_outlines',
desc='zero outlines')
color_file = File(exists=True, argstr='--color_file %s',
desc='color file')
no_translate = traits.Bool(argstr='--no_translate',
desc='???')
status_file = File(argstr='--status %s',
desc='status file for DICOM conversion')
sdcm_list = File(exists=True, argstr='--sdcmlist %s',
desc='list of DICOM files for conversion')
    template_info = traits.Bool(argstr='--template_info',
desc='dump info about template')
crop_gdf = traits.Bool(argstr='--crop_gdf',
desc='apply GDF cropping')
zero_ge_z_offset = traits.Bool(argstr='--zero_ge_z_offset',
desc='zero ge z offset ???')
class MRIConvertOutputSpec(TraitedSpec):
out_file = OutputMultiPath(File(exists=True), desc='converted output file')
class MRIConvert(FSCommand):
"""use fs mri_convert to manipulate files
.. note::
Adds niigz as an output type option
Examples
--------
>>> mc = MRIConvert()
>>> mc.inputs.in_file = 'structural.nii'
>>> mc.inputs.out_file = 'outfile.mgz'
>>> mc.inputs.out_type = 'mgz'
>>> mc.cmdline # doctest: +ALLOW_UNICODE
'mri_convert --out_type mgz --input_volume structural.nii --output_volume outfile.mgz'
"""
_cmd = 'mri_convert'
input_spec = MRIConvertInputSpec
output_spec = MRIConvertOutputSpec
filemap = dict(cor='cor', mgh='mgh', mgz='mgz', minc='mnc',
afni='brik', brik='brik', bshort='bshort',
spm='img', analyze='img', analyze4d='img',
bfloat='bfloat', nifti1='img', nii='nii',
niigz='nii.gz')
def _format_arg(self, name, spec, value):
if name in ['in_type', 'out_type', 'template_type']:
if value == 'niigz':
return spec.argstr % 'nii'
return super(MRIConvert, self)._format_arg(name, spec, value)
def _get_outfilename(self):
outfile = self.inputs.out_file
if not isdefined(outfile):
if isdefined(self.inputs.out_type):
suffix = '_out.' + self.filemap[self.inputs.out_type]
else:
suffix = '_out.nii.gz'
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix=suffix,
use_ext=False)
return os.path.abspath(outfile)
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self._get_outfilename()
if isdefined(self.inputs.split) and self.inputs.split:
size = load(self.inputs.in_file).shape
if len(size) == 3:
tp = 1
else:
tp = size[-1]
if outfile.endswith('.mgz'):
stem = outfile.split('.mgz')[0]
ext = '.mgz'
elif outfile.endswith('.nii.gz'):
stem = outfile.split('.nii.gz')[0]
ext = '.nii.gz'
else:
stem = '.'.join(outfile.split('.')[:-1])
ext = '.' + outfile.split('.')[-1]
outfile = []
for idx in range(0, tp):
outfile.append(stem + '%04d' % idx + ext)
if isdefined(self.inputs.out_type):
if self.inputs.out_type in ['spm', 'analyze']:
# generate all outputs
size = load(self.inputs.in_file).shape
if len(size) == 3:
tp = 1
else:
tp = size[-1]
# have to take care of all the frame manipulations
raise Exception('Not taking frame manipulations into account- please warn the developers')
outfiles = []
outfile = self._get_outfilename()
for i in range(tp):
outfiles.append(fname_presuffix(outfile,
suffix='%03d' % (i + 1)))
outfile = outfiles
outputs['out_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'out_file':
return self._get_outfilename()
return None
class DICOMConvertInputSpec(FSTraitedSpec):
dicom_dir = Directory(exists=True, mandatory=True,
desc='dicom directory from which to convert dicom files')
base_output_dir = Directory(mandatory=True,
desc='directory in which subject directories are created')
subject_dir_template = traits.Str('S.%04d', usedefault=True,
desc='template for subject directory name')
subject_id = traits.Any(desc='subject identifier to insert into template')
file_mapping = traits.List(traits.Tuple(traits.Str, traits.Str),
desc='defines the output fields of interface')
out_type = traits.Enum('niigz', MRIConvertInputSpec._filetypes,
usedefault=True,
desc='defines the type of output file produced')
dicom_info = File(exists=True,
desc='File containing summary information from mri_parse_sdcmdir')
seq_list = traits.List(traits.Str,
requires=['dicom_info'],
desc='list of pulse sequence names to be converted.')
ignore_single_slice = traits.Bool(requires=['dicom_info'],
desc='ignore volumes containing a single slice')
class DICOMConvert(FSCommand):
"""use fs mri_convert to convert dicom files
Examples
--------
>>> from nipype.interfaces.freesurfer import DICOMConvert
>>> cvt = DICOMConvert()
>>> cvt.inputs.dicom_dir = 'dicomdir'
>>> cvt.inputs.file_mapping = [('nifti', '*.nii'), ('info', 'dicom*.txt'), ('dti', '*dti.bv*')]
"""
_cmd = 'mri_convert'
input_spec = DICOMConvertInputSpec
def _get_dicomfiles(self):
"""validate fsl bet options
if set to None ignore
"""
return glob(os.path.abspath(os.path.join(self.inputs.dicom_dir,
'*-1.dcm')))
def _get_outdir(self):
"""returns output directory"""
subjid = self.inputs.subject_id
if not isdefined(subjid):
path, fname = os.path.split(self._get_dicomfiles()[0])
subjid = int(fname.split('-')[0])
if isdefined(self.inputs.subject_dir_template):
subjid = self.inputs.subject_dir_template % subjid
basedir = self.inputs.base_output_dir
if not isdefined(basedir):
basedir = os.path.abspath('.')
outdir = os.path.abspath(os.path.join(basedir, subjid))
return outdir
def _get_runs(self):
"""Returns list of dicom series that should be converted.
Requires a dicom info summary file generated by ``DicomDirInfo``
"""
seq = np.genfromtxt(self.inputs.dicom_info, dtype=object)
runs = []
for s in seq:
if self.inputs.seq_list:
if self.inputs.ignore_single_slice:
if (int(s[8]) > 1) and any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
if any([s[12].startswith(sn) for sn in self.inputs.seq_list]):
runs.append(int(s[2]))
else:
runs.append(int(s[2]))
return runs
def _get_filelist(self, outdir):
"""Returns list of files to be converted"""
filemap = {}
for f in self._get_dicomfiles():
head, fname = os.path.split(f)
fname, ext = os.path.splitext(fname)
fileparts = fname.split('-')
runno = int(fileparts[1])
out_type = MRIConvert.filemap[self.inputs.out_type]
outfile = os.path.join(outdir, '.'.join(('%s-%02d' % (fileparts[0],
runno),
out_type)))
filemap[runno] = (f, outfile)
if self.inputs.dicom_info:
files = [filemap[r] for r in self._get_runs()]
else:
files = [filemap[r] for r in list(filemap.keys())]
return files
@property
def cmdline(self):
""" `command` plus any arguments (args)
validates arguments and generates command line"""
self._check_mandatory_inputs()
outdir = self._get_outdir()
cmd = []
if not os.path.exists(outdir):
cmdstr = 'python -c "import os; os.makedirs(\'%s\')"' % outdir
cmd.extend([cmdstr])
infofile = os.path.join(outdir, 'shortinfo.txt')
if not os.path.exists(infofile):
cmdstr = 'dcmdir-info-mgh %s > %s' % (self.inputs.dicom_dir,
infofile)
cmd.extend([cmdstr])
files = self._get_filelist(outdir)
for infile, outfile in files:
if not os.path.exists(outfile):
single_cmd = '%s %s %s' % (self.cmd, infile,
os.path.join(outdir, outfile))
cmd.extend([single_cmd])
return '; '.join(cmd)
class ResampleInputSpec(FSTraitedSpec):
in_file = File(exists=True, argstr='-i %s', mandatory=True,
desc='file to resample', position=-2)
resampled_file = File(argstr='-o %s', desc='output filename', genfile=True,
position=-1)
voxel_size = traits.Tuple(traits.Float, traits.Float, traits.Float,
argstr='-vs %.2f %.2f %.2f', desc='triplet of output voxel sizes',
mandatory=True)
class ResampleOutputSpec(TraitedSpec):
resampled_file = File(exists=True,
desc='output filename')
class Resample(FSCommand):
"""Use FreeSurfer mri_convert to up or down-sample image files
Examples
--------
>>> from nipype.interfaces import freesurfer
>>> resampler = freesurfer.Resample()
>>> resampler.inputs.in_file = 'structural.nii'
>>> resampler.inputs.resampled_file = 'resampled.nii'
>>> resampler.inputs.voxel_size = (2.1, 2.1, 2.1)
>>> resampler.cmdline # doctest: +ALLOW_UNICODE
'mri_convert -vs 2.10 2.10 2.10 -i structural.nii -o resampled.nii'
"""
_cmd = 'mri_convert'
input_spec = ResampleInputSpec
output_spec = ResampleOutputSpec
def _get_outfilename(self):
if isdefined(self.inputs.resampled_file):
outfile = self.inputs.resampled_file
else:
outfile = fname_presuffix(self.inputs.in_file,
newpath=os.getcwd(),
suffix='_resample')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['resampled_file'] = self._get_outfilename()
return outputs
def _gen_filename(self, name):
if name == 'resampled_file':
return self._get_outfilename()
return None
class ReconAllInputSpec(CommandLineInputSpec):
subject_id = traits.Str("recon_all", argstr='-subjid %s',
desc='subject name', usedefault=True)
directive = traits.Enum('all', 'autorecon1',
# autorecon2 variants
'autorecon2', 'autorecon2-volonly',
'autorecon2-perhemi', 'autorecon2-inflate1',
'autorecon2-cp', 'autorecon2-wm',
# autorecon3 variants
'autorecon3', 'autorecon3-T2pial',
# Mix of autorecon2 and autorecon3 steps
'autorecon-pial', 'autorecon-hemi',
# Not "multi-stage flags"
'localGI', 'qcache',
argstr='-%s', desc='process directive',
usedefault=True, position=0)
hemi = traits.Enum('lh', 'rh', desc='hemisphere to process',
argstr="-hemi %s")
T1_files = InputMultiPath(File(exists=True), argstr='-i %s...',
desc='name of T1 file to process')
T2_file = File(exists=True, argstr="-T2 %s", min_ver='5.3.0',
desc='Convert T2 image to orig directory')
use_T2 = traits.Bool(argstr="-T2pial", min_ver='5.3.0',
desc='Use converted T2 to refine the cortical surface')
openmp = traits.Int(argstr="-openmp %d",
desc="Number of processors to use in parallel")
parallel = traits.Bool(argstr="-parallel",
desc="Enable parallel execution")
hires = traits.Bool(argstr="-hires", min_ver='6.0.0',
desc="Conform to minimum voxel size (for voxels < 1mm)")
mprage = traits.Bool(argstr='-mprage',
desc=('Assume scan parameters are MGH MP-RAGE '
'protocol, which produces darker gray matter'))
big_ventricles = traits.Bool(argstr='-bigventricles',
desc=('For use in subjects with enlarged '
'ventricles'))
brainstem = traits.Bool(argstr='-brainstem-structures',
desc='Segment brainstem structures')
hippocampal_subfields_T1 = traits.Bool(
argstr='-hippocampal-subfields-T1', min_ver='6.0.0',
desc='segment hippocampal subfields using input T1 scan')
hippocampal_subfields_T2 = traits.Tuple(
File(exists=True), traits.Str(),
argstr='-hippocampal-subfields-T2 %s %s', min_ver='6.0.0',
desc=('segment hippocampal subfields using T2 scan, identified by '
'ID (may be combined with hippocampal_subfields_T1)'))
expert = File(exists=True, argstr='-expert %s',
desc="Set parameters using expert file")
xopts = traits.Enum("use", "clean", "overwrite", argstr='-xopts-%s',
desc="Use, delete or overwrite existing expert options file")
subjects_dir = Directory(exists=True, argstr='-sd %s', hash_files=False,
desc='path to subjects directory', genfile=True)
flags = InputMultiPath(traits.Str, argstr='%s', desc='additional parameters')
# Expert options
talairach = traits.Str(desc="Flags to pass to talairach commands", xor=['expert'])
mri_normalize = traits.Str(desc="Flags to pass to mri_normalize commands", xor=['expert'])
mri_watershed = traits.Str(desc="Flags to pass to mri_watershed commands", xor=['expert'])
mri_em_register = traits.Str(desc="Flags to pass to mri_em_register commands", xor=['expert'])
mri_ca_normalize = traits.Str(desc="Flags to pass to mri_ca_normalize commands", xor=['expert'])
mri_ca_register = traits.Str(desc="Flags to pass to mri_ca_register commands", xor=['expert'])
mri_remove_neck = traits.Str(desc="Flags to pass to mri_remove_neck commands", xor=['expert'])
mri_ca_label = traits.Str(desc="Flags to pass to mri_ca_label commands", xor=['expert'])
mri_segstats = traits.Str(desc="Flags to pass to mri_segstats commands", xor=['expert'])
mri_mask = traits.Str(desc="Flags to pass to mri_mask commands", xor=['expert'])
mri_segment = traits.Str(desc="Flags to pass to mri_segment commands", xor=['expert'])
mri_edit_wm_with_aseg = traits.Str(desc="Flags to pass to mri_edit_wm_with_aseg commands", xor=['expert'])
mri_pretess = traits.Str(desc="Flags to pass to mri_pretess commands", xor=['expert'])
mri_fill = traits.Str(desc="Flags to pass to mri_fill commands", xor=['expert'])
mri_tessellate = traits.Str(desc="Flags to pass to mri_tessellate commands", xor=['expert'])
mris_smooth = traits.Str(desc="Flags to pass to mri_smooth commands", xor=['expert'])
mris_inflate = traits.Str(desc="Flags to pass to mri_inflate commands", xor=['expert'])
mris_sphere = traits.Str(desc="Flags to pass to mris_sphere commands", xor=['expert'])
mris_fix_topology = traits.Str(desc="Flags to pass to mris_fix_topology commands", xor=['expert'])
mris_make_surfaces = traits.Str(desc="Flags to pass to mris_make_surfaces commands", xor=['expert'])
mris_surf2vol = traits.Str(desc="Flags to pass to mris_surf2vol commands", xor=['expert'])
mris_register = traits.Str(desc="Flags to pass to mris_register commands", xor=['expert'])
mrisp_paint = traits.Str(desc="Flags to pass to mrisp_paint commands", xor=['expert'])
mris_ca_label = traits.Str(desc="Flags to pass to mris_ca_label commands", xor=['expert'])
mris_anatomical_stats = traits.Str(desc="Flags to pass to mris_anatomical_stats commands", xor=['expert'])
mri_aparc2aseg = traits.Str(desc="Flags to pass to mri_aparc2aseg commands", xor=['expert'])
class ReconAllOutputSpec(FreeSurferSource.output_spec):
subjects_dir = Directory(exists=True, desc='Freesurfer subjects directory.')
subject_id = traits.Str(desc='Subject name for whom to retrieve data')
class ReconAll(CommandLine):
"""Uses recon-all to generate surfaces and parcellations of structural data
from anatomical images of a subject.
Examples
--------
>>> from nipype.interfaces.freesurfer import ReconAll
>>> reconall = ReconAll()
>>> reconall.inputs.subject_id = 'foo'
>>> reconall.inputs.directive = 'all'
>>> reconall.inputs.subjects_dir = '.'
>>> reconall.inputs.T1_files = 'structural.nii'
>>> reconall.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -subjid foo -sd .'
>>> reconall.inputs.flags = "-qcache"
>>> reconall.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -qcache -subjid foo -sd .'
>>> reconall.inputs.flags = ["-cw256", "-qcache"]
>>> reconall.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -cw256 -qcache -subjid foo -sd .'
Hemisphere may be specified regardless of directive:
>>> reconall.inputs.flags = []
>>> reconall.inputs.hemi = 'lh'
>>> reconall.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -hemi lh -subjid foo -sd .'
``-autorecon-hemi`` uses the ``-hemi`` input to specify the hemisphere
to operate upon:
>>> reconall.inputs.directive = 'autorecon-hemi'
>>> reconall.cmdline # doctest: +ALLOW_UNICODE
'recon-all -autorecon-hemi lh -i structural.nii -subjid foo -sd .'
Hippocampal subfields can accept T1 and T2 images:
>>> reconall_subfields = ReconAll()
>>> reconall_subfields.inputs.subject_id = 'foo'
>>> reconall_subfields.inputs.directive = 'all'
>>> reconall_subfields.inputs.subjects_dir = '.'
>>> reconall_subfields.inputs.T1_files = 'structural.nii'
>>> reconall_subfields.inputs.hippocampal_subfields_T1 = True
>>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -hippocampal-subfields-T1 -subjid foo -sd .'
>>> reconall_subfields.inputs.hippocampal_subfields_T2 = (
... 'structural.nii', 'test')
>>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -hippocampal-subfields-T1T2 structural.nii test -subjid foo -sd .'
>>> reconall_subfields.inputs.hippocampal_subfields_T1 = False
>>> reconall_subfields.cmdline # doctest: +ALLOW_UNICODE
'recon-all -all -i structural.nii -hippocampal-subfields-T2 structural.nii test -subjid foo -sd .'
"""
_cmd = 'recon-all'
_additional_metadata = ['loc', 'altkey']
input_spec = ReconAllInputSpec
output_spec = ReconAllOutputSpec
_can_resume = True
force_run = False
# Steps are based off of the recon-all tables [0,1] describing, inputs,
# commands, and outputs of each step of the recon-all process,
# controlled by flags.
#
# Each step is a 3-tuple containing (flag, [outputs], [inputs])
# A step is considered complete if all of its outputs exist and are newer
# than the inputs. An empty input list indicates input mtimes will not
# be checked. This may need updating, if users are working with manually
# edited files.
#
# [0] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV5.3
# [1] https://surfer.nmr.mgh.harvard.edu/fswiki/ReconAllTableStableV6.0
_autorecon1_steps = [
('motioncor', ['mri/rawavg.mgz', 'mri/orig.mgz'], []),
('talairach', ['mri/orig_nu.mgz',
'mri/transforms/talairach.auto.xfm',
'mri/transforms/talairach.xfm',
# 'mri/transforms/talairach_avi.log',
], []),
('nuintensitycor', ['mri/nu.mgz'], []),
('normalization', ['mri/T1.mgz'], []),
('skullstrip', ['mri/transforms/talairach_with_skull.lta',
'mri/brainmask.auto.mgz',
'mri/brainmask.mgz'], []),
]
if Info.looseversion() < LooseVersion("6.0.0"):
_autorecon2_volonly_steps = [
('gcareg', ['mri/transforms/talairach.lta'], []),
('canorm', ['mri/norm.mgz'], []),
('careg', ['mri/transforms/talairach.m3z'], []),
('careginv', ['mri/transforms/talairach.m3z.inv.x.mgz',
'mri/transforms/talairach.m3z.inv.y.mgz',
'mri/transforms/talairach.m3z.inv.z.mgz',
], []),
('rmneck', ['mri/nu_noneck.mgz'], []),
('skull-lta', ['mri/transforms/talairach_with_skull_2.lta'], []),
('calabel', ['mri/aseg.auto_noCCseg.mgz',
'mri/aseg.auto.mgz',
'mri/aseg.mgz'], []),
('normalization2', ['mri/brain.mgz'], []),
('maskbfs', ['mri/brain.finalsurfs.mgz'], []),
('segmentation', ['mri/wm.seg.mgz',
'mri/wm.asegedit.mgz',
'mri/wm.mgz'], []),
('fill', ['mri/filled.mgz',
# 'scripts/ponscc.cut.log',
], []),
]
_autorecon2_lh_steps = [
('tessellate', ['surf/lh.orig.nofix'], []),
('smooth1', ['surf/lh.smoothwm.nofix'], []),
('inflate1', ['surf/lh.inflated.nofix'], []),
('qsphere', ['surf/lh.qsphere.nofix'], []),
('fix', ['surf/lh.orig'], []),
('white', ['surf/lh.white', 'surf/lh.curv', 'surf/lh.area',
'label/lh.cortex.label'], []),
('smooth2', ['surf/lh.smoothwm'], []),
('inflate2', ['surf/lh.inflated', 'surf/lh.sulc',
'surf/lh.inflated.H', 'surf/lh.inflated.K'], []),
# Undocumented in ReconAllTableStableV5.3
('curvstats', ['stats/lh.curv.stats'], []),
]
_autorecon3_lh_steps = [
('sphere', ['surf/lh.sphere'], []),
('surfreg', ['surf/lh.sphere.reg'], []),
('jacobian_white', ['surf/lh.jacobian_white'], []),
('avgcurv', ['surf/lh.avg_curv'], []),
('cortparc', ['label/lh.aparc.annot'], []),
('pial', ['surf/lh.pial', 'surf/lh.curv.pial', 'surf/lh.area.pial',
'surf/lh.thickness'], []),
# Misnamed outputs in ReconAllTableStableV5.3: ?h.w-c.pct.mgz
('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []),
('parcstats', ['stats/lh.aparc.stats'], []),
('cortparc2', ['label/lh.aparc.a2009s.annot'], []),
('parcstats2', ['stats/lh.aparc.a2009s.stats'], []),
# Undocumented in ReconAllTableStableV5.3
('cortparc3', ['label/lh.aparc.DKTatlas40.annot'], []),
# Undocumented in ReconAllTableStableV5.3
('parcstats3', ['stats/lh.aparc.a2009s.stats'], []),
('label-exvivo-ec', ['label/lh.entorhinal_exvivo.label'], []),
]
_autorecon3_added_steps = [
('cortribbon', ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz',
'mri/ribbon.mgz'], []),
('segstats', ['stats/aseg.stats'], []),
('aparc2aseg', ['mri/aparc+aseg.mgz',
'mri/aparc.a2009s+aseg.mgz'], []),
('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []),
('balabels', ['label/BA.ctab', 'label/BA.thresh.ctab'], []),
]
else:
_autorecon2_volonly_steps = [
('gcareg', ['mri/transforms/talairach.lta'], []),
('canorm', ['mri/norm.mgz'], []),
('careg', ['mri/transforms/talairach.m3z'], []),
('calabel', ['mri/aseg.auto_noCCseg.mgz',
'mri/aseg.auto.mgz',
'mri/aseg.mgz'], []),
('normalization2', ['mri/brain.mgz'], []),
('maskbfs', ['mri/brain.finalsurfs.mgz'], []),
('segmentation', ['mri/wm.seg.mgz',
'mri/wm.asegedit.mgz',
'mri/wm.mgz'], []),
('fill', ['mri/filled.mgz',
# 'scripts/ponscc.cut.log',
], []),
]
_autorecon2_lh_steps = [
('tessellate', ['surf/lh.orig.nofix'], []),
('smooth1', ['surf/lh.smoothwm.nofix'], []),
('inflate1', ['surf/lh.inflated.nofix'], []),
('qsphere', ['surf/lh.qsphere.nofix'], []),
('fix', ['surf/lh.orig'], []),
('white', ['surf/lh.white.preaparc', 'surf/lh.curv',
'surf/lh.area', 'label/lh.cortex.label'], []),
('smooth2', ['surf/lh.smoothwm'], []),
('inflate2', ['surf/lh.inflated', 'surf/lh.sulc'], []),
('curvHK', ['surf/lh.white.H', 'surf/lh.white.K',
'surf/lh.inflated.H', 'surf/lh.inflated.K'], []),
('curvstats', ['stats/lh.curv.stats'], []),
]
_autorecon3_lh_steps = [
('sphere', ['surf/lh.sphere'], []),
('surfreg', ['surf/lh.sphere.reg'], []),
('jacobian_white', ['surf/lh.jacobian_white'], []),
('avgcurv', ['surf/lh.avg_curv'], []),
('cortparc', ['label/lh.aparc.annot'], []),
('pial', ['surf/lh.pial', 'surf/lh.curv.pial',
'surf/lh.area.pial', 'surf/lh.thickness',
'surf/lh.white'], []),
('parcstats', ['stats/lh.aparc.stats'], []),
('cortparc2', ['label/lh.aparc.a2009s.annot'], []),
('parcstats2', ['stats/lh.aparc.a2009s.stats'], []),
('cortparc3', ['label/lh.aparc.DKTatlas.annot'], []),
('parcstats3', ['stats/lh.aparc.DKTatlas.stats'], []),
('pctsurfcon', ['surf/lh.w-g.pct.mgh'], []),
]
_autorecon3_added_steps = [
('cortribbon', ['mri/lh.ribbon.mgz', 'mri/rh.ribbon.mgz',
'mri/ribbon.mgz'], []),
('hyporelabel', ['mri/aseg.presurf.hypos.mgz'], []),
('aparc2aseg', ['mri/aparc+aseg.mgz',
'mri/aparc.a2009s+aseg.mgz',
'mri/aparc.DKTatlas+aseg.mgz'], []),
('apas2aseg', ['mri/aseg.mgz'], ['mri/aparc+aseg.mgz']),
('segstats', ['stats/aseg.stats'], []),
('wmparc', ['mri/wmparc.mgz', 'stats/wmparc.stats'], []),
# Note that this is a very incomplete list; however the ctab
# files are last to be touched, so this should be reasonable
('balabels', ['label/BA_exvivo.ctab',
'label/BA_exvivo.thresh.ctab',
'label/lh.entorhinal_exvivo.label',
'label/rh.entorhinal_exvivo.label'], []),
]
# Fill out autorecon2 steps
_autorecon2_rh_steps = [
(step, [out.replace('lh', 'rh') for out in outs], ins)
for step, outs, ins in _autorecon2_lh_steps]
_autorecon2_perhemi_steps = [
(step, [of for out in outs
for of in (out, out.replace('lh', 'rh'))], ins)
for step, outs, ins in _autorecon2_lh_steps]
_autorecon2_steps = _autorecon2_volonly_steps + _autorecon2_perhemi_steps
# Fill out autorecon3 steps
_autorecon3_rh_steps = [
(step, [out.replace('lh', 'rh') for out in outs], ins)
for step, outs, ins in _autorecon3_lh_steps]
_autorecon3_perhemi_steps = [
(step, [of for out in outs
for of in (out, out.replace('lh', 'rh'))], ins)
for step, outs, ins in _autorecon3_lh_steps]
_autorecon3_steps = _autorecon3_perhemi_steps + _autorecon3_added_steps
# Fill out autorecon-hemi lh/rh steps
_autorecon_lh_steps = (_autorecon2_lh_steps + _autorecon3_lh_steps)
_autorecon_rh_steps = (_autorecon2_rh_steps + _autorecon3_rh_steps)
_steps = _autorecon1_steps + _autorecon2_steps + _autorecon3_steps
_binaries = ['talairach', 'mri_normalize', 'mri_watershed',
'mri_em_register', 'mri_ca_normalize', 'mri_ca_register',
'mri_remove_neck', 'mri_ca_label', 'mri_segstats',
'mri_mask', 'mri_segment', 'mri_edit_wm_with_aseg',
'mri_pretess', 'mri_fill', 'mri_tessellate', 'mris_smooth',
'mris_inflate', 'mris_sphere', 'mris_fix_topology',
'mris_make_surfaces', 'mris_surf2vol', 'mris_register',
'mrisp_paint', 'mris_ca_label', 'mris_anatomical_stats',
'mri_aparc2aseg']
def _gen_subjects_dir(self):
return os.getcwd()
def _gen_filename(self, name):
if name == 'subjects_dir':
return self._gen_subjects_dir()
return None
def _list_outputs(self):
"""
See io.FreeSurferSource.outputs for the list of outputs returned
"""
if isdefined(self.inputs.subjects_dir):
subjects_dir = self.inputs.subjects_dir
else:
subjects_dir = self._gen_subjects_dir()
if isdefined(self.inputs.hemi):
hemi = self.inputs.hemi
else:
hemi = 'both'
outputs = self._outputs().get()
outputs.update(FreeSurferSource(subject_id=self.inputs.subject_id,
subjects_dir=subjects_dir,
hemi=hemi)._list_outputs())
outputs['subject_id'] = self.inputs.subject_id
outputs['subjects_dir'] = subjects_dir
return outputs
def _is_resuming(self):
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
if os.path.isdir(os.path.join(subjects_dir, self.inputs.subject_id,
'mri')):
return True
return False
def _format_arg(self, name, trait_spec, value):
if name == 'T1_files':
if self._is_resuming():
return None
if name == 'hippocampal_subfields_T1' and \
isdefined(self.inputs.hippocampal_subfields_T2):
return None
if all((name == 'hippocampal_subfields_T2',
isdefined(self.inputs.hippocampal_subfields_T1) and
self.inputs.hippocampal_subfields_T1)):
argstr = trait_spec.argstr.replace('T2', 'T1T2')
return argstr % value
if name == 'directive' and value == 'autorecon-hemi':
if not isdefined(self.inputs.hemi):
raise ValueError("Directive 'autorecon-hemi' requires hemi "
"input to be set")
value += ' ' + self.inputs.hemi
if all((name == 'hemi',
isdefined(self.inputs.directive) and
self.inputs.directive == 'autorecon-hemi')):
return None
return super(ReconAll, self)._format_arg(name, trait_spec, value)
@property
def cmdline(self):
cmd = super(ReconAll, self).cmdline
# Adds '-expert' flag if expert flags are passed
# Mutually exclusive with 'expert' input parameter
cmd += self._prep_expert_file()
if not self._is_resuming():
return cmd
subjects_dir = self.inputs.subjects_dir
if not isdefined(subjects_dir):
subjects_dir = self._gen_subjects_dir()
# Check only relevant steps
directive = self.inputs.directive
if not isdefined(directive):
steps = []
elif directive == 'autorecon1':
steps = self._autorecon1_steps
elif directive == 'autorecon2-volonly':
steps = self._autorecon2_volonly_steps
elif directive == 'autorecon2-perhemi':
steps = self._autorecon2_perhemi_steps
elif directive.startswith('autorecon2'):
if isdefined(self.inputs.hemi):
if self.inputs.hemi == 'lh':
steps = (self._autorecon2_volonly_steps +
self._autorecon2_lh_steps)
else:
steps = (self._autorecon2_volonly_steps +
self._autorecon2_rh_steps)
else:
steps = self._autorecon2_steps
elif directive == 'autorecon-hemi':
if self.inputs.hemi == 'lh':
steps = self._autorecon_lh_steps
else:
steps = self._autorecon_rh_steps
elif directive == 'autorecon3':
steps = self._autorecon3_steps
else:
steps = self._steps
no_run = True
flags = []
for step, outfiles, infiles in steps:
flag = '-{}'.format(step)
noflag = '-no{}'.format(step)
if noflag in cmd:
continue
elif flag in cmd:
no_run = False
continue
subj_dir = os.path.join(subjects_dir, self.inputs.subject_id)
if check_depends([os.path.join(subj_dir, f) for f in outfiles],
[os.path.join(subj_dir, f) for f in infiles]):
flags.append(noflag)
else:
no_run = False
if no_run and not self.force_run:
iflogger.info('recon-all complete : Not running')
return "echo recon-all: nothing to do"
cmd += ' ' + ' '.join(flags)
iflogger.info('resume recon-all : %s' % cmd)
return cmd
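    # Illustrative note (editor addition): when resuming an existing subject,
    # the loop above appends ``-no<step>`` flags for every step whose outputs
    # are all present and newer than its inputs, so a resumed run may look
    # roughly like
    #   recon-all -all -i structural.nii -subjid foo -sd . -nomotioncor -notalairach ...
    # The exact set of skipped steps depends on file modification times; if
    # every step is already complete (and force_run is False) an echo command
    # is returned instead of invoking recon-all.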
def _prep_expert_file(self):
if isdefined(self.inputs.expert):
return ''
lines = []
for binary in self._binaries:
args = getattr(self.inputs, binary)
if isdefined(args):
lines.append('{} {}\n'.format(binary, args))
if lines == []:
return ''
contents = ''.join(lines)
if not isdefined(self.inputs.xopts) and \
self._get_expert_file() == contents:
return ' -xopts-use'
expert_fname = os.path.abspath('expert.opts')
with open(expert_fname, 'w') as fobj:
fobj.write(contents)
return ' -expert {}'.format(expert_fname)
def _get_expert_file(self):
# Read pre-existing options file, if it exists
if isdefined(self.inputs.subjects_dir):
subjects_dir = self.inputs.subjects_dir
else:
subjects_dir = self._gen_subjects_dir()
xopts_file = os.path.join(subjects_dir, self.inputs.subject_id,
'scripts', 'expert-options')
if not os.path.exists(xopts_file):
return ''
with open(xopts_file, 'r') as fobj:
return fobj.read()
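    # Illustrative note (editor addition): expert options are written one
    # binary per line, "<binary> <flags>", e.g. (hypothetical values):
    #   mris_inflate -n 15
    #   mri_normalize -mprage
    # If an identical scripts/expert-options file already exists for the
    # subject and ``xopts`` is unset, ``-xopts-use`` is passed instead of
    # writing a new expert.opts file.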
class BBRegisterInputSpec(FSTraitedSpec):
subject_id = traits.Str(argstr='--s %s',
desc='freesurfer subject id',
mandatory=True)
source_file = File(argstr='--mov %s',
desc='source file to be registered',
mandatory=True, copyfile=False)
init = traits.Enum('spm', 'fsl', 'header', argstr='--init-%s',
mandatory=True, xor=['init_reg_file'],
desc='initialize registration spm, fsl, header')
init_reg_file = File(exists=True, argstr='--init-reg %s',
desc='existing registration file',
xor=['init'], mandatory=True)
contrast_type = traits.Enum('t1', 't2', 'bold', 'dti', argstr='--%s',
desc='contrast type of image',
mandatory=True)
intermediate_file = File(exists=True, argstr="--int %s",
desc="Intermediate image, e.g. in case of partial FOV")
reg_frame = traits.Int(argstr="--frame %d", xor=["reg_middle_frame"],
desc="0-based frame index for 4D source file")
reg_middle_frame = traits.Bool(argstr="--mid-frame", xor=["reg_frame"],
desc="Register middle frame of 4D source file")
out_reg_file = File(argstr='--reg %s',
desc='output registration file',
genfile=True)
spm_nifti = traits.Bool(argstr="--spm-nii",
desc="force use of nifti rather than analyze with SPM")
epi_mask = traits.Bool(argstr="--epi-mask",
desc="mask out B0 regions in stages 1 and 2")
dof = traits.Enum(6, 9, 12, argstr='--%d',
desc='number of transform degrees of freedom')
fsldof = traits.Int(argstr='--fsl-dof %d',
desc='degrees of freedom for initial registration (FSL)')
out_fsl_file = traits.Either(traits.Bool, File, argstr="--fslmat %s",
desc="write the transformation matrix in FSL FLIRT format")
out_lta_file = traits.Either(traits.Bool, File, argstr="--lta %s", min_ver='5.2.0',
desc="write the transformation matrix in LTA format")
registered_file = traits.Either(traits.Bool, File, argstr='--o %s',
                                    desc='output warped source file; either True or filename')
init_cost_file = traits.Either(traits.Bool, File, argstr='--initcost %s',
desc='output initial registration cost file')
class BBRegisterInputSpec6(BBRegisterInputSpec):
init = traits.Enum('coreg', 'rr', 'spm', 'fsl', 'header', 'best', argstr='--init-%s',
xor=['init_reg_file'],
desc='initialize registration with mri_coreg, spm, fsl, or header')
init_reg_file = File(exists=True, argstr='--init-reg %s',
desc='existing registration file',
xor=['init'])
class BBRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc='Output registration file')
out_fsl_file = File(exists=True, desc='Output FLIRT-style registration file')
out_lta_file = File(exists=True, desc='Output LTA-style registration file')
min_cost_file = File(exists=True, desc='Output registration minimum cost file')
init_cost_file = File(exists=True, desc='Output initial registration cost file')
registered_file = File(exists=True, desc='Registered and resampled source file')
class BBRegister(FSCommand):
"""Use FreeSurfer bbregister to register a volume to the Freesurfer anatomical.
This program performs within-subject, cross-modal registration using a
boundary-based cost function. It is required that you have an anatomical
scan of the subject that has already been recon-all-ed using freesurfer.
Examples
--------
>>> from nipype.interfaces.freesurfer import BBRegister
>>> bbreg = BBRegister(subject_id='me', source_file='structural.nii', init='header', contrast_type='t2')
>>> bbreg.cmdline # doctest: +ALLOW_UNICODE
'bbregister --t2 --init-header --reg structural_bbreg_me.dat --mov structural.nii --s me'
"""
_cmd = 'bbregister'
if LooseVersion('0.0.0') < Info.looseversion() < LooseVersion("6.0.0"):
input_spec = BBRegisterInputSpec
else:
input_spec = BBRegisterInputSpec6
output_spec = BBRegisterOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
_in = self.inputs
if isdefined(_in.out_reg_file):
outputs['out_reg_file'] = op.abspath(_in.out_reg_file)
elif _in.source_file:
suffix = '_bbreg_%s.dat' % _in.subject_id
outputs['out_reg_file'] = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
if isdefined(_in.registered_file):
if isinstance(_in.registered_file, bool):
outputs['registered_file'] = fname_presuffix(_in.source_file,
suffix='_bbreg')
else:
outputs['registered_file'] = op.abspath(_in.registered_file)
if isdefined(_in.out_lta_file):
if isinstance(_in.out_lta_file, bool):
suffix = '_bbreg_%s.lta' % _in.subject_id
out_lta_file = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
outputs['out_lta_file'] = out_lta_file
else:
outputs['out_lta_file'] = op.abspath(_in.out_lta_file)
if isdefined(_in.out_fsl_file):
if isinstance(_in.out_fsl_file, bool):
suffix = '_bbreg_%s.mat' % _in.subject_id
out_fsl_file = fname_presuffix(_in.source_file,
suffix=suffix,
use_ext=False)
outputs['out_fsl_file'] = out_fsl_file
else:
outputs['out_fsl_file'] = op.abspath(_in.out_fsl_file)
if isdefined(_in.init_cost_file):
if isinstance(_in.out_fsl_file, bool):
outputs['init_cost_file'] = outputs['out_reg_file'] + '.initcost'
else:
outputs['init_cost_file'] = op.abspath(_in.init_cost_file)
outputs['min_cost_file'] = outputs['out_reg_file'] + '.mincost'
return outputs
def _format_arg(self, name, spec, value):
if name in ('registered_file', 'out_fsl_file', 'out_lta_file',
'init_cost_file') and isinstance(value, bool):
value = self._list_outputs()[name]
return super(BBRegister, self)._format_arg(name, spec, value)
def _gen_filename(self, name):
if name == 'out_reg_file':
return self._list_outputs()[name]
return None
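    # Illustrative note (editor addition): with the defaults above,
    # registering source_file='func.nii' for subject_id='me' produces
    # 'func_bbreg_me.dat'; setting out_lta_file=True or out_fsl_file=True
    # yields the same pattern with '.lta' and '.mat' extensions respectively
    # (filenames here are hypothetical; the pattern follows _list_outputs).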
class ApplyVolTransformInputSpec(FSTraitedSpec):
source_file = File(exists=True, argstr='--mov %s',
copyfile=False, mandatory=True,
desc='Input volume you wish to transform')
transformed_file = File(desc='Output volume', argstr='--o %s', genfile=True)
_targ_xor = ('target_file', 'tal', 'fs_target')
target_file = File(exists=True, argstr='--targ %s', xor=_targ_xor,
desc='Output template volume', mandatory=True)
tal = traits.Bool(argstr='--tal', xor=_targ_xor, mandatory=True,
desc='map to a sub FOV of MNI305 (with --reg only)')
tal_resolution = traits.Float(argstr="--talres %.10f",
desc="Resolution to sample when using tal")
fs_target = traits.Bool(argstr='--fstarg', xor=_targ_xor, mandatory=True,
requires=['reg_file'],
desc='use orig.mgz from subject in regfile as target')
_reg_xor = ('reg_file', 'lta_file', 'lta_inv_file', 'fsl_reg_file', 'xfm_reg_file',
'reg_header', 'mni_152_reg', 'subject')
reg_file = File(exists=True, xor=_reg_xor, argstr='--reg %s',
mandatory=True,
desc='tkRAS-to-tkRAS matrix (tkregister2 format)')
lta_file = File(exists=True, xor=_reg_xor, argstr='--lta %s',
mandatory=True, desc='Linear Transform Array file')
lta_inv_file = File(exists=True, xor=_reg_xor, argstr='--lta-inv %s',
mandatory=True, desc='LTA, invert')
fsl_reg_file = File(exists=True, xor=_reg_xor, argstr='--fsl %s',
mandatory=True,
desc='fslRAS-to-fslRAS matrix (FSL format)')
xfm_reg_file = File(exists=True, xor=_reg_xor, argstr='--xfm %s',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix (MNI format)')
reg_header = traits.Bool(xor=_reg_xor, argstr='--regheader',
mandatory=True,
desc='ScannerRAS-to-ScannerRAS matrix = identity')
mni_152_reg = traits.Bool(xor=_reg_xor, argstr='--regheader', mandatory=True,
desc='target MNI152 space')
subject = traits.Str(xor=_reg_xor, argstr='--s %s', mandatory=True,
desc='set matrix = identity and use subject for any templates')
inverse = traits.Bool(desc='sample from target to source',
argstr='--inv')
interp = traits.Enum('trilin', 'nearest', 'cubic', argstr='--interp %s',
desc='Interpolation method (<trilin> or nearest)')
no_resample = traits.Bool(desc='Do not resample; just change vox2ras matrix',
argstr='--no-resample')
m3z_file = File(argstr="--m3z %s",
desc=('This is the morph to be applied to the volume. '
'Unless the morph is in mri/transforms (eg.: for '
'talairach.m3z computed by reconall), you will need '
'to specify the full path to this morph and use the '
'--noDefM3zPath flag.'))
no_ded_m3z_path = traits.Bool(argstr="--noDefM3zPath",
requires=['m3z_file'],
desc=('To be used with the m3z flag. '
                                        'Instructs the code not to look for the '
'm3z morph in the default location '
'(SUBJECTS_DIR/subj/mri/transforms), '
'but instead just use the path '
'indicated in --m3z.'))
invert_morph = traits.Bool(argstr="--inv-morph",
requires=['m3z_file'],
desc=('Compute and use the inverse of the '
'non-linear morph to resample the input '
'volume. To be used by --m3z.'))
class ApplyVolTransformOutputSpec(TraitedSpec):
transformed_file = File(exists=True, desc='Path to output file if used normally')
class ApplyVolTransform(FSCommand):
"""Use FreeSurfer mri_vol2vol to apply a transform.
Examples
--------
>>> from nipype.interfaces.freesurfer import ApplyVolTransform
>>> applyreg = ApplyVolTransform()
>>> applyreg.inputs.source_file = 'structural.nii'
>>> applyreg.inputs.reg_file = 'register.dat'
>>> applyreg.inputs.transformed_file = 'struct_warped.nii'
>>> applyreg.inputs.fs_target = True
>>> applyreg.cmdline # doctest: +ALLOW_UNICODE
'mri_vol2vol --fstarg --reg register.dat --mov structural.nii --o struct_warped.nii'
"""
_cmd = 'mri_vol2vol'
input_spec = ApplyVolTransformInputSpec
output_spec = ApplyVolTransformOutputSpec
def _get_outfile(self):
outfile = self.inputs.transformed_file
if not isdefined(outfile):
if self.inputs.inverse is True:
if self.inputs.fs_target is True:
src = 'orig.mgz'
else:
src = self.inputs.target_file
else:
src = self.inputs.source_file
outfile = fname_presuffix(src,
newpath=os.getcwd(),
suffix='_warped')
return outfile
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['transformed_file'] = os.path.abspath(self._get_outfile())
return outputs
def _gen_filename(self, name):
if name == 'transformed_file':
return self._get_outfile()
return None
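    # Illustrative note (editor addition): if transformed_file is not set,
    # the default output name is derived from the source file (or, when
    # inverse=True, from the target file, falling back to orig.mgz for
    # fs_target) with a '_warped' suffix in the current working directory,
    # per _get_outfile above.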
class SmoothInputSpec(FSTraitedSpec):
in_file = File(exists=True, desc='source volume',
argstr='--i %s', mandatory=True)
reg_file = File(desc='registers volume to surface anatomical ',
argstr='--reg %s', mandatory=True,
exists=True)
smoothed_file = File(desc='output volume', argstr='--o %s', genfile=True)
proj_frac_avg = traits.Tuple(traits.Float, traits.Float, traits.Float,
xor=['proj_frac'],
                                 desc='average along normal min max delta',
argstr='--projfrac-avg %.2f %.2f %.2f')
    proj_frac = traits.Float(desc='project frac of thickness along surface normal',
xor=['proj_frac_avg'],
argstr='--projfrac %s')
surface_fwhm = traits.Range(low=0.0, requires=['reg_file'],
mandatory=True, xor=['num_iters'],
desc='surface FWHM in mm', argstr='--fwhm %f')
num_iters = traits.Range(low=1, xor=['surface_fwhm'],
mandatory=True, argstr='--niters %d',
desc='number of iterations instead of fwhm')
vol_fwhm = traits.Range(low=0.0, argstr='--vol-fwhm %f',
desc='volume smoothing outside of surface')
class SmoothOutputSpec(TraitedSpec):
smoothed_file = File(exists=True, desc='smoothed input volume')
class Smooth(FSCommand):
"""Use FreeSurfer mris_volsmooth to smooth a volume
    This function smooths cortical regions on a surface and non-cortical
regions in volume.
.. note::
Cortical voxels are mapped to the surface (3D->2D) and then the
smoothed values from the surface are put back into the volume to fill
the cortical ribbon. If data is smoothed with this algorithm, one has to
be careful about how further processing is interpreted.
Examples
--------
>>> from nipype.interfaces.freesurfer import Smooth
>>> smoothvol = Smooth(in_file='functional.nii', smoothed_file = 'foo_out.nii', reg_file='register.dat', surface_fwhm=10, vol_fwhm=6)
>>> smoothvol.cmdline # doctest: +ALLOW_UNICODE
'mris_volsmooth --i functional.nii --reg register.dat --o foo_out.nii --fwhm 10.000000 --vol-fwhm 6.000000'
"""
_cmd = 'mris_volsmooth'
input_spec = SmoothInputSpec
output_spec = SmoothOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outfile = self.inputs.smoothed_file
if not isdefined(outfile):
outfile = self._gen_fname(self.inputs.in_file,
suffix='_smooth')
outputs['smoothed_file'] = outfile
return outputs
def _gen_filename(self, name):
if name == 'smoothed_file':
return self._list_outputs()[name]
return None
class RobustRegisterInputSpec(FSTraitedSpec):
source_file = File(exists=True, mandatory=True, argstr='--mov %s',
desc='volume to be registered')
target_file = File(exists=True, mandatory=True, argstr='--dst %s',
desc='target volume for the registration')
out_reg_file = traits.Either(
True, File, default=True, usedefault=True, argstr='--lta %s',
desc='registration file; either True or filename')
registered_file = traits.Either(
traits.Bool, File, argstr='--warp %s',
desc='registered image; either True or filename')
weights_file = traits.Either(
traits.Bool, File, argstr='--weights %s',
desc='weights image to write; either True or filename')
est_int_scale = traits.Bool(
argstr='--iscale',
desc='estimate intensity scale (recommended for unnormalized images)')
trans_only = traits.Bool(argstr='--transonly',
desc='find 3 parameter translation only')
in_xfm_file = File(exists=True, argstr='--transform',
desc='use initial transform on source')
half_source = traits.Either(
traits.Bool, File, argstr='--halfmov %s',
desc="write source volume mapped to halfway space")
half_targ = traits.Either(
traits.Bool, File, argstr="--halfdst %s",
desc="write target volume mapped to halfway space")
half_weights = traits.Either(
traits.Bool, File, argstr="--halfweights %s",
desc="write weights volume mapped to halfway space")
half_source_xfm = traits.Either(
traits.Bool, File, argstr="--halfmovlta %s",
desc="write transform from source to halfway space")
half_targ_xfm = traits.Either(
traits.Bool, File, argstr="--halfdstlta %s",
desc="write transform from target to halfway space")
auto_sens = traits.Bool(
argstr='--satit', xor=['outlier_sens'], mandatory=True,
desc='auto-detect good sensitivity')
outlier_sens = traits.Float(
argstr='--sat %.4f', xor=['auto_sens'], mandatory=True,
desc='set outlier sensitivity explicitly')
least_squares = traits.Bool(
argstr='--leastsquares',
desc='use least squares instead of robust estimator')
no_init = traits.Bool(argstr='--noinit', desc='skip transform init')
init_orient = traits.Bool(
argstr='--initorient',
desc='use moments for initial orient (recommended for stripped brains)'
)
max_iterations = traits.Int(argstr='--maxit %d',
desc='maximum # of times on each resolution')
high_iterations = traits.Int(argstr='--highit %d',
desc='max # of times on highest resolution')
iteration_thresh = traits.Float(
argstr='--epsit %.3f', desc='stop iterations when below threshold')
subsample_thresh = traits.Int(
argstr='--subsample %d',
desc='subsample if dimension is above threshold size')
outlier_limit = traits.Float(argstr='--wlimit %.3f',
desc='set maximal outlier limit in satit')
write_vo2vox = traits.Bool(
argstr='--vox2vox', desc='output vox2vox matrix (default is RAS2RAS)')
no_multi = traits.Bool(argstr='--nomulti',
desc='work on highest resolution')
mask_source = File(exists=True, argstr='--maskmov %s',
desc='image to mask source volume with')
mask_target = File(exists=True, argstr='--maskdst %s',
desc='image to mask target volume with')
force_double = traits.Bool(argstr='--doubleprec',
desc='use double-precision intensities')
force_float = traits.Bool(argstr='--floattype',
desc='use float intensities')
class RobustRegisterOutputSpec(TraitedSpec):
out_reg_file = File(exists=True, desc="output registration file")
registered_file = File(exists=True,
desc="output image with registration applied")
weights_file = File(exists=True, desc="image of weights used")
half_source = File(exists=True,
desc="source image mapped to halfway space")
half_targ = File(exists=True, desc="target image mapped to halfway space")
half_weights = File(exists=True,
desc="weights image mapped to halfway space")
half_source_xfm = File(
exists=True,
desc="transform file to map source image to halfway space")
half_targ_xfm = File(
exists=True,
desc="transform file to map target image to halfway space")
class RobustRegister(FSCommand):
"""Perform intramodal linear registration (translation and rotation) using
robust statistics.
Examples
--------
>>> from nipype.interfaces.freesurfer import RobustRegister
>>> reg = RobustRegister()
>>> reg.inputs.source_file = 'structural.nii'
>>> reg.inputs.target_file = 'T1.nii'
>>> reg.inputs.auto_sens = True
>>> reg.inputs.init_orient = True
>>> reg.cmdline # doctest: +ALLOW_UNICODE +ELLIPSIS
'mri_robust_register --satit --initorient --lta .../structural_robustreg.lta --mov structural.nii --dst T1.nii'
References
----------
Reuter, M, Rosas, HD, and Fischl, B, (2010). Highly Accurate Inverse
Consistent Registration: A Robust Approach. Neuroimage 53(4) 1181-96.
"""
_cmd = 'mri_robust_register'
input_spec = RobustRegisterInputSpec
output_spec = RobustRegisterOutputSpec
def _format_arg(self, name, spec, value):
options = ("out_reg_file", "registered_file", "weights_file",
"half_source", "half_targ", "half_weights",
"half_source_xfm", "half_targ_xfm")
if name in options and isinstance(value, bool):
value = self._list_outputs()[name]
return super(RobustRegister, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
cwd = os.getcwd()
prefices = dict(src=self.inputs.source_file,
trg=self.inputs.target_file)
suffices = dict(out_reg_file=("src", "_robustreg.lta", False),
registered_file=("src", "_robustreg", True),
weights_file=("src", "_robustweights", True),
half_source=("src", "_halfway", True),
half_targ=("trg", "_halfway", True),
half_weights=("src", "_halfweights", True),
half_source_xfm=("src", "_robustxfm.lta", False),
half_targ_xfm=("trg", "_robustxfm.lta", False))
for name, sufftup in list(suffices.items()):
value = getattr(self.inputs, name)
if value:
if value is True:
outputs[name] = fname_presuffix(prefices[sufftup[0]],
suffix=sufftup[1],
newpath=cwd,
use_ext=sufftup[2])
else:
outputs[name] = os.path.abspath(value)
return outputs
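    # Illustrative note (editor addition): when the boolean form of an output
    # trait is used, default names come from the suffix table above, e.g. a
    # source file 'structural.nii' gives 'structural_robustreg.lta' for
    # out_reg_file and 'structural_robustreg.nii' for registered_file, both
    # written to the current working directory.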
class FitMSParamsInputSpec(FSTraitedSpec):
in_files = traits.List(File(exists=True), argstr="%s", position=-2, mandatory=True,
desc="list of FLASH images (must be in mgh format)")
tr_list = traits.List(traits.Int, desc="list of TRs of the input files (in msec)")
te_list = traits.List(traits.Float, desc="list of TEs of the input files (in msec)")
flip_list = traits.List(traits.Int, desc="list of flip angles of the input files")
xfm_list = traits.List(File(exists=True),
desc="list of transform files to apply to each FLASH image")
out_dir = Directory(argstr="%s", position=-1, genfile=True,
desc="directory to store output in")
class FitMSParamsOutputSpec(TraitedSpec):
t1_image = File(exists=True, desc="image of estimated T1 relaxation values")
pd_image = File(exists=True, desc="image of estimated proton density values")
t2star_image = File(exists=True, desc="image of estimated T2* values")
class FitMSParams(FSCommand):
"""Estimate tissue paramaters from a set of FLASH images.
Examples
--------
>>> from nipype.interfaces.freesurfer import FitMSParams
>>> msfit = FitMSParams()
>>> msfit.inputs.in_files = ['flash_05.mgz', 'flash_30.mgz']
>>> msfit.inputs.out_dir = 'flash_parameters'
>>> msfit.cmdline # doctest: +ALLOW_UNICODE
'mri_ms_fitparms flash_05.mgz flash_30.mgz flash_parameters'
"""
_cmd = "mri_ms_fitparms"
input_spec = FitMSParamsInputSpec
output_spec = FitMSParamsOutputSpec
def _format_arg(self, name, spec, value):
if name == "in_files":
cmd = ""
for i, file in enumerate(value):
if isdefined(self.inputs.tr_list):
cmd = " ".join((cmd, "-tr %.1f" % self.inputs.tr_list[i]))
if isdefined(self.inputs.te_list):
cmd = " ".join((cmd, "-te %.3f" % self.inputs.te_list[i]))
if isdefined(self.inputs.flip_list):
cmd = " ".join((cmd, "-fa %.1f" % self.inputs.flip_list[i]))
if isdefined(self.inputs.xfm_list):
cmd = " ".join((cmd, "-at %s" % self.inputs.xfm_list[i]))
cmd = " ".join((cmd, file))
return cmd
return super(FitMSParams, self)._format_arg(name, spec, value)
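    # Illustrative sketch (editor addition): with hypothetical inputs
    # tr_list=[20, 20], te_list=[3.0, 3.0] and flip_list=[5, 30], the
    # in_files argument expands to roughly
    #   -tr 20.0 -te 3.000 -fa 5.0 flash_05.mgz -tr 20.0 -te 3.000 -fa 30.0 flash_30.mgz
    # i.e. per-file acquisition parameters are interleaved with the file
    # names; only the flag layout is dictated by the formatting code above.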
def _list_outputs(self):
outputs = self.output_spec().get()
if not isdefined(self.inputs.out_dir):
out_dir = self._gen_filename("out_dir")
else:
out_dir = self.inputs.out_dir
outputs["t1_image"] = os.path.join(out_dir, "T1.mgz")
outputs["pd_image"] = os.path.join(out_dir, "PD.mgz")
outputs["t2star_image"] = os.path.join(out_dir, "T2star.mgz")
return outputs
def _gen_filename(self, name):
if name == "out_dir":
return os.getcwd()
return None
class SynthesizeFLASHInputSpec(FSTraitedSpec):
fixed_weighting = traits.Bool(position=1, argstr="-w",
desc="use a fixed weighting to generate optimal gray/white contrast")
tr = traits.Float(mandatory=True, position=2, argstr="%.2f",
desc="repetition time (in msec)")
flip_angle = traits.Float(mandatory=True, position=3, argstr="%.2f",
desc="flip angle (in degrees)")
te = traits.Float(mandatory=True, position=4, argstr="%.3f",
desc="echo time (in msec)")
t1_image = File(exists=True, mandatory=True, position=5, argstr="%s",
desc="image of T1 values")
pd_image = File(exists=True, mandatory=True, position=6, argstr="%s",
desc="image of proton density values")
out_file = File(genfile=True, argstr="%s", desc="image to write")
class SynthesizeFLASHOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="synthesized FLASH acquisition")
class SynthesizeFLASH(FSCommand):
"""Synthesize a FLASH acquisition from T1 and proton density maps.
Examples
--------
>>> from nipype.interfaces.freesurfer import SynthesizeFLASH
>>> syn = SynthesizeFLASH(tr=20, te=3, flip_angle=30)
>>> syn.inputs.t1_image = 'T1.mgz'
>>> syn.inputs.pd_image = 'PD.mgz'
>>> syn.inputs.out_file = 'flash_30syn.mgz'
>>> syn.cmdline # doctest: +ALLOW_UNICODE
'mri_synthesize 20.00 30.00 3.000 T1.mgz PD.mgz flash_30syn.mgz'
"""
_cmd = "mri_synthesize"
input_spec = SynthesizeFLASHInputSpec
output_spec = SynthesizeFLASHOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
if isdefined(self.inputs.out_file):
outputs["out_file"] = self.inputs.out_file
else:
outputs["out_file"] = self._gen_fname("synth-flash_%02d.mgz" % self.inputs.flip_angle,
suffix="")
return outputs
def _gen_filename(self, name):
if name == "out_file":
return self._list_outputs()["out_file"]
return None
class MNIBiasCorrectionInputSpec(FSTraitedSpec):
# mandatory
in_file = File(exists=True, mandatory=True, argstr="--i %s",
desc="input volume. Input can be any format accepted by mri_convert.")
# optional
out_file = File(argstr="--o %s", name_source=['in_file'],
name_template='%s_output', hash_files=False, keep_extension=True,
desc="output volume. Output can be any format accepted by mri_convert. " +
"If the output format is COR, then the directory must exist.")
iterations = traits.Int(4, argstr="--n %d",
desc="Number of iterations to run nu_correct. Default is 4. This is the number of times " +
"that nu_correct is repeated (ie, using the output from the previous run as the input for " +
"the next). This is different than the -iterations option to nu_correct.")
protocol_iterations = traits.Int(argstr="--proto-iters %d",
desc="Passes Np as argument of the -iterations flag of nu_correct. This is different " +
"than the --n flag above. Default is not to pass nu_correct the -iterations flag.")
distance = traits.Int(argstr="--distance %d", desc="N3 -distance option")
no_rescale = traits.Bool(argstr="--no-rescale",
desc="do not rescale so that global mean of output == input global mean")
mask = File(exists=True, argstr="--mask %s",
desc="brainmask volume. Input can be any format accepted by mri_convert.")
transform = File(exists=True, argstr="--uchar %s",
desc="tal.xfm. Use mri_make_uchar instead of conforming")
stop = traits.Float(argstr="--stop %f",
desc="Convergence threshold below which iteration stops (suggest 0.01 to 0.0001)")
shrink = traits.Int(argstr="--shrink %d",
desc="Shrink parameter for finer sampling (default is 4)")
class MNIBiasCorrectionOutputSpec(TraitedSpec):
out_file = File(exists=True, desc="output volume")
class MNIBiasCorrection(FSCommand):
""" Wrapper for nu_correct, a program from the Montreal Neurological Insitute (MNI)
used for correcting intensity non-uniformity (ie, bias fields). You must have the
MNI software installed on your system to run this. See [www.bic.mni.mcgill.ca/software/N3]
for more info.
mri_nu_correct.mni uses float internally instead of uchar. It also rescales the output so
that the global mean is the same as that of the input. These two changes are linked and
can be turned off with --no-float
Examples
--------
>>> from nipype.interfaces.freesurfer import MNIBiasCorrection
>>> correct = MNIBiasCorrection()
>>> correct.inputs.in_file = "norm.mgz"
>>> correct.inputs.iterations = 6
>>> correct.inputs.protocol_iterations = 1000
>>> correct.inputs.distance = 50
>>> correct.cmdline # doctest: +ALLOW_UNICODE
'mri_nu_correct.mni --distance 50 --i norm.mgz --n 6 --o norm_output.mgz --proto-iters 1000'
    References
----------
[http://freesurfer.net/fswiki/mri_nu_correct.mni]
[http://www.bic.mni.mcgill.ca/software/N3]
[https://github.com/BIC-MNI/N3]
"""
_cmd = "mri_nu_correct.mni"
input_spec = MNIBiasCorrectionInputSpec
output_spec = MNIBiasCorrectionOutputSpec
class WatershedSkullStripInputSpec(FSTraitedSpec):
# required
in_file = File(argstr="%s", exists=True, mandatory=True,
position=-2, desc="input volume")
out_file = File('brainmask.auto.mgz', argstr="%s", exists=False,
mandatory=True, position=-1, usedefault=True,
desc="output volume")
# optional
t1 = traits.Bool(
argstr="-T1", desc="specify T1 input volume (T1 grey value = 110)")
brain_atlas = File(argstr="-brain_atlas %s",
exists=True, position=-4, desc="")
transform = File(argstr="%s", exists=False,
position=-3, desc="undocumented")
class WatershedSkullStripOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="skull stripped brain volume")
class WatershedSkullStrip(FSCommand):
""" This program strips skull and other outer non-brain tissue and
produces the brain volume from T1 volume or the scanned volume.
The "watershed" segmentation algorithm was used to dertermine the
intensity values for white matter, grey matter, and CSF.
A force field was then used to fit a spherical surface to the brain.
The shape of the surface fit was then evaluated against a previously
derived template.
The default parameters are: -w 0.82 -b 0.32 -h 10 -seedpt -ta -wta
(Segonne 2004)
Examples
========
>>> from nipype.interfaces.freesurfer import WatershedSkullStrip
>>> skullstrip = WatershedSkullStrip()
>>> skullstrip.inputs.in_file = "T1.mgz"
>>> skullstrip.inputs.t1 = True
>>> skullstrip.inputs.transform = "transforms/talairach_with_skull.lta"
>>> skullstrip.inputs.out_file = "brainmask.auto.mgz"
>>> skullstrip.cmdline # doctest: +ALLOW_UNICODE
'mri_watershed -T1 transforms/talairach_with_skull.lta T1.mgz brainmask.auto.mgz'
"""
_cmd = 'mri_watershed'
input_spec = WatershedSkullStripInputSpec
output_spec = WatershedSkullStripOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class NormalizeInputSpec(FSTraitedSpec):
# required
in_file = File(argstr='%s', exists=True, mandatory=True,
position=-2, desc="The input file for Normalize")
out_file = File(argstr='%s', position=-1,
name_source=['in_file'], name_template='%s_norm',
hash_files=False, keep_extension=True,
desc="The output file for Normalize")
# optional
gradient = traits.Int(1, argstr="-g %d", usedefault=False,
desc="use max intensity/mm gradient g (default=1)")
mask = File(argstr="-mask %s", exists=True,
desc="The input mask file for Normalize")
segmentation = File(argstr="-aseg %s",
exists=True, desc="The input segmentation for Normalize")
transform = File(exists=True,
desc="Tranform file from the header of the input file")
class NormalizeOutputSpec(TraitedSpec):
out_file = traits.File(exists=False, desc="The output file for Normalize")
class Normalize(FSCommand):
"""
Normalize the white-matter, optionally based on control points. The
input volume is converted into a new volume where white matter image
values all range around 110.
Examples
========
>>> from nipype.interfaces import freesurfer
>>> normalize = freesurfer.Normalize()
>>> normalize.inputs.in_file = "T1.mgz"
>>> normalize.inputs.gradient = 1
>>> normalize.cmdline # doctest: +ALLOW_UNICODE
'mri_normalize -g 1 T1.mgz T1_norm.mgz'
"""
_cmd = "mri_normalize"
input_spec = NormalizeInputSpec
output_spec = NormalizeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class CANormalizeInputSpec(FSTraitedSpec):
in_file = File(argstr='%s', exists=True, mandatory=True,
position=-4, desc="The input file for CANormalize")
out_file = File(argstr='%s', position=-1,
name_source=['in_file'], name_template='%s_norm',
hash_files=False, keep_extension=True,
desc="The output file for CANormalize")
atlas = File(argstr='%s', exists=True, mandatory=True,
position=-3, desc="The atlas file in gca format")
transform = File(argstr='%s', exists=True, mandatory=True,
position=-2, desc="The tranform file in lta format")
# optional
mask = File(argstr='-mask %s', exists=True,
desc="Specifies volume to use as mask")
control_points = File(argstr='-c %s',
desc="File name for the output control points")
long_file = File(argstr='-long %s',
desc='undocumented flag used in longitudinal processing')
class CANormalizeOutputSpec(TraitedSpec):
out_file = traits.File(exists=False, desc="The output file for Normalize")
control_points = File(
exists=False, desc="The output control points for Normalize")
class CANormalize(FSCommand):
"""This program creates a normalized volume using the brain volume and an
input gca file.
For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_normalize>`_
Examples
========
>>> from nipype.interfaces import freesurfer
>>> ca_normalize = freesurfer.CANormalize()
>>> ca_normalize.inputs.in_file = "T1.mgz"
>>> ca_normalize.inputs.atlas = "atlas.nii.gz" # in practice use .gca atlases
>>> ca_normalize.inputs.transform = "trans.mat" # in practice use .lta transforms
>>> ca_normalize.cmdline # doctest: +ALLOW_UNICODE
'mri_ca_normalize T1.mgz atlas.nii.gz trans.mat T1_norm.mgz'
"""
_cmd = "mri_ca_normalize"
input_spec = CANormalizeInputSpec
output_spec = CANormalizeOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
outputs['control_points'] = os.path.abspath(self.inputs.control_points)
return outputs
class CARegisterInputSpec(FSTraitedSpecOpenMP):
#required
in_file = File(argstr='%s', exists=True, mandatory=True,
position=-3, desc="The input volume for CARegister")
out_file = File(argstr='%s', position=-1,
genfile=True, desc="The output volume for CARegister")
template = File(argstr='%s', exists=True,
position=-2, desc="The template file in gca format")
# optional
mask = File(argstr='-mask %s', exists=True,
desc="Specifies volume to use as mask")
invert_and_save = traits.Bool(argstr='-invert-and-save', position=-4,
desc="Invert and save the .m3z multi-dimensional talaraich transform to x, y, and z .mgz files")
no_big_ventricles = traits.Bool(
argstr='-nobigventricles', desc="No big ventricles")
transform = File(argstr='-T %s', exists=True,
desc="Specifies transform in lta format")
align = traits.String(argstr='-align-%s',
desc="Specifies when to perform alignment")
levels = traits.Int(
argstr='-levels %d',
desc="defines how many surrounding voxels will be used in interpolations, default is 6")
A = traits.Int(
argstr='-A %d', desc='undocumented flag used in longitudinal processing')
l_files = InputMultiPath(
File(exists=False), argstr='-l %s',
desc='undocumented flag used in longitudinal processing')
class CARegisterOutputSpec(TraitedSpec):
out_file = traits.File(exists=False, desc="The output file for CARegister")
class CARegister(FSCommandOpenMP):
"""Generates a multi-dimensional talairach transform from a gca file and talairach.lta file
For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_register>`_
Examples
========
>>> from nipype.interfaces import freesurfer
>>> ca_register = freesurfer.CARegister()
>>> ca_register.inputs.in_file = "norm.mgz"
>>> ca_register.inputs.out_file = "talairach.m3z"
>>> ca_register.cmdline # doctest: +ALLOW_UNICODE
'mri_ca_register norm.mgz talairach.m3z'
"""
_cmd = "mri_ca_register"
input_spec = CARegisterInputSpec
output_spec = CARegisterOutputSpec
def _format_arg(self, name, spec, value):
if name == "l_files" and len(value) == 1:
value.append('identity.nofile')
return super(CARegister, self)._format_arg(name, spec, value)
def _gen_fname(self, name):
if name == 'out_file':
return os.path.abspath('talairach.m3z')
return None
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class CALabelInputSpec(FSTraitedSpecOpenMP):
#required
in_file = File(argstr="%s", position=-4, mandatory=True,
exists=True, desc="Input volume for CALabel")
out_file = File(argstr="%s", position=-1, mandatory=True, exists=False,
desc="Output file for CALabel")
transform = File(argstr="%s", position=-3, mandatory=True,
exists=True, desc="Input transform for CALabel")
template = File(argstr="%s", position=-2, mandatory=True,
exists=True, desc="Input template for CALabel")
# optional
in_vol = File(argstr="-r %s", exists=True,
desc="set input volume")
intensities = File(argstr="-r %s", exists=True,
desc="input label intensities file(used in longitudinal processing)")
no_big_ventricles = traits.Bool(
argstr="-nobigventricles", desc="No big ventricles")
align = traits.Bool(argstr="-align", desc="Align CALabel")
prior = traits.Float(argstr="-prior %.1f",
desc="Prior for CALabel")
relabel_unlikely = traits.Tuple(traits.Int, traits.Float,
argstr="-relabel_unlikely %d %.1f",
desc=("Reclassify voxels at least some std"
" devs from the mean using some size"
" Gaussian window"))
label = traits.File(argstr="-l %s", exists=True,
desc="Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file")
aseg = traits.File(argstr="-aseg %s", exists=True,
desc="Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file")
class CALabelOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="Output volume from CALabel")
class CALabel(FSCommandOpenMP):
"""
For complete details, see the `FS Documentation <http://surfer.nmr.mgh.harvard.edu/fswiki/mri_ca_register>`_
Examples
========
>>> from nipype.interfaces import freesurfer
>>> ca_label = freesurfer.CALabel()
>>> ca_label.inputs.in_file = "norm.mgz"
>>> ca_label.inputs.out_file = "out.mgz"
>>> ca_label.inputs.transform = "trans.mat"
>>> ca_label.inputs.template = "Template_6.nii" # in practice use .gcs extension
>>> ca_label.cmdline # doctest: +ALLOW_UNICODE
'mri_ca_label norm.mgz trans.mat Template_6.nii out.mgz'
"""
_cmd = "mri_ca_label"
input_spec = CALabelInputSpec
output_spec = CALabelOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class MRIsCALabelInputSpec(FSTraitedSpecOpenMP):
# required
subject_id = traits.String('subject_id', argstr="%s", position=-5,
usedefault=True, mandatory=True,
desc="Subject name or ID")
hemisphere = traits.Enum('lh', 'rh',
argstr="%s", position=-4, mandatory=True,
desc="Hemisphere ('lh' or 'rh')")
canonsurf = File(argstr="%s", position=-3, mandatory=True, exists=True,
desc="Input canonical surface file")
classifier = File(argstr="%s", position=-2, mandatory=True, exists=True,
desc="Classifier array input file")
smoothwm = File(mandatory=True, exists=True,
desc="implicit input {hemisphere}.smoothwm")
curv = File(mandatory=True, exists=True,
desc="implicit input {hemisphere}.curv")
sulc = File(mandatory=True, exists=True,
desc="implicit input {hemisphere}.sulc")
out_file = File(argstr="%s", position=-1, exists=False,
name_source=['hemisphere'], keep_extension=True,
hash_files=False, name_template="%s.aparc.annot",
desc="Annotated surface output file")
# optional
label = traits.File(argstr="-l %s", exists=True,
desc="Undocumented flag. Autorecon3 uses ../label/{hemisphere}.cortex.label as input file")
aseg = traits.File(argstr="-aseg %s", exists=True,
desc="Undocumented flag. Autorecon3 uses ../mri/aseg.presurf.mgz as input file")
seed = traits.Int(argstr="-seed %d",
desc="")
copy_inputs = traits.Bool(desc="Copies implicit inputs to node directory " +
"and creates a temp subjects_directory. " +
"Use this when running as a node")
class MRIsCALabelOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="Output volume from MRIsCALabel")
class MRIsCALabel(FSCommandOpenMP):
"""
For a single subject, produces an annotation file, in which each
    cortical surface vertex is assigned a neuroanatomical label. This
automatic procedure employs data from a previously-prepared atlas
file. An atlas file is created from a training set, capturing region
data manually drawn by neuroanatomists combined with statistics on
variability correlated to geometric information derived from the
cortical model (sulcus and curvature). Besides the atlases provided
    with FreeSurfer, new ones can be prepared using mris_ca_train.
Examples
========
>>> from nipype.interfaces import freesurfer
>>> ca_label = freesurfer.MRIsCALabel()
>>> ca_label.inputs.subject_id = "test"
>>> ca_label.inputs.hemisphere = "lh"
>>> ca_label.inputs.canonsurf = "lh.pial"
>>> ca_label.inputs.curv = "lh.pial"
>>> ca_label.inputs.sulc = "lh.pial"
    >>> ca_label.inputs.classifier = "im1.nii" # in practice, use .gcs extension
>>> ca_label.inputs.smoothwm = "lh.pial"
>>> ca_label.cmdline # doctest: +ALLOW_UNICODE
'mris_ca_label test lh lh.pial im1.nii lh.aparc.annot'
"""
_cmd = "mris_ca_label"
input_spec = MRIsCALabelInputSpec
output_spec = MRIsCALabelOutputSpec
def run(self, **inputs):
if self.inputs.copy_inputs:
self.inputs.subjects_dir = os.getcwd()
if 'subjects_dir' in inputs:
inputs['subjects_dir'] = self.inputs.subjects_dir
copy2subjdir(self, self.inputs.canonsurf, folder='surf')
copy2subjdir(self, self.inputs.smoothwm,
folder='surf',
basename='{0}.smoothwm'.format(self.inputs.hemisphere))
copy2subjdir(self, self.inputs.curv,
folder='surf',
basename='{0}.curv'.format(self.inputs.hemisphere))
copy2subjdir(self, self.inputs.sulc,
folder='surf',
basename='{0}.sulc'.format(self.inputs.hemisphere))
# The label directory must exist in order for an output to be written
label_dir = os.path.join(self.inputs.subjects_dir,
self.inputs.subject_id,
'label')
if not os.path.isdir(label_dir):
os.makedirs(label_dir)
return super(MRIsCALabel, self).run(**inputs)
def _list_outputs(self):
outputs = self.output_spec().get()
out_basename = os.path.basename(self.inputs.out_file)
outputs['out_file'] = os.path.join(self.inputs.subjects_dir,
self.inputs.subject_id,
'label', out_basename)
return outputs
class SegmentCCInputSpec(FSTraitedSpec):
in_file = File(argstr="-aseg %s", mandatory=True, exists=True,
desc="Input aseg file to read from subjects directory")
in_norm = File(mandatory=True, exists=True,
desc="Required undocumented input {subject}/mri/norm.mgz")
out_file = File(argstr="-o %s", exists=False,
name_source=['in_file'], name_template='%s.auto.mgz',
hash_files=False, keep_extension=False,
desc="Filename to write aseg including CC")
out_rotation = File(argstr="-lta %s", mandatory=True, exists=False,
desc="Global filepath for writing rotation lta")
subject_id = traits.String('subject_id', argstr="%s", mandatory=True,
position=-1, usedefault=True,
desc="Subject name")
    copy_inputs = traits.Bool(desc="If running as a node, set this to True. " +
"This will copy the input files to the node " +
"directory.")
class SegmentCCOutputSpec(TraitedSpec):
out_file = File(exists=False,
                    desc="Output segmentation including corpus callosum")
out_rotation = File(exists=False,
desc="Output lta rotation file")
class SegmentCC(FSCommand):
"""
This program segments the corpus callosum into five separate labels in
the subcortical segmentation volume 'aseg.mgz'. The divisions of the
cc are equally spaced in terms of distance along the primary
eigendirection (pretty much the long axis) of the cc. The lateral
extent can be changed with the -T <thickness> parameter, where
<thickness> is the distance off the midline (so -T 1 would result in
    the whole CC being 3mm thick). The default is 2, so it's 5mm thick. The
aseg.stats values should be volume.
Examples
========
>>> from nipype.interfaces import freesurfer
>>> SegmentCC_node = freesurfer.SegmentCC()
>>> SegmentCC_node.inputs.in_file = "aseg.mgz"
>>> SegmentCC_node.inputs.in_norm = "norm.mgz"
>>> SegmentCC_node.inputs.out_rotation = "cc.lta"
>>> SegmentCC_node.inputs.subject_id = "test"
>>> SegmentCC_node.cmdline # doctest: +ALLOW_UNICODE
'mri_cc -aseg aseg.mgz -o aseg.auto.mgz -lta cc.lta test'
"""
_cmd = "mri_cc"
input_spec = SegmentCCInputSpec
output_spec = SegmentCCOutputSpec
# mri_cc does not take absolute paths and will look for the
# input files in <SUBJECTS_DIR>/<subject_id>/mri/<basename>
# So, if the files are not there, they will be copied to that
# location
def _format_arg(self, name, spec, value):
if name in ["in_file", "in_norm", "out_file"]:
            # mri_cc can't use abspaths, just the basename
basename = os.path.basename(value)
return spec.argstr % basename
return super(SegmentCC, self)._format_arg(name, spec, value)
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
outputs['out_rotation'] = os.path.abspath(self.inputs.out_rotation)
return outputs
def run(self, **inputs):
if self.inputs.copy_inputs:
self.inputs.subjects_dir = os.getcwd()
if 'subjects_dir' in inputs:
inputs['subjects_dir'] = self.inputs.subjects_dir
for originalfile in [self.inputs.in_file, self.inputs.in_norm]:
copy2subjdir(self, originalfile, folder='mri')
return super(SegmentCC, self).run(**inputs)
def aggregate_outputs(self, runtime=None, needed_outputs=None):
# it is necessary to find the output files and move
        # them to the correct location
predicted_outputs = self._list_outputs()
for name in ['out_file', 'out_rotation']:
out_file = predicted_outputs[name]
if not os.path.isfile(out_file):
out_base = os.path.basename(out_file)
if isdefined(self.inputs.subjects_dir):
subj_dir = os.path.join(self.inputs.subjects_dir,
self.inputs.subject_id)
else:
subj_dir = os.path.join(os.getcwd(),
self.inputs.subject_id)
if name == 'out_file':
out_tmp = os.path.join(subj_dir,
'mri',
out_base)
elif name == 'out_rotation':
out_tmp = os.path.join(subj_dir,
'mri',
'transforms',
out_base)
else:
out_tmp = None
# move the file to correct location
if out_tmp and os.path.isfile(out_tmp):
if not os.path.isdir(os.path.dirname(out_tmp)):
os.makedirs(os.path.dirname(out_tmp))
shutil.move(out_tmp, out_file)
return super(SegmentCC, self).aggregate_outputs(runtime, needed_outputs)
class SegmentWMInputSpec(FSTraitedSpec):
in_file = File(argstr="%s", exists=True, mandatory=True,
position=-2, desc="Input file for SegmentWM")
out_file = File(argstr="%s", exists=False, mandatory=True,
position=-1, desc="File to be written as output for SegmentWM")
class SegmentWMOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="Output white matter segmentation")
class SegmentWM(FSCommand):
"""
This program segments white matter from the input volume. The input
volume should be normalized such that white matter voxels are
~110-valued, and the volume is conformed to 256^3.
Examples
========
>>> from nipype.interfaces import freesurfer
>>> SegmentWM_node = freesurfer.SegmentWM()
>>> SegmentWM_node.inputs.in_file = "norm.mgz"
>>> SegmentWM_node.inputs.out_file = "wm.seg.mgz"
>>> SegmentWM_node.cmdline # doctest: +ALLOW_UNICODE
'mri_segment norm.mgz wm.seg.mgz'
"""
_cmd = "mri_segment"
input_spec = SegmentWMInputSpec
output_spec = SegmentWMOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class EditWMwithAsegInputSpec(FSTraitedSpec):
in_file = File(argstr="%s", position=-4, mandatory=True, exists=True,
desc="Input white matter segmentation file")
brain_file = File(argstr="%s", position=-3, mandatory=True, exists=True,
desc="Input brain/T1 file")
seg_file = File(argstr="%s", position=-2, mandatory=True, exists=True,
desc="Input presurf segmentation file")
out_file = File(argstr="%s", position=-1, mandatory=True, exists=False,
desc="File to be written as output")
# optional
keep_in = traits.Bool(argstr="-keep-in",
desc="Keep edits as found in input volume")
class EditWMwithAsegOutputSpec(TraitedSpec):
out_file = File(exists=False, desc="Output edited WM file")
class EditWMwithAseg(FSCommand):
"""
Edits a wm file using a segmentation
Examples
========
>>> from nipype.interfaces.freesurfer import EditWMwithAseg
>>> editwm = EditWMwithAseg()
>>> editwm.inputs.in_file = "T1.mgz"
>>> editwm.inputs.brain_file = "norm.mgz"
>>> editwm.inputs.seg_file = "aseg.mgz"
>>> editwm.inputs.out_file = "wm.asegedit.mgz"
>>> editwm.inputs.keep_in = True
>>> editwm.cmdline # doctest: +ALLOW_UNICODE
'mri_edit_wm_with_aseg -keep-in T1.mgz norm.mgz aseg.mgz wm.asegedit.mgz'
"""
_cmd = 'mri_edit_wm_with_aseg'
input_spec = EditWMwithAsegInputSpec
output_spec = EditWMwithAsegOutputSpec
def _list_outputs(self):
outputs = self.output_spec().get()
outputs['out_file'] = os.path.abspath(self.inputs.out_file)
return outputs
class ConcatenateLTAInputSpec(FSTraitedSpec):
# required
in_lta1 = File(exists=True, mandatory=True, argstr='%s', position=-3,
desc='maps some src1 to dst1')
in_lta2 = traits.Either(
File(exists=True), 'identity.nofile', argstr='%s', position=-2,
mandatory=True, desc='maps dst1(src2) to dst2')
out_file = File(
position=-1, argstr='%s', hash_files=False, name_source=['in_lta1'],
name_template='%s_concat', keep_extension=True,
desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1')
# Inversion and transform type
invert_1 = traits.Bool(argstr='-invert1',
desc='invert in_lta1 before applying it')
invert_2 = traits.Bool(argstr='-invert2',
desc='invert in_lta2 before applying it')
invert_out = traits.Bool(argstr='-invertout',
desc='invert output LTA')
out_type = traits.Enum('VOX2VOX', 'RAS2RAS', argstr='-out_type %d',
desc='set final LTA type')
# Talairach options
tal_source_file = traits.File(
exists=True, argstr='-tal %s', position=-5,
requires=['tal_template_file'],
desc='if in_lta2 is talairach.xfm, specify source for talairach')
tal_template_file = traits.File(
exists=True, argstr='%s', position=-4, requires=['tal_source_file'],
desc='if in_lta2 is talairach.xfm, specify template for talairach')
subject = traits.Str(argstr='-subject %s',
desc='set subject in output LTA')
# Note rmsdiff would be xor out_file, and would be most easily dealt with
# in a new interface. -CJM 2017.10.05
class ConcatenateLTAOutputSpec(TraitedSpec):
out_file = File(
exists=False, desc='the combined LTA maps: src1 to dst2 = LTA2*LTA1')
class ConcatenateLTA(FSCommand):
""" Concatenates two consecutive LTA transformations into one overall
transformation
Out = LTA2*LTA1
Examples
--------
>>> from nipype.interfaces.freesurfer import ConcatenateLTA
>>> conc_lta = ConcatenateLTA()
>>> conc_lta.inputs.in_lta1 = 'lta1.lta'
>>> conc_lta.inputs.in_lta2 = 'lta2.lta'
>>> conc_lta.cmdline # doctest: +ALLOW_UNICODE
'mri_concatenate_lta lta1.lta lta2.lta lta1_concat.lta'
You can use 'identity.nofile' as the filename for in_lta2, e.g.:
>>> conc_lta.inputs.in_lta2 = 'identity.nofile'
>>> conc_lta.inputs.invert_1 = True
>>> conc_lta.inputs.out_file = 'inv1.lta'
>>> conc_lta.cmdline # doctest: +ALLOW_UNICODE
'mri_concatenate_lta -invert1 lta1.lta identity.nofile inv1.lta'
To create a RAS2RAS transform:
>>> conc_lta.inputs.out_type = 'RAS2RAS'
>>> conc_lta.cmdline # doctest: +ALLOW_UNICODE
'mri_concatenate_lta -invert1 -out_type 1 lta1.lta identity.nofile inv1.lta'
"""
_cmd = 'mri_concatenate_lta'
input_spec = ConcatenateLTAInputSpec
output_spec = ConcatenateLTAOutputSpec
def _format_arg(self, name, spec, value):
if name == 'out_type':
value = {'VOX2VOX': 0, 'RAS2RAS': 1}[value]
return super(ConcatenateLTA, self)._format_arg(name, spec, value)
|
mick-d/nipype
|
nipype/interfaces/freesurfer/preprocess.py
|
Python
|
bsd-3-clause
| 113,186
|
[
"Gaussian"
] |
2e61d05e532e6b777125d267c1b1678871ac6ae733db766a8508e26dc35bf592
|
from ase import Atoms
from ase.optimize import QuasiNewton
from ase.neb import NEB
from ase.optimize.mdmin import MDMin
try:
from asap3 import EMT
except ImportError:
pass
else:
a = 3.6
b = a / 2
initial = Atoms('Cu4',
positions=[(0, 0, 0),
(0, b, b),
(b, 0, b),
(b, b, 0)],
cell=(a, a, a),
pbc=True)
initial *= (4, 4, 4)
del initial[0]
images = [initial] + [initial.copy() for i in range(6)]
images[-1].positions[0] = (0, 0, 0)
for image in images:
image.set_calculator(EMT())
#image.set_calculator(ASAP())
for image in [images[0], images[-1]]:
QuasiNewton(image).run(fmax=0.01)
neb = NEB(images)
neb.interpolate()
for a in images:
        print(a.positions[0], a.get_potential_energy())
dyn = MDMin(neb, dt=0.1, trajectory='mep1.traj')
#dyn = QuasiNewton(neb)
    print(dyn.run(fmax=0.01, steps=25))
for a in images:
        print(a.positions[0], a.get_potential_energy())
|
grhawk/ASE
|
tools/ase/test/vacancy.py
|
Python
|
gpl-2.0
| 1,121
|
[
"ASE"
] |
3d69e3a5c8d7b967e31ddc3ecc693fdcbc746382b24c0196554105be7e08562b
|
# Generated by Django 2.1.4 on 2019-01-31 06:26
from django.conf import settings
import django.contrib.postgres.fields
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import herders.models
import timezone_field.fields
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bestiary', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BuildingInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('level', models.IntegerField()),
('building', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bestiary.Building')),
],
options={
'ordering': ['building'],
},
),
migrations.CreateModel(
name='MonsterInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('com2us_id', models.BigIntegerField(blank=True, null=True)),
('created', models.DateTimeField(blank=True, null=True)),
('stars', models.IntegerField()),
('level', models.IntegerField()),
('skill_1_level', models.IntegerField(blank=True, default=1)),
('skill_2_level', models.IntegerField(blank=True, default=1)),
('skill_3_level', models.IntegerField(blank=True, default=1)),
('skill_4_level', models.IntegerField(blank=True, default=1)),
('fodder', models.BooleanField(default=False)),
('in_storage', models.BooleanField(default=False)),
('ignore_for_fusion', models.BooleanField(default=False)),
('priority', models.IntegerField(blank=True, choices=[(1, 'Low'), (2, 'Medium'), (3, 'High')], null=True)),
('notes', models.TextField(blank=True, help_text='<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled', null=True)),
('custom_name', models.CharField(blank=True, default='', max_length=20)),
('base_hp', models.IntegerField(blank=True, default=0)),
('rune_hp', models.IntegerField(blank=True, default=0)),
('base_attack', models.IntegerField(blank=True, default=0)),
('rune_attack', models.IntegerField(blank=True, default=0)),
('base_defense', models.IntegerField(blank=True, default=0)),
('rune_defense', models.IntegerField(blank=True, default=0)),
('base_speed', models.IntegerField(blank=True, default=0)),
('rune_speed', models.IntegerField(blank=True, default=0)),
('base_crit_rate', models.IntegerField(blank=True, default=0)),
('rune_crit_rate', models.IntegerField(blank=True, default=0)),
('base_crit_damage', models.IntegerField(blank=True, default=0)),
('rune_crit_damage', models.IntegerField(blank=True, default=0)),
('base_resistance', models.IntegerField(blank=True, default=0)),
('rune_resistance', models.IntegerField(blank=True, default=0)),
('base_accuracy', models.IntegerField(blank=True, default=0)),
('rune_accuracy', models.IntegerField(blank=True, default=0)),
('avg_rune_efficiency', models.FloatField(blank=True, null=True)),
('monster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bestiary.Monster')),
],
options={
'ordering': ['-stars', '-level', 'monster__name'],
},
),
migrations.CreateModel(
name='MonsterPiece',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('pieces', models.IntegerField(default=0)),
('monster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='bestiary.Monster')),
],
options={
'ordering': ['monster__name'],
},
),
migrations.CreateModel(
name='MonsterTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='RuneCraftInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('com2us_id', models.BigIntegerField(blank=True, null=True)),
('type', models.IntegerField(choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')])),
('rune', models.IntegerField(blank=True, choices=[(1, 'Energy'), (2, 'Fatal'), (3, 'Blade'), (4, 'Rage'), (5, 'Swift'), (6, 'Focus'), (7, 'Guard'), (8, 'Endure'), (9, 'Violent'), (10, 'Will'), (11, 'Nemesis'), (12, 'Shield'), (13, 'Revenge'), (14, 'Despair'), (15, 'Vampire'), (16, 'Destroy'), (17, 'Fight'), (18, 'Determination'), (19, 'Enhance'), (20, 'Accuracy'), (21, 'Tolerance')], null=True)),
('stat', models.IntegerField(choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')])),
('quality', models.IntegerField(choices=[(0, 'Normal'), (1, 'Magic'), (2, 'Rare'), (3, 'Hero'), (4, 'Legend')])),
('value', models.IntegerField(blank=True, null=True)),
],
options={
'ordering': ['type', 'rune'],
},
),
migrations.CreateModel(
name='RuneInstance',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('type', models.IntegerField(choices=[(1, 'Energy'), (2, 'Fatal'), (3, 'Blade'), (4, 'Rage'), (5, 'Swift'), (6, 'Focus'), (7, 'Guard'), (8, 'Endure'), (9, 'Violent'), (10, 'Will'), (11, 'Nemesis'), (12, 'Shield'), (13, 'Revenge'), (14, 'Despair'), (15, 'Vampire'), (16, 'Destroy'), (17, 'Fight'), (18, 'Determination'), (19, 'Enhance'), (20, 'Accuracy'), (21, 'Tolerance')])),
('com2us_id', models.BigIntegerField(blank=True, null=True)),
('marked_for_sale', models.BooleanField(default=False)),
('notes', models.TextField(blank=True, null=True)),
('stars', models.IntegerField()),
('level', models.IntegerField()),
('slot', models.IntegerField()),
('original_quality', models.IntegerField(blank=True, choices=[(0, 'Normal'), (1, 'Magic'), (2, 'Rare'), (3, 'Hero'), (4, 'Legend')], null=True)),
('value', models.IntegerField(blank=True, null=True)),
('main_stat', models.IntegerField(choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')])),
('main_stat_value', models.IntegerField()),
('innate_stat', models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True)),
('innate_stat_value', models.IntegerField(blank=True, null=True)),
('substats', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True), blank=True, null=True, size=4)),
('substat_values', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, null=True), blank=True, null=True, size=4)),
('substat_crafts', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(blank=True, choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')], null=True), blank=True, null=True, size=4)),
('substat_1', models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True)),
('substat_1_value', models.IntegerField(blank=True, null=True)),
('substat_1_craft', models.IntegerField(blank=True, choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')], null=True)),
('substat_2', models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True)),
('substat_2_value', models.IntegerField(blank=True, null=True)),
('substat_2_craft', models.IntegerField(blank=True, choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')], null=True)),
('substat_3', models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True)),
('substat_3_value', models.IntegerField(blank=True, null=True)),
('substat_3_craft', models.IntegerField(blank=True, choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')], null=True)),
('substat_4', models.IntegerField(blank=True, choices=[(1, 'HP'), (2, 'HP %'), (3, 'ATK'), (4, 'ATK %'), (5, 'DEF'), (6, 'DEF %'), (7, 'SPD'), (8, 'CRI Rate %'), (9, 'CRI Dmg %'), (10, 'Resistance %'), (11, 'Accuracy %')], null=True)),
('substat_4_value', models.IntegerField(blank=True, null=True)),
('substat_4_craft', models.IntegerField(blank=True, choices=[(0, 'Grindstone'), (1, 'Enchant Gem'), (2, 'Immemorial Grindstone'), (3, 'Immemorial Gem')], null=True)),
('quality', models.IntegerField(choices=[(0, 'Normal'), (1, 'Magic'), (2, 'Rare'), (3, 'Hero'), (4, 'Legend')], default=0)),
('has_hp', models.BooleanField(default=False)),
('has_atk', models.BooleanField(default=False)),
('has_def', models.BooleanField(default=False)),
('has_crit_rate', models.BooleanField(default=False)),
('has_crit_dmg', models.BooleanField(default=False)),
('has_speed', models.BooleanField(default=False)),
('has_resist', models.BooleanField(default=False)),
('has_accuracy', models.BooleanField(default=False)),
('substat_upgrades_remaining', models.IntegerField(blank=True, null=True)),
('efficiency', models.FloatField(blank=True, null=True)),
('max_efficiency', models.FloatField(blank=True, null=True)),
('assigned_to', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='herders.MonsterInstance')),
],
options={
'ordering': ['slot', 'type', 'level'],
},
),
migrations.CreateModel(
name='Storage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('magic_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Magic Essence', size=3)),
('fire_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Fire Essence', size=3)),
('water_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Water Essence', size=3)),
('wind_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Wind Essence', size=3)),
('light_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Light Essence', size=3)),
('dark_essence', django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(default=0), default=[0, 0, 0], help_text='Dark Essence', size=3)),
('wood', models.IntegerField(default=0, help_text='Hard Wood')),
('leather', models.IntegerField(default=0, help_text='Tough Leather')),
('rock', models.IntegerField(default=0, help_text='Solid Rock')),
('ore', models.IntegerField(default=0, help_text='Solid Iron Ore')),
('mithril', models.IntegerField(default=0, help_text='Shining Mythril')),
('cloth', models.IntegerField(default=0, help_text='Thick Cloth')),
('rune_piece', models.IntegerField(default=0, help_text='Rune Piece')),
('dust', models.IntegerField(default=0, help_text='Magic Dust')),
('symbol_harmony', models.IntegerField(default=0, help_text='Symbol of Harmony')),
('symbol_transcendance', models.IntegerField(default=0, help_text='Symbol of Transcendance')),
('symbol_chaos', models.IntegerField(default=0, help_text='Symbol of Chaos')),
('crystal_water', models.IntegerField(default=0, help_text='Frozen Water Crystal')),
('crystal_fire', models.IntegerField(default=0, help_text='Flaming Fire Crystal')),
('crystal_wind', models.IntegerField(default=0, help_text='Whirling Wind Crystal')),
('crystal_light', models.IntegerField(default=0, help_text='Shiny Light Crystal')),
('crystal_dark', models.IntegerField(default=0, help_text='Pitch-black Dark Crystal')),
('crystal_magic', models.IntegerField(default=0, help_text='Condensed Magic Crystal')),
('crystal_pure', models.IntegerField(default=0, help_text='Pure Magic Crystal')),
],
),
migrations.CreateModel(
name='Summoner',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('summoner_name', models.CharField(blank=True, max_length=256, null=True)),
('com2us_id', models.BigIntegerField(blank=True, default=None, null=True)),
('server', models.IntegerField(blank=True, choices=[(0, 'Global'), (1, 'Europe'), (2, 'Asia'), (3, 'Korea'), (4, 'Japan'), (5, 'China')], default=0, null=True)),
('public', models.BooleanField(blank=True, default=False)),
('timezone', timezone_field.fields.TimeZoneField(default='America/Los_Angeles')),
('notes', models.TextField(blank=True, null=True)),
('preferences', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
('last_update', models.DateTimeField(auto_now=True)),
('following', models.ManyToManyField(related_name='followed_by', to='herders.Summoner')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('favorite', models.BooleanField(blank=True, default=False)),
('description', models.TextField(blank=True, help_text='<a href="https://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> enabled', null=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='TeamGroup',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner')),
],
options={
'ordering': ['name'],
},
),
migrations.AddField(
model_name='team',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='herders.TeamGroup'),
),
migrations.AddField(
model_name='team',
name='leader',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='team_leader', to='herders.MonsterInstance'),
),
migrations.AddField(
model_name='team',
name='level',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='bestiary.Level'),
),
migrations.AddField(
model_name='team',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='team',
name='roster',
field=models.ManyToManyField(blank=True, to='herders.MonsterInstance'),
),
migrations.AddField(
model_name='storage',
name='owner',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='runeinstance',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='runecraftinstance',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='monsterpiece',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='monsterinstance',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
migrations.AddField(
model_name='monsterinstance',
name='tags',
field=models.ManyToManyField(blank=True, to='herders.MonsterTag'),
),
migrations.AddField(
model_name='buildinginstance',
name='owner',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='herders.Summoner'),
),
]
|
PeteAndersen/swarfarm
|
herders/migrations/0001_initial.py
|
Python
|
apache-2.0
| 19,862
|
[
"CRYSTAL"
] |
f3052dc1f476566e2ed79491c2a11248f70f99121f67e850818d4ffbcb08fb9b
|
"""
A set of objects representing each file extension recognized by ImageIO. If an
extension is not listed here it is still supported, as long as there exists a
supporting backend.
"""
from typing import List, Dict
class FileExtension:
"""File Extension Metadata
    This class holds information about an image file format associated with a
given extension. This information is used to track plugins that are known to
be able to handle a particular format. It also contains additional
information about a format, which is used when creating the supported format
docs.
Plugins known to be able to handle this format are ordered by a ``priority``
list. This list is used to determine the ideal plugin to use when choosing a
plugin based on file extension.
Parameters
----------
extension : str
The name of the extension including the initial dot, e.g. ".png".
priority : List
A list of plugin names (entries in config.known_plugins) that can handle
this format. The position of a plugin expresses a preference, e.g.
["plugin1", "plugin2"] indicates that, if available, plugin1 should be
preferred over plugin2 when handling a request related to this format.
name : str
The full name of the format.
description : str
A description of the format.
external_link : str
A link to further information about the format. Typically, the format's
specification.
Examples
--------
>>> FileExtension(
name="Bitmap",
extension=".bmp",
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"],
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
)
"""
def __init__(
self,
*,
extension: str,
priority: List[str],
name: str = None,
description: str = None,
external_link: str = None
) -> None:
self.extension = extension
self.priority = priority
self.name = name
self.description = description
self.external_link = external_link
self.default_priority = priority.copy()
def reset(self) -> None:
self.priority = self.default_priority.copy()
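# --- Illustrative note (an assumption added for clarity; not part of the
# original module): the ``priority`` list may be reordered at runtime to
# prefer a different backend, and ``reset()`` restores the order given at
# construction time. A minimal sketch:
#
#     ext = FileExtension(extension=".bmp", priority=["pillow", "BMP-FI"])
#     ext.priority = ["BMP-FI", "pillow"]   # temporarily prefer BMP-FI
#     ext.reset()                           # back to ["pillow", "BMP-FI"]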
extension_list = [
FileExtension(
name="Hasselblad raw",
extension=".3fr",
priority=["RAW-FI"],
),
FileExtension(
name="Sony alpha",
extension=".arw",
priority=["RAW-FI"],
),
FileExtension(
name="Animated Portable Network Graphics",
external_link="https://en.wikipedia.org/wiki/APNG",
extension=".apng",
priority=["pillow"],
),
FileExtension(
name="Audio Video Interleave",
extension=".avi",
priority=["FFMPEG"],
),
FileExtension(
name="Casio raw format",
extension=".bay",
priority=["RAW-FI"],
),
FileExtension(
name="Bitmap",
extension=".bmp",
priority=["pillow", "BMP-PIL", "BMP-FI", "ITK"],
external_link="https://en.wikipedia.org/wiki/BMP_file_format",
),
FileExtension(
name="Re-Volt mipmap",
extension=".bmq",
priority=["RAW-FI"],
),
FileExtension(
name="Binary Structured Data Format",
extension=".bsdf",
priority=["BSDF"],
external_link="http://bsdf.io/",
),
FileExtension(
name="Binary Universal Form for the Representation of meteorological data",
extension=".bufr",
priority=["pillow", "BUFR-PIL"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".bw",
priority=["pillow", "SGI-PIL", "SGI-FI"],
),
FileExtension(
name="Scirra Construct",
extension=".cap",
priority=["RAW-FI"],
),
FileExtension(
name="AMETEK High Speed Camera Format",
extension=".cine",
priority=["RAW-FI"],
external_link="https://phantomhighspeed-knowledge.secure.force.com/servlet/fileField?id=0BE1N000000kD2i#:~:text=Cine%20is%20a%20video%20file,camera%20model%20and%20image%20resolution",
),
FileExtension(extension=".cr2", priority=["RAW-FI"]),
FileExtension(
extension=".crw",
priority=["RAW-FI"],
),
FileExtension(
extension=".cs1",
priority=["RAW-FI"],
),
FileExtension(
name="Computerized Tomography",
extension=".ct",
priority=["DICOM"],
),
FileExtension(
name="Windows Cursor Icons",
extension=".cur",
priority=["pillow", "CUR-PIL"],
),
FileExtension(
name="Dr. Halo",
extension=".cut",
priority=["CUT-FI"],
),
FileExtension(
extension=".dc2",
priority=["RAW-FI"],
),
FileExtension(
name="DICOM file format",
extension=".dcm",
priority=["DICOM", "ITK"],
),
FileExtension(
extension=".dcr",
priority=["RAW-FI"],
),
FileExtension(
name="Intel DCX",
extension=".dcx",
priority=["pillow", "DCX-PIL"],
),
FileExtension(
name="DirectX Texture Container",
extension=".dds",
priority=["pillow", "DDS-FI", "DDS-PIL"],
),
FileExtension(
name="Windows Bitmap",
extension=".DIB",
priority=["pillow", "DIB-PIL"],
),
FileExtension(
name="DICOM file format",
extension=".dicom",
priority=["ITK"],
),
FileExtension(
extension=".dng",
priority=["RAW-FI"],
),
FileExtension(
extension=".drf",
priority=["RAW-FI"],
),
FileExtension(
extension=".dsc",
priority=["RAW-FI"],
),
FileExtension(
name="Enhanced Compression Wavelet",
extension=".ecw",
priority=["GDAL"],
),
FileExtension(
name="Windows Metafile",
extension=".emf",
priority=["pillow", "WMF-PIL"],
),
FileExtension(
name="Encapsulated Postscript",
extension=".eps",
priority=["pillow", "EPS-PIL"],
),
FileExtension(
extension=".erf",
priority=["RAW-FI"],
),
FileExtension(
name="ILM OpenEXR",
extension=".exr",
priority=["EXR-FI"],
),
FileExtension(
extension=".fff",
priority=["RAW-FI"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fit",
priority=["pillow", "FITS-PIL", "FITS"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fits",
priority=["pillow", "FITS-PIL", "FITS"],
),
FileExtension(
name="Autodesk FLC Animation",
extension=".flc",
priority=["pillow", "FLI-PIL"],
),
FileExtension(
name="Autodesk FLI Animation",
extension=".fli",
priority=["pillow", "FLI-PIL"],
),
FileExtension(
name="Kodak FlashPix",
extension=".fpx",
priority=["pillow", "FPX-PIL"],
),
FileExtension(
name="Independence War 2: Edge Of Chaos Texture Format",
extension=".ftc",
priority=["pillow", "FTEX-PIL"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fts",
priority=["FITS"],
),
FileExtension(
name="Independence War 2: Edge Of Chaos Texture Format",
extension=".ftu",
priority=["pillow", "FTEX-PIL"],
),
FileExtension(
name="Flexible Image Transport System File",
extension=".fz",
priority=["FITS"],
),
FileExtension(
name="Raw fax format CCITT G.3",
extension=".g3",
priority=["G3-FI"],
),
FileExtension(
name="GIMP brush file",
extension=".gbr",
priority=["pillow", "GBR-PIL"],
),
FileExtension(
name="Grassroots DICOM",
extension=".gdcm",
priority=["ITK"],
),
FileExtension(
name="Graphics Interchange Format",
extension=".gif",
priority=["pillow", "GIF-PIL"],
),
FileExtension(
name="UMDS GIPL",
extension=".gipl",
priority=["ITK"],
),
FileExtension(
name="gridded meteorological data",
extension=".grib",
priority=["pillow", "GRIB-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".h5",
priority=["pillow", "HDF5-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".hdf",
priority=["pillow", "HDF5-PIL"],
),
FileExtension(
name="Hierarchical Data Format 5",
extension=".hdf5",
priority=["ITK"],
),
FileExtension(
name="JPEG Extended Range",
extension=".hdp",
priority=["JPEG-XR-FI"],
),
FileExtension(
name="High Dynamic Range Image",
extension=".hdr",
priority=["HDR-FI", "ITK"],
),
FileExtension(
extension=".ia",
priority=["RAW-FI"],
),
FileExtension(
name="Mac OS Icon File",
extension=".icns",
priority=["pillow", "ICNS-PIL"],
),
FileExtension(
name="Windows Icon File",
extension=".ico",
priority=["pillow", "ICO-FI", "ICO-PIL"],
),
FileExtension(
name="ILBM Interleaved Bitmap",
extension=".iff",
priority=["IFF-FI"],
),
FileExtension(
name="IPTC/NAA",
extension=".iim",
priority=["pillow", "IPTC-PIL"],
),
FileExtension(
extension=".iiq",
priority=["RAW-FI"],
),
FileExtension(
name="IFUNC Image Memory",
extension=".im",
priority=["pillow", "IM-PIL"],
),
FileExtension(
extension=".img",
priority=["ITK", "GDAL"],
),
FileExtension(
extension=".img.gz",
priority=["ITK"],
),
FileExtension(
name="IM Tools",
extension=".IMT",
priority=["pillow", "IMT-PIL"],
),
FileExtension(
name="Image Processing Lab",
extension=".ipl",
priority=["ITK"],
),
FileExtension(
name="JPEG 2000",
extension=".j2c",
priority=["pillow", "J2K-FI", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG 2000",
extension=".j2k",
priority=["pillow", "J2K-FI", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG",
extension=".jfif",
priority=["pillow", "JPEG-PIL"],
),
FileExtension(
name="JPEG",
extension=".jif",
priority=["JPEG-FI"],
),
FileExtension(
name="JPEG Network Graphics",
extension=".jng",
priority=["JNG-FI"],
),
FileExtension(
name="JPEG 2000",
extension=".jp2",
priority=["pillow", "JP2-FI", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG 2000",
extension=".jpc",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG",
extension=".jpe",
priority=["pillow", "JPEG-FI", "JPEG-PIL"],
),
FileExtension(
name="Joint Photographic Experts Group",
extension=".jpeg",
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL"],
),
FileExtension(
name="JPEG 2000",
extension=".jpf",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="Joint Photographic Experts Group",
extension=".jpg",
priority=["pillow", "JPEG-PIL", "JPEG-FI", "ITK", "GDAL"],
),
FileExtension(
name="JPEG 2000",
extension=".jpx",
priority=["pillow", "JPEG2000-PIL"],
),
FileExtension(
name="JPEG Extended Range",
extension=".jxr",
priority=["JPEG-XR-FI"],
),
FileExtension(
extension=".k25",
priority=["RAW-FI"],
),
FileExtension(
extension=".kc2",
priority=["RAW-FI"],
),
FileExtension(
extension=".kdc",
priority=["RAW-FI"],
),
FileExtension(
name="C64 Koala Graphics",
extension=".koa",
priority=["KOALA-FI"],
),
FileExtension(
name="ILBM Interleaved Bitmap",
extension=".lbm",
priority=["IFF-FI"],
),
FileExtension(
name="Lytro F01",
extension=".lfp",
priority=["LYTRO-LFP"],
),
FileExtension(
name="Lytro Illum",
extension=".lfr",
priority=["LYTRO-LFR"],
),
FileExtension(
name="ZEISS LSM",
extension=".lsm",
priority=["ITK", "TIFF"],
),
FileExtension(
name="McIdas area file",
extension=".MCIDAS",
priority=["pillow", "MCIDAS-PIL"],
external_link="https://www.ssec.wisc.edu/mcidas/doc/prog_man/2003print/progman2003-formats.html",
),
FileExtension(
extension=".mdc",
priority=["RAW-FI"],
),
FileExtension(
extension=".mef",
priority=["RAW-FI"],
),
FileExtension(
name="FreeSurfer File Format",
extension=".mgh",
priority=["ITK"],
),
FileExtension(
name="ITK MetaImage",
extension=".mha",
priority=["ITK"],
),
FileExtension(
name="ITK MetaImage Header",
extension=".mhd",
priority=["ITK"],
),
FileExtension(
name="Microsoft Image Composer",
extension=".mic",
priority=["pillow", "MIC-PIL"],
),
FileExtension(
name="Matroska Multimedia Container",
extension=".mkv",
priority=["FFMPEG"],
),
FileExtension(
name="Medical Imaging NetCDF",
extension=".mnc",
priority=["ITK"],
),
FileExtension(
name="Medical Imaging NetCDF 2",
extension=".mnc2",
priority=["ITK"],
),
FileExtension(
name="Leaf Raw Image Format",
extension=".mos",
priority=["RAW-FI"],
),
FileExtension(
name="QuickTime File Format",
extension=".mov",
priority=["FFMPEG"],
),
FileExtension(
name="MPEG-4 Part 14",
extension=".mp4",
priority=["FFMPEG"],
),
FileExtension(
name="Moving Picture Experts Group",
extension=".mpeg",
priority=["FFMPEG"],
),
FileExtension(
name="Moving Picture Experts Group",
extension=".mpg",
priority=["FFMPEG"],
),
FileExtension(
name="JPEG Multi-Picture Format",
extension=".mpo",
priority=["pillow", "MPO-PIL"],
),
FileExtension(
name="Magnetic resonance imaging",
extension=".mri",
priority=["DICOM"],
),
FileExtension(
extension=".mrw",
priority=["RAW-FI"],
),
FileExtension(
name="Windows Paint",
extension=".msp",
priority=["pillow", "MSP-PIL"],
),
FileExtension(
extension=".nef",
priority=["RAW-FI"],
),
FileExtension(
extension=".nhdr",
priority=["ITK"],
),
FileExtension(
extension=".nia",
priority=["ITK"],
),
FileExtension(
extension=".nii",
priority=["ITK"],
),
FileExtension(
name="nii.gz",
extension=".nii.gz",
priority=["ITK"],
),
FileExtension(
name="Numpy Array",
extension=".npz",
priority=["NPZ"],
),
FileExtension(
extension=".nrrd",
priority=["ITK"],
),
FileExtension(
extension=".nrw",
priority=["RAW-FI"],
),
FileExtension(
extension=".orf",
priority=["RAW-FI"],
),
FileExtension(
name="Portable Bitmap",
extension=".pbm",
priority=["PGM-FI", "PGMRAW-FI"],
),
FileExtension(
name="Kodak PhotoCD",
extension=".pcd",
priority=["pillow", "PCD-FI", "PCD-PIL"],
),
FileExtension(
name="Macintosh PICT",
extension=".pct",
priority=["PICT-FI"],
),
FileExtension(
name="Zsoft Paintbrush",
extension=".PCX",
priority=["pillow", "PCX-FI", "PCX-PIL"],
),
FileExtension(
extension=".pef",
priority=["RAW-FI"],
),
FileExtension(
extension=".pfm",
priority=["PFM-FI"],
),
FileExtension(
name="Portable Greymap",
extension=".pgm",
priority=["PGM-FI", "PGMRAW-FI"],
),
FileExtension(
name="Macintosh PICT",
extension=".pic",
priority=["PICT-FI", "ITK"],
),
FileExtension(
name="Macintosh PICT",
extension=".pict",
priority=["PICT-FI"],
),
FileExtension(
name="Portable Network Graphics",
extension=".png",
priority=["pillow", "PNG-PIL", "PNG-FI", "ITK"],
),
FileExtension(
name="Pbmplus image",
extension=".ppm",
priority=["pillow", "PPM-PIL"],
),
FileExtension(
name="Pbmplus image",
extension=".pbm",
priority=["pillow", "PPM-PIL"],
),
FileExtension(
name="Pbmplus image",
extension=".pbm",
priority=["pillow", "PPM-PIL", "PPM-FI"],
),
FileExtension(
name="Portable Pixelmap (ASCII)",
extension=".ppm",
priority=["PPM-FI"],
),
FileExtension(
name="Portable Pixelmap (Raw)",
extension=".ppm",
priority=["PPMRAW-FI"],
),
FileExtension(
name="Ghostscript",
extension=".ps",
priority=["pillow", "EPS-PIL"],
),
FileExtension(
        name="Adobe Photoshop 2.5 and 3.0",
extension=".psd",
priority=["pillow", "PSD-PIL", "PSD-FI"],
),
FileExtension(
extension=".ptx",
priority=["RAW-FI"],
),
FileExtension(
extension=".pxn",
priority=["RAW-FI"],
),
FileExtension(
name="PIXAR raster image",
extension=".pxr",
priority=["pillow", "PIXAR-PIL"],
),
FileExtension(
extension=".qtk",
priority=["RAW-FI"],
),
FileExtension(
extension=".raf",
priority=["RAW-FI"],
),
FileExtension(
name="Sun Raster File",
extension=".ras",
priority=["pillow", "SUN-PIL", "RAS-FI"],
),
FileExtension(
extension=".raw",
priority=["RAW-FI", "LYTRO-ILLUM-RAW", "LYTRO-F01-RAW"],
),
FileExtension(
extension=".rdc",
priority=["RAW-FI"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".rgb",
priority=["pillow", "SGI-PIL"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".rgba",
priority=["pillow", "SGI-PIL"],
),
FileExtension(
extension=".rw2",
priority=["RAW-FI"],
),
FileExtension(
extension=".rwl",
priority=["RAW-FI"],
),
FileExtension(
extension=".rwz",
priority=["RAW-FI"],
),
FileExtension(
name="Silicon Graphics Image",
extension=".sgi",
priority=["pillow", "SGI-PIL"],
),
FileExtension(
name="SPE File Format",
extension=".spe",
priority=["SPE"],
),
FileExtension(
extension=".SPIDER",
priority=["pillow", "SPIDER-PIL"],
),
FileExtension(
extension=".sr2",
priority=["RAW-FI"],
),
FileExtension(
extension=".srf",
priority=["RAW-FI"],
),
FileExtension(
extension=".srw",
priority=["RAW-FI"],
),
FileExtension(
extension=".sti",
priority=["RAW-FI"],
),
FileExtension(
extension=".stk",
priority=["TIFF"],
),
FileExtension(
name="Shockwave Flash",
extension=".swf",
priority=["SWF"],
),
FileExtension(
name="Truevision TGA",
extension=".targa",
priority=["TARGA-FI"],
),
FileExtension(
name="Truevision TGA",
extension=".tga",
priority=["pillow", "TGA-PIL", "TARGA-FI"],
),
FileExtension(
name="Tagged Image File",
extension=".tif",
priority=["TIFF", "pillow", "TIFF-PIL", "TIFF-FI", "FEI", "ITK", "GDAL"],
),
FileExtension(
name="Tagged Image File Format",
extension=".tiff",
priority=["TIFF", "pillow", "TIFF-PIL", "TIFF-FI", "FEI", "ITK", "GDAL"],
),
FileExtension(
extension=".vtk",
priority=["ITK"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wap",
priority=["WBMP-FI"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wbm",
priority=["WBMP-FI"],
),
FileExtension(
name="Wireless Bitmap",
extension=".wbmp",
priority=["WBMP-FI"],
),
FileExtension(
name="JPEG Extended Range",
extension=".wdp",
priority=["JPEG-XR-FI"],
),
FileExtension(
extension=".webm",
priority=["FFMPEG"],
),
FileExtension(
name="Google WebP",
extension=".webp",
priority=["WEBP-FI"],
),
FileExtension(
name="Windows Meta File",
extension=".wmf",
priority=["pillow", "WMF-PIL"],
),
FileExtension(
name="Windows Media Video",
extension=".wmv",
priority=["FFMPEG"],
),
FileExtension(
name="X11 Bitmap",
extension=".xbm",
priority=["pillow", "XBM-PIL", "XBM-FI"],
),
FileExtension(
name="X11 Pixel Map",
extension=".xpm",
priority=["pillow", "XPM-PIL", "XPM-FI"],
),
FileExtension(
name="Thumbnail Image",
extension=".XVTHUMB",
priority=["pillow", "XVTHUMB-PIL"],
),
]
extension_list.sort(key=lambda x: x.extension)
known_extensions: Dict[str, List[FileExtension]] = dict()
for ext in extension_list:
if ext.extension not in known_extensions:
known_extensions[ext.extension] = list()
known_extensions[ext.extension].append(ext)
extension_list = [ext for ext_list in known_extensions.values() for ext in ext_list]
_video_extension_strings = [
".avi",
".mkv",
".mov",
".mp4",
".mpeg",
".mpg",
".webm",
".wmv",
".gif",
]
video_extensions: List[FileExtension] = list()
for ext_string in _video_extension_strings:
formats = known_extensions[ext_string]
video_extensions.append(formats[0])
video_extensions.sort(key=lambda x: x.extension)
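# --- Illustrative usage sketch (added for clarity; not part of the original
# module). It relies only on names defined above (``known_extensions`` and the
# ``FileExtension.priority`` lists) and shows how the preferred plugin for a
# given extension could be looked up: the leading priority entry of the first
# FileExtension registered for that extension is the first-choice backend.
if __name__ == "__main__":
    for ext_string in (".png", ".tiff", ".mp4"):
        candidates = known_extensions.get(ext_string, [])
        if candidates:
            # Each known_extensions value is a list of FileExtension objects.
            print(ext_string, "->", candidates[0].priority[0])
        else:
            print(ext_string, "-> no registered extension metadata")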
|
imageio/imageio
|
imageio/config/extensions.py
|
Python
|
bsd-2-clause
| 22,732
|
[
"NetCDF",
"VTK"
] |
bd7865b5f791f4a0faa35e3dc6ab1ae79316b8b2ff69028cfc6d4da6535bd09e
|
#
# Copyright (C) 2011-2021 greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
import os
import sys
import tempfile
import unittest
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem import rdDepictor
from rdkit.Chem import Draw
from rdkit.Chem import rdMolDescriptors
try:
from rdkit.Chem.Draw import IPythonConsole
except ImportError:
IPythonConsole = None
try:
from rdkit.Chem.Draw import cairoCanvas
except ImportError:
cairoCanvas = None
try:
from rdkit.Chem.Draw import spingCanvas
except ImportError:
spingCanvas = None
try:
from rdkit.Chem.Draw import aggCanvas
except ImportError:
aggCanvas = None
try:
from rdkit.Chem.Draw import qtCanvas
except ImportError:
qtCanvas = None
class TestCase(unittest.TestCase):
showAllImages = False
def test_interactive(self):
# We avoid checking in the code with development flag set
self.assertFalse(self.showAllImages)
def setUp(self):
if IPythonConsole is not None and Draw.MolsToGridImage == IPythonConsole.ShowMols:
IPythonConsole.UninstallIPythonRenderer()
self.mol = Chem.MolFromSmiles('c1c(C[15NH3+])ccnc1[C@](Cl)(Br)[C@](Cl)(Br)F')
def _testMolToFile(self):
try:
fhdl, fn = tempfile.mkstemp(suffix='.png')
# mkstemp returns a file handle that we don't need; close it
os.close(fhdl)
fhdl = None
self.assertEqual(os.path.getsize(fn), 0)
Draw.MolToFile(self.mol, fn)
self.assertNotEqual(os.path.getsize(fn), 0)
finally:
os.remove(fn)
@unittest.skipIf(cairoCanvas is None, 'Skipping cairo test')
def testCairoFile(self):
os.environ['RDKIT_CANVAS'] = 'cairo'
self._testMolToFile()
@unittest.skipIf(aggCanvas is None, 'Skipping agg test')
def testAggFile(self):
os.environ['RDKIT_CANVAS'] = 'agg'
self._testMolToFile()
@unittest.skipIf(spingCanvas is None, 'Skipping sping test')
def testSpingFile(self):
os.environ['RDKIT_CANVAS'] = 'sping'
self._testMolToFile()
def _testMolToImage(self, mol=None, kekulize=True, options=None, showImage=False, **kwargs):
mol = mol or self.mol
img = Draw.MolToImage(mol, size=(300, 300), kekulize=kekulize, options=options, **kwargs)
self.assertTrue(img)
self.assertEqual(img.size[0], 300)
self.assertEqual(img.size[1], 300)
if self.showAllImages or showImage:
img.show()
@unittest.skipIf(cairoCanvas is None, 'Skipping cairo test')
def testCairoImage(self):
os.environ['RDKIT_CANVAS'] = 'cairo'
self._testMolToImage()
@unittest.skipIf(aggCanvas is None, 'Skipping agg test')
def testAggImage(self):
os.environ['RDKIT_CANVAS'] = 'agg'
self._testMolToImage()
@unittest.skipIf(spingCanvas is None, 'Skipping sping test')
def testSpingImage(self):
os.environ['RDKIT_CANVAS'] = 'sping'
self._testMolToImage()
@unittest.skipIf(qtCanvas is None, 'Skipping Qt test')
def testQtImage(self):
try:
from PySide import QtGui
except ImportError:
try:
from PyQt5 import QtGui
except ImportError:
from PySide2 import QtGui
_ = QtGui.QGuiApplication(sys.argv)
img = Draw.MolToQPixmap(self.mol, size=(300, 300))
self.assertTrue(img)
self.assertEqual(img.size().height(), 300)
self.assertEqual(img.size().width(), 300)
# img.save('/tmp/D_me.png')
@unittest.skipIf(cairoCanvas is None, 'Skipping cairo test')
def testCairoImageDash(self):
os.environ['RDKIT_CANVAS'] = 'cairo'
self._testMolToImage(kekulize=False)
@unittest.skipIf(aggCanvas is None, 'Skipping agg test')
def testAggImageDash(self):
os.environ['RDKIT_CANVAS'] = 'agg'
self._testMolToImage(kekulize=False)
@unittest.skipIf(spingCanvas is None, 'Skipping sping test')
def testSpingImageDash(self):
os.environ['RDKIT_CANVAS'] = 'sping'
self._testMolToImage(kekulize=False, showImage=False)
@unittest.skipIf(spingCanvas is None, 'Skipping sping test')
def testGithubIssue54(self):
# Assert that radicals depict with PIL
os.environ['RDKIT_CANVAS'] = 'sping'
mol = Chem.MolFromSmiles('c1([O])ccc(O)cc1')
img = Draw.MolToImage(mol)
self.assertTrue(img)
# img.show()
def testSpecialCases(self):
options = Draw.DrawingOptions()
options.atomLabelDeuteriumTritium = True
self._testMolToImage(mol=Chem.MolFromSmiles('[2H][C@]([3H])(C)F'), options=options)
# shared rings
self._testMolToImage(mol=Chem.MolFromSmiles('c1cccc2cc(cccc3)c3cc21'))
self._testMolToImage(mol=Chem.MolFromSmiles('C1=CC=CC=CC=C1'))
self._testMolToImage(mol=Chem.MolFromSmiles('C=C=C'))
self._testMolToImage(mol=Chem.MolFromSmiles('CC#N'), showImage=False)
self._testMolToImage(mol=Chem.MolFromSmiles('[CH2-][C-2]C[CH3+][CH5+2]'))
self._testMolToImage(mol=Chem.MolFromSmiles('[Na+].[OH-]'))
self._testMolToImage(mol=Chem.MolFromSmiles('c1ccccc1c1ccccc1'),
highlightAtoms=(0, 1, 2, 3, 4, 5, 6))
self._testMolToImage(mol=Chem.MolFromSmiles('c1ccccc1c1ccccc1'),
highlightBonds=(0, 2, 4, 6, 8, 10))
self._testMolToImage(mol=Chem.MolFromSmiles('c1ccccc1c1ccc(cc1)c1ccc(cc1)c1ccc(cc1)'))
def testGithubIssue86(self):
# Assert that drawing code doesn't modify wedge bonds
mol = Chem.MolFromSmiles('F[C@H](Cl)Br')
for b in mol.GetBonds():
self.assertEqual(b.GetBondDir(), Chem.BondDir.NONE)
rdDepictor.Compute2DCoords(mol)
img = Draw.MolToImage(mol, kekulize=False)
self.assertTrue(img)
# img.show()
for b in mol.GetBonds():
self.assertEqual(b.GetBondDir(), Chem.BondDir.NONE)
Chem.WedgeMolBonds(mol, mol.GetConformer())
obds = [x.GetBondDir() for x in mol.GetBonds()]
self.assertEqual(obds.count(Chem.BondDir.NONE), 2)
img = Draw.MolToImage(mol, kekulize=False)
self.assertTrue(img)
# img.show()
nbds = [x.GetBondDir() for x in mol.GetBonds()]
self.assertEqual(obds, nbds)
def testGridSVG(self):
mols = [Chem.MolFromSmiles('NC(C)C(=O)' * x) for x in range(10)]
legends = ['mol-%d' % x for x in range(len(mols))]
svg = Draw.MolsToGridImage(mols, legends=legends, molsPerRow=3, subImgSize=(200, 200),
useSVG=True)
self.assertIn("width='600px' height='800px'", svg)
svg = Draw.MolsToGridImage(mols, legends=legends, molsPerRow=4, subImgSize=(200, 200),
useSVG=True)
self.assertIn("width='800px' height='600px'", svg)
svg = Draw.MolsToGridImage(mols, legends=legends, molsPerRow=3, subImgSize=(300, 300),
useSVG=True)
self.assertIn("width='900px' height='1200px'", svg)
self.assertNotIn("class='note'", svg)
dopts = Draw.rdMolDraw2D.MolDrawOptions()
dopts.addAtomIndices = True
svg = Draw.MolsToGridImage(mols, legends=legends, molsPerRow=3, subImgSize=(300, 300),
useSVG=True, drawOptions=dopts)
self.assertIn("class='note'", svg)
def testDrawMorgan(self):
from rdkit.Chem import rdMolDescriptors
m = Chem.MolFromSmiles('c1ccccc1CC1CC1')
bi = {}
fp = rdMolDescriptors.GetMorganFingerprintAsBitVect(m, radius=2, bitInfo=bi)
self.assertTrue(872 in bi)
svg1 = Draw.DrawMorganBit(m, 872, bi)
aid, r = bi[872][0]
svg2 = Draw.DrawMorganEnv(m, aid, r)
self.assertEqual(svg1, svg2)
self.assertTrue("style='fill:#CCCCCC;" in svg1)
self.assertTrue("style='fill:#E5E533;" in svg1)
self.assertTrue("style='fill:#9999E5;" in svg1)
svg1 = Draw.DrawMorganBit(m, 872, bi, centerColor=None)
aid, r = bi[872][0]
svg2 = Draw.DrawMorganEnv(m, aid, r, centerColor=None)
self.assertEqual(svg1, svg2)
self.assertTrue("style='fill:#CCCCCC;" in svg1)
self.assertTrue("style='fill:#E5E533;" in svg1)
self.assertFalse("style='fill:#9999E5;" in svg1)
with self.assertRaises(KeyError):
Draw.DrawMorganBit(m, 32, bi)
if hasattr(Draw, 'MolDraw2DCairo'):
# Github #3796: make sure we aren't trying to generate metadata:
png = Draw.DrawMorganBit(m, 872, bi, useSVG=False)
self.assertIn(b'PNG', png)
self.assertIsNone(Chem.MolFromPNGString(png))
def testDrawRDKit(self):
m = Chem.MolFromSmiles('c1ccccc1CC1CC1')
bi = {}
rdkfp = Chem.RDKFingerprint(m, maxPath=5, bitInfo=bi)
self.assertTrue(1553 in bi)
svg1 = Draw.DrawRDKitBit(m, 1553, bi)
path = bi[1553][0]
svg2 = Draw.DrawRDKitEnv(m, path)
self.assertEqual(svg1, svg2)
self.assertTrue("style='fill:#E5E533;" in svg1)
self.assertFalse("style='fill:#CCCCCC;" in svg1)
self.assertFalse("style='fill:#9999E5;" in svg1)
with self.assertRaises(KeyError):
Draw.DrawRDKitBit(m, 32, bi)
if hasattr(Draw, 'MolDraw2DCairo'):
# Github #3796: make sure we aren't trying to generate metadata:
png = Draw.DrawRDKitBit(m, 1553, bi, useSVG=False)
self.assertIn(b'PNG', png)
self.assertIsNone(Chem.MolFromPNGString(png))
def testDrawReaction(self):
# this shouldn't throw an exception...
rxn = AllChem.ReactionFromSmarts(
"[c;H1:3]1:[c:4]:[c:5]:[c;H1:6]:[c:7]2:[nH:8]:[c:9]:[c;H1:1]:[c:2]:1:2.O=[C:10]1[#6;H2:11][#6;H2:12][N:13][#6;H2:14][#6;H2:15]1>>[#6;H2:12]3[#6;H1:11]=[C:10]([c:1]1:[c:9]:[n:8]:[c:7]2:[c:6]:[c:5]:[c:4]:[c:3]:[c:2]:1:2)[#6;H2:15][#6;H2:14][N:13]3"
)
img = Draw.ReactionToImage(rxn)
def testGithub3762(self):
m = Chem.MolFromSmiles('CC(=O)O')
ats = [1, 2, 3]
svg = Draw._moltoSVG(m, (250, 200), ats, "", False)
self.assertIn('stroke:#FF7F7F;stroke-width:2', svg)
svg = Draw._moltoSVG(m, (250, 200), ats, "", False, highlightBonds=[])
self.assertNotIn('stroke:#FF7F7F;stroke-width:2', svg)
if __name__ == '__main__':
unittest.main()
|
greglandrum/rdkit
|
rdkit/Chem/Draw/UnitTestDraw.py
|
Python
|
bsd-3-clause
| 9,926
|
[
"RDKit"
] |
813f8af74e47f5f14aa0cc658e6a0c5a9cc776a01a62cf3622984916b3481eae
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 - 2015 -- Lars Heuer <heuer[at]semagia.com>
# All rights reserved.
#
# License: BSD, see LICENSE.txt for more details.
#
"""\
Tests subject parsing.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD license
"""
import os
import codecs
from nose.tools import eq_
from cablemap.core.reader import parse_subject
from cablemap.core import cable_from_file
_DATA_DIR = 'data-subject'
_TEST_DATA = (
(u"TAGS: TAG TAG2\nSUBJECT: SINGLE LINE SUBJECT \n\nREF: REF 1", u'SINGLE LINE SUBJECT'),
(u"TAGS: TAG TAG2\nSUBJECT: SUBJECT WHICH HAS a \nSECOND LINE \n\nREF: REF 1", u'SUBJECT WHICH HAS a SECOND LINE'),
("SUBJECT: SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES \nREF: A. RIYADH 1579 B. RIYADH 1408 C. RIYADH 1298\n",
u'SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES'),
("SUBJECT: SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES\nREF: A. RIYADH 1579 B. RIYADH 1408 C. RIYADH 1298\n",
u'SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES'),
("SUBJECT: SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES\nREF A. RIYADH 1579 B. RIYADH 1408 C. RIYADH 1298\n",
u'SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES'),
("SUBJECT SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES\nREF A. RIYADH 1579 B. RIYADH 1408 C. RIYADH 1298\n",
u'SAG AGREES TO USG STEPS TO PROTECT OIL FACILITIES'),
("SUBJECT: NEGOTIATIONS \n \n", u'NEGOTIATIONS'),
("SUBJECT: A CAUCASUS WEDDING \nClassified By: Deputy Chief of Mission Daniel A. Russell. Reason 1.4 ( b, d)",
u'A CAUCASUS WEDDING'),
("SUBJ: GUINEA - U.S./French Meeting with President Compaore \n\nClassified by Charge d'Affaires",
u'GUINEA - U.S./French Meeting with President Compaore'),
("SUBJECT: UAE FM DISCUSSES TALIBAN FINANCIAL FLOWS AND REINTEGRATION \nWITH AMB. HOLBROOKE AND TREASURY A/S COHEN\nCLASSIFIED BY: Richard",
u'UAE FM DISCUSSES TALIBAN FINANCIAL FLOWS AND REINTEGRATION WITH AMB. HOLBROOKE AND TREASURY A/S COHEN'),
("SUBJECT: EXTENDED NATIONAL JURISDICTIONS OVER HIGH SEAS \n\nREF: STATE 106206 CIRCULAR; STATE CA-3400 NOV 2, 1966",
u'EXTENDED NATIONAL JURISDICTIONS OVER HIGH SEAS'),
('SUBJECT: (S) GERMANY TAKING ACTION ON SHIPMENT OF ...', False,
u'(S) GERMANY TAKING ACTION ON SHIPMENT OF ...'),
('SUBJECT: (S) GERMANY TAKING ACTION ON SHIPMENT OF ...', True,
u'GERMANY TAKING ACTION ON SHIPMENT OF ...'),
(u'E.o. 12958: decl: 01/07/2014 Tags: prel, pgov, pins, tu Subject: turkish p.m. Erdogan goes to washington: how strong a leader in the face of strong challenges?\n\n(U) Classifi',
u'turkish p.m. Erdogan goes to washington: how strong a leader in the face of strong challenges?'),
("SUBJECT: PART 3 OF 3: THE LIFE AND TIMES OF SOUTH AFRICA'S \nNEW PRESIDENT\nPRETORIA 00000954 001.2 OF 004\n",
u"PART 3 OF 3: THE LIFE AND TIMES OF SOUTH AFRICA'S NEW PRESIDENT"),
("SUBJECT: ZIM NOTES 11-09-2009 \n----------- \n1. SUMMARY\n----------- \n",
u'ZIM NOTES 11-09-2009'),
("TAGS: PGOV EINV INRB GM\nSUBJECT: GERMANY/BAVARIA: CSU HOPES FOR FRESH START WITH NEW AND\nYOUNGER FACES IN CABINETAND A DYNAMIC SECRETARY GENERAL \n\n",
u'GERMANY/BAVARIA: CSU HOPES FOR FRESH START WITH NEW AND YOUNGER FACES IN CABINETAND A DYNAMIC SECRETARY GENERAL'),
('SUBJECT: ASSISTANT SECRETARY MEETS WITH ZIMBABWE \n CONFIDENTIAL\nPAGE 02 HARA',
u'ASSISTANT SECRETARY MEETS WITH ZIMBABWE CONFIDENTIAL'),
('TAGS ENRG, EUN, ECON, EIND, KGHG, SENV, SW\nSUBJECT: SWEDISH DEPUTY PM URGES SENIOR USG VISITS TO SWEDEN DURING\nEU PRESIDENCY; WANTS TO LAUNCH U.S.-EU ALERNATIVE ENERGY PARTNERSHIP AT U.S.-EU SUMMIT\nThis is an Action request. Please see para 2.\n',
u'SWEDISH DEPUTY PM URGES SENIOR USG VISITS TO SWEDEN DURING EU PRESIDENCY; WANTS TO LAUNCH U.S.-EU ALERNATIVE ENERGY PARTNERSHIP AT U.S.-EU SUMMIT'),
('C O R R E C T E D COPY//SUBJECT LINE//////////////////////////////////\n\nNOFORN\nSIPDIS\n\nEO 12958 DECL: 07/09/2018\nTAGS PREL, PTER, MOPS, IR, PK, AF, CA\n\nSUBJECT: COUNSELOR, CSIS DIRECTOR DISCUSS CT THREATS,\nPAKISTAN, AFGHANISTAN, IRAN\nREF: A. OTTAWA 360 B. OTTAWA 808 C. OTTAWA 850 D. OTTAWA 878\nOTTAWA 00000918 001.2 OF 003\n',
u'COUNSELOR, CSIS DIRECTOR DISCUSS CT THREATS, PAKISTAN, AFGHANISTAN, IRAN'),
(u"TAGS OVIP (CLINTON, HILLARY), PGOV, PREL, KDEV, ECON,\nNL, IS, SR\nSUBJECT: (U) Secretary Clinton's July 14 conversation\nwith Dutch Foreign Minister Verhagen\n1. Classified by Bureau Assistant Secretary Philip H. Gordon. Reason: 1.4 (d)\n2. (U) July 14; 2:45 p.m.; Washington, DC.\n3. (SBU) Participants:\n",
False,
u"(U) Secretary Clinton's July 14 conversation with Dutch Foreign Minister Verhagen"),
(u"TAGS OVIP (CLINTON, HILLARY), PGOV, PREL, KDEV, ECON,\nNL, IS, SR\nSUBJECT: (U) Secretary Clinton's July 14 conversation\nwith Dutch Foreign Minister Verhagen\n1. Classified by Bureau Assistant Secretary Philip H. Gordon. Reason: 1.4 (d)\n2. (U) July 14; 2:45 p.m.; Washington, DC.\n3. (SBU) Participants:\n",
True,
u"Secretary Clinton's July 14 conversation with Dutch Foreign Minister Verhagen"),
('TAGS: ECON EINV ENRG PGOV PBTS MARR BR\n\nSUBJECT: AMBASSADOR SOBEL MEETS WITH KEY ENERGY ENTITIES IN RIO Ref(s): A) 08 RIO DE JAN 138; B) 08 RIO DE JAN 0044 and previous Sensitive But Unclassified - Please handle accordingly. This message has been approved by Ambassador Sobel. ',
u'AMBASSADOR SOBEL MEETS WITH KEY ENERGY ENTITIES IN RIO'),
("SUBJECT: BRAZIL: BLACKOUT -CAUSES AND IMPLICATIONS Classified By: Charge d'Affaires Cherie Jackson, Reasons 1.4 (b) and (d). REFTELS: A) 2008 BRASILIA 672, B) 2008 BRASILIA 593, C)2008 SAO PAULO 260\n",
u'BRAZIL: BLACKOUT -CAUSES AND IMPLICATIONS'),
('SUBJECT: MEMBERS OF CONGRESS DISCUSS BONUSES, BAIL-OUTS AND\nOTHER REFORM MEASURES WITH UK OFFICIALS\n1. (SBU) Summary. Bonuses, regulatory structures',
u'MEMBERS OF CONGRESS DISCUSS BONUSES, BAIL-OUTS AND OTHER REFORM MEASURES WITH UK OFFICIALS'),
('SUBJECT: AARGH! SWEDISH PIRATES SET SAIL FOR BRUSSELS\n1. Summary and Comment: Sweden',
u'AARGH! SWEDISH PIRATES SET SAIL FOR BRUSSELS'),
('SUBJECT: EU JHA INFORMAL MINISTERIAL\n1. Summary. EU Justice and Home...',
u'EU JHA INFORMAL MINISTERIAL'),
('\nSUBJECT: CARDINAL HUMMES DISCUSSES LULA GOVERNMENT, THE OPPOSITION, AND FTAA REF: (A) 05 SAO PAULO 405; (B) 05 SAO PAULO 402 (C) 02 BRASILIA 2670',
u'CARDINAL HUMMES DISCUSSES LULA GOVERNMENT, THE OPPOSITION, AND FTAA'),
# 06BRASILIA882
('SUBJECT: ENERGY INSTALLATIONS REF: BRASILIA 861', u'ENERGY INSTALLATIONS'),
# 08MOSCOW864
("TAGS: EPET ENRG ECON PREL PGOV RS\nSUBJECT: WHAT'S BEHIND THE RAIDS ON TNK-BP AND BP REF: A. MOSCOW 816 B. MOSCOW 768 C. 07 MOSCOW 3054 Classified By: Ambassador William J. Burns for Reasons 1.4 (b/d)\n",
u"WHAT'S BEHIND THE RAIDS ON TNK-BP AND BP"),
# 08TRIPOLI266
('SUBJECT: GOL DELAYS RELEASING DETAINED HUMAN RIGHTS ACTIVIST FATHI EL-JAHMI REF: A) TRIPOLI 223, B) TRIPOLI 229 \n',
u'GOL DELAYS RELEASING DETAINED HUMAN RIGHTS ACTIVIST FATHI EL-JAHMI'),
# 05BRASILIA1839
('''E.O. 12958: N/A
TAGS: EAIR EINV
SUBJECT: BRAZIL - NEXT STEPS FOR VARIG UNCLEAR REFS: (A) BRASILIA 1608, (B) BRASILIA 1631, (C) BRASILIA 1566
1. (SBU) Summary.
''',
u'BRAZIL - NEXT STEPS FOR VARIG UNCLEAR'),
# 05BRASILIA2806
('''CAROLYN COLDREN FAA MIAMI FOR MARK RIOS
E.O. 12958: N/A
TAGS: EAIR EINV PGOV ETRD
SUBJECT: BRAZILIAN GOVERNMENT INTERVENES IN VARIG - TOO LITTLE, TOO LATE? REFS: (A) BRASILIA 1839, (B) BRASILIA 1608, (C) BRASILIA 1566
1. (SBU) Summary.''',
u'BRAZILIAN GOVERNMENT INTERVENES IN VARIG - TOO LITTLE, TOO LATE?'),
# 05TASHKENT284
('''TAGS ECON PREL PINR UZ
SUBJECT: GULNORA INC. STRIKES AGAIN
REFS: A) 04 TASHKENT 3390 B) 04 TASHKENT 2574 and previous
CLASSIFIED BY AMB. JON R. PURNELL, FOR REASONS 1.4 (B, D)
1. (C) ''',
u'GULNORA INC. STRIKES AGAIN'),
# 07REYKJAVIK232
('''TAGS: OEXC SCUL KPAO IC
SUBJECT: Grants Support for PD Projects (Iceland)
REF: STATE 105588 ''', True,
u'Grants Support for PD Projects (Iceland)'),
# 07REYKJAVIK232
('''TAGS: OEXC SCUL KPAO IC
SUBJECT: Grants Support for PD Projects (Iceland)
REF: STATE 105588 ''', False,
u'Grants Support for PD Projects (Iceland)'),
# 07MOSCOW1704
('''E.O. 12958: DECL: 04/13/2017
TAGS: KIPR KNNP KPAO TBIO PREL RS
SUBJECT: RUSSIAN SCIEN...
C O N F I D E N T I A L MOSCOW 001704 SIPDIS SIPDIS STATE FOR EUR/RUS (GUHA), EUR/ACE, OES/STC (DAUGHARTY) OSTP FOR MARBURGER BERLIN FOR HAGEN E.O. 12958: DECL: 04/13/2017 TAGS: KIPR KNNP KPAO TBIO PREL RS
Classified By: ES''',
u'RUSSIAN SCIEN...'),
# 10STATE2634
('''S E C R E T STATE 002634
C O R R E C T E D COPY (SUBJECT LINE)
NOFORN SIPDIS
GENEVA: FOR CD DELEGATION E.O. 12958: DECL: 01/11/2035
TAGS: CH MCAP PARM PREL TSPA
SUBJECT: DEMARCHE FOLLOWING CHINA'S JANUARY 2010 INTERCEPT FLIGHT-TEST
Classified By:''',
u"DEMARCHE FOLLOWING CHINA'S JANUARY 2010 INTERCEPT FLIGHT-TEST"),
# Test HTML entity replacements
('SUBJECT: Bla ’ bla “ bla ” bla ',
u'Bla \u2019 bla \u201c bla \u201d bla'),
# 03THEHAGUE1910
('''SUBJECT: CODEL HASTERT TO THE NETHERLANDS (JUL 30 - JUL 31)
REF: STATE 184642 ''',
'CODEL HASTERT TO THE NETHERLANDS (JUL 30 - JUL 31)'),
# 08LONDON1991
('''SUBJECT: (C/NF) WHO WOULD REPLACE GORDON BROWN AS UK PRIMEREF: A. LONDON 1939 B. LONDON 1704''',
u'WHO WOULD REPLACE GORDON BROWN AS UK PRIME'),
# 07CAIRO3126
(u'''SUBJECT: SCENESETTER FOR AMBASSADOR CROCKER’S VISIT TO CAIROClassified By: DEPUTY CHIEF OF MISSION STUART JONES. REASONS: 1.4 (B) and (D)\n1. (S) Welcome to Cairo.''',
u'SCENESETTER FOR AMBASSADOR CROCKER’S VISIT TO CAIRO'),
# 06CAIRO941
(u'''SUBJECT: FBI DIRECTOR MUELLER’S VISIT TO EGYPTREF: CAIRO 493
Classified by DCM Stua''',
u'FBI DIRECTOR MUELLER’S VISIT TO EGYPT'),
# 04BRASILIA445
(u'''E.O. 12958: N/A
TAGS: KIPR ECON ETRD KCRM PGOV BR IPR
SUBJECT: BRAZIL - 2004 SPECIAL 301 RESPONSE
Refs: A) State 29549
B) Sao Paulo 276
C) Rio de Janeiro 128
D) Brasilia 313
E) Brasilia 222
F) Brasilia 202
G) 2003 Sao Paulo 2199
H) 2003 Brasilia 3868
I) 2003 Brasilia 3138
J) 2003 Brasilia 3122
K) 2003 Brasilia 2943
L) 2003 Sao Paulo 1186
''',
u'BRAZIL - 2004 SPECIAL 301 RESPONSE'),
# No subject
(u'''E.O. 12958: N/A
TAGS: KIPR ECON ETRD KCRM PGOV BR IPR
''',
u''),
# 09CAIRO226
(u'''TAGS: TBIO KFLU KSTH PGOV ECON KSCA EAGR EG
SUBJECT:EGYPT'S 55th BIRD FLU VICTIM, ANOTHER CHILD Sensitive but ''',
'''EGYPT'S 55th BIRD FLU VICTIM, ANOTHER CHILD'''),
# 08MANAMA117
(u'''TAGS: PGOV PTER KISL ASEC BA
UBJECT: BAHRAIN WILL FORMALLY REQUEST QATAR TO EXECUTE LEGAL JUDGMENT AGAINST KHALIFA AL SUBAIE REF: A. A) MANAMA 20
''',
u'''BAHRAIN WILL FORMALLY REQUEST QATAR TO EXECUTE LEGAL JUDGMENT AGAINST KHALIFA AL SUBAIE'''),
# 06GENEVA2654
(ur'''E.O. 12958: DECL: 10/16/2016 \
TAGS: PHUM UNHRC
SUBJECT: LITTLE PROGRESS AND MUCH CONFUSION AND FRUSTRATION \
LEAD TO ADJOURNMENT OF HUMAN RIGHTS COUNCIL'S SECOND SESSION \
\
GENEVA 00002654 001.2 OF 004 \
\
\
Classified By: ''',
'''LITTLE PROGRESS AND MUCH CONFUSION AND FRUSTRATION LEAD TO ADJOURNMENT OF HUMAN RIGHTS COUNCIL'S SECOND SESSION'''),
# 06GENEVA1673
(ur'''TAGS: PHUM UNHRC
SUBJECT: HRC: SPECIAL SESSION ON PALESTINE \
\
REF: A. A) BERN 1253''',
u'''HRC: SPECIAL SESSION ON PALESTINE'''),
# 09CAIRO1945
(u'''
SUBJECT:
Cooperation and Coordination Busts Fraud Ring
1.(SBU) Summary: The Cairo Fraud Prevention Unit''',
u'Cooperation and Coordination Busts Fraud Ring'),
# 05OTTAWA3726
('''E.O. 12958: N/A
TAGS: ECON ETRD PGOV CA MX SIPDIS
SUBJECT: TRILATERAL REGULATORY COOPERATION MEETING UNDER
SPP: INFORMATION SHARING ''',
u'TRILATERAL REGULATORY COOPERATION MEETING UNDER SPP: INFORMATION SHARING'),
# 06BUENOSAIRES579
('''E.O. 12958: N/A
TAGS: KPAO OPRC KMDR PREL MEDIA REACTION
SUBJECT: MEDIA REACTION US PRESIDENT BUSH'S POPULARITY
RATING CHILEAN PRESIDENT MICHELE BACHELET'S
INAUGURATION CEREMONY RICE AND MORALES MEETING THE US-
LATIN AMERICAN RELATIONSHIP US-CHILEAN TIES US-
URUGUAYAN FTA USG AND ARGENTINA ON THE TRI-BORDER AREA
03/13/06
¶1. SUMMARY STATEMENT''',
u"MEDIA REACTION US PRESIDENT BUSH'S POPULARITY RATING CHILEAN PRESIDENT MICHELE BACHELET'S INAUGURATION CEREMONY RICE AND MORALES MEETING THE US-LATIN AMERICAN RELATIONSHIP US-CHILEAN TIES US-URUGUAYAN FTA USG AND ARGENTINA ON THE TRI-BORDER AREA 03/13/06"),
# 05DHAKA1919
(u'''USARPAC FOR APOP-IM (MAJ HEDRICK)
E.O. 12958: N/A
TAGS: KMDR OIIP OPRC KPAO PREL ETRD PTER ASEC BG OCII
SUBJECT: Media Reaction: Myanmar; Dhaka
Summary: Commenting''',
u'Media Reaction: Myanmar; Dhaka'),
)
def test_parse_subject():
def check(content, clean, expected):
eq_(expected, parse_subject(content, clean=clean))
for test in _TEST_DATA:
clean = True
if len(test) == 3:
content, clean, expected = test
else:
content, expected = test
yield check, content, clean, expected
def test_parse_subject_issue14():
"""\
Test against issue #14.
<https://github.com/heuer/cablemap/issues/#issue/14>
"""
def check(expected, input):
eq_(expected, parse_subject(input))
base = os.path.join(os.path.dirname(__file__), _DATA_DIR)
for name in [name for name in os.listdir(os.path.join(base, 'in')) if name.endswith('.html')]:
input = cable_from_file(os.path.join(base, 'in', name)).content
expected = codecs.open(os.path.join(base, 'out', name.replace('.html', '.txt')), 'rb', 'utf-8').readline().rstrip()
yield check, expected, input
if __name__ == '__main__':
import nose
nose.core.runmodule()
|
heuer/cablemap
|
cablemap.core/tests/test_reader_subject.py
|
Python
|
bsd-3-clause
| 13,996
|
[
"VisIt"
] |
59d173b740ca684cf3eab6b734fea2304e337222d5fbaf19a6956d5ef0e32388
|
from numpy import (
vstack, where, intersect1d, in1d, unique,
cross, abs, arccos, sign,
dot, array, cov, nan_to_num, inf, pi,
hstack, repeat, bincount, arange
)
from numpy.linalg import norm, solve
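# Note: Box2D/Box3D below build *oriented* bounding boxes -- the main box axis is
# taken from the summed/averaged fiber vectors and the remaining axes come from
# orthogonal_vector() (plus cross() in 3-D), so the boxes follow the local tract
# orientation rather than the coordinate axes.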
class Box2D:
def __init__(self, *args, **kwargs):
if len(args) <= 5:
self._compute_bounding_box(*args, **kwargs)
else:
self._set_variables(*args)
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
center = points.mean(0)
centered_points = points - center
orientation = vectors.sum(0)
orientation /= norm(orientation)
orthogonal_direction = orthogonal_vector(orientation)
orthogonal_direction /= norm(orthogonal_direction)
points_orthogonal = dot(
orthogonal_direction,
centered_points.T
)
points_orientation = dot(orientation, centered_points.T)
max_main = points_orientation.max()
min_main = points_orientation.min()
max_orthogonal = points_orthogonal.max()
min_orthogonal = points_orthogonal.min()
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction * max_orthogonal,
orientation * max_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * min_orthogonal,
orientation * min_main + orthogonal_direction * max_orthogonal,
)) + center)
center = bounding_box_corners.mean(0)
volume = (max_main - min_main) * (max_orthogonal - min_orthogonal)
self.orthogonal = orthogonal_direction
self.points_orientation = points_orientation
self.points_orthogonal = points_orthogonal
self._set_variables(
bounding_box_corners, center, orientation,
labels, points, point_ids, vectors, volume,
None, None, None, level
)
def _set_variables(self,
box,
center,
orientation,
labels,
points,
point_ids,
vectors,
volume,
parent,
left,
right,
level
):
self.box = box
self.center = center
self.orientation = orientation
self.labels = labels
self.points = points
self.point_ids = point_ids
self.vectors = vectors
self.volume = volume
self.parent = parent
self.left = left
self.right = right
self.level = level
self._calculate_orientation_limits()
self._calculate_orthogonal_limits()
def _calculate_orientation_limits(self):
projections = [dot(self.orientation, point) for point in self.box]
self.orientation_limits = (min(projections), max(projections))
def _calculate_orthogonal_limits(self):
projections = [dot(self.orthogonal, point) for point in self.box]
self.orthogonal_limits = (min(projections), max(projections))
def siblings(self, generations_up=0, generations_down=0):
if generations_up == 0 and generations_down == 1:
left = [self.left] if self.left is not None else []
right = [self.right] if self.right is not None else []
return left + right
elif generations_up > 0:
if self.parent is None:
return []
return self.parent.siblings(generations_up - 1, generations_down + 1)
elif generations_down > 1:
if self.left is not None:
left = self.left.siblings(0, generations_down - 1)
else:
left = []
if self.right is not None:
right = self.right.siblings(0, generations_down - 1)
else:
right = []
return left + right
def swap_direction(self):
self.orientation *= -1
self._calculate_orientation_limits()
def overlap_main(self, box):
projections = [dot(self.orientation, point) for point in box.box]
orientation_limits = (min(projections), max(projections))
if (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return True
return False
def center_signed_orientational_distance(self, box):
return dot(self.orientation, self.center - box.center)
def center_distance(self, box):
return norm(box.center - self.center)
def __repr__(self):
return self.box.__repr__() + '\n' +\
'level:' + repr(self.level)
def __str__(self):
return self.box.__str__() + '\n' +\
'level:' + str(self.level)
class Box3D(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1,
centered_points.T
)
points_orthogonal2 = dot(
orthogonal_direction2,
centered_points.T
)
max_main, min_main = points_orientation.max(), points_orientation.min()
max_orthogonal1, min_orthogonal1 = (
points_orthogonal1.max(),
points_orthogonal1.min()
)
max_orthogonal2, min_orthogonal2 = (
points_orthogonal2.max(),
points_orthogonal2.min()
)
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
def overlap_orthogonal(self, box):
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
overlap_orthogonal1 = False  # avoid an unbound local when the intervals do not overlap
if (
self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
):
overlap_orthogonal1 = True
if not overlap_orthogonal1:
return False
projections = dot(self.orthogonal2, box.box.T).T
orthogonal2_limits = (min(projections), max(projections))
overlap_orthogonal2 = False
if (
self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
):
overlap_orthogonal2 = True
return overlap_orthogonal1 and overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
projections = dot(self.orthogonal2, box.box.T)
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
class Box3DRich(Box2D):
def _compute_bounding_box(self, points, point_ids, vectors, labels=None, level=None, robustify=None):
original_points = points
original_point_ids = point_ids
original_labels = labels
original_vectors = vectors
if robustify == 'points' and len(points) > 4:
p_mean = points.mean(0)
p_cov = cov(points.T)
c_points = points - p_mean
z = (solve(p_cov, c_points.T) * c_points.T).sum(0)
cutoff = 9.3484036044961485 # chi2.ppf(.975, 3)
points = points[z < cutoff]
point_ids = point_ids[z < cutoff]
print(('Discarded', (len(original_points) - len(points)) * 1. / len(points)))
vectors = vectors[z < cutoff]
if labels is not None:
labels = labels[z < cutoff]
orientation = vectors.mean(0)
orientation /= norm(orientation)
orthogonal_direction1 = orthogonal_vector(orientation)
orthogonal_direction2 = cross(orientation, orthogonal_direction1)
orthogonal_direction1 /= norm(orthogonal_direction1)
orthogonal_direction2 /= norm(orthogonal_direction2)
center = points.mean(0)
centered_points = points - center
points_orientation = dot(orientation, centered_points.T)
points_orthogonal1 = dot(
orthogonal_direction1, centered_points.T)
points_orthogonal2 = dot(
orthogonal_direction2, centered_points.T)
max_main, min_main = points_orientation.max(), points_orientation.min()
max_orthogonal1, min_orthogonal1 = points_orthogonal1.max(
), points_orthogonal1.min()
max_orthogonal2, min_orthogonal2 = points_orthogonal2.max(
), points_orthogonal2.min()
bounding_box_corners = (vstack((
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * max_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * max_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
orientation * min_main + orthogonal_direction1 *
max_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * min_orthogonal2,
orientation * min_main + orthogonal_direction1 *
min_orthogonal1 + orthogonal_direction2 * max_orthogonal2,
)) + center)
center = bounding_box_corners.mean(0)
volume = (
(max_main - min_main) *
(max_orthogonal1 - min_orthogonal1) *
(max_orthogonal2 - min_orthogonal2)
)
self.orthogonal1 = orthogonal_direction1
self.orthogonal2 = orthogonal_direction2
self.points_orientation = points_orientation
self.points_orthogonal1 = points_orthogonal1
self.points_orthogonal2 = points_orthogonal2
self._set_variables(
bounding_box_corners, center, orientation,
original_labels, original_points, original_point_ids, original_vectors, volume,
None, None, None, level
)
def _calculate_orthogonal_limits(self):
projections = dot(self.orthogonal1, self.box.T).T
self.orthogonal1_limits = (min(projections), max(projections))
projections = dot(self.orthogonal2, self.box.T).T
self.orthogonal2_limits = (min(projections), max(projections))
def overlap_main(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
return (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
)
def overlap_orthogonal(self, box):
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
overlap_orthogonal1 = False  # avoid an unbound local when the intervals do not overlap
if (
self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]
):
overlap_orthogonal1 = True
if not overlap_orthogonal1:
return False
projections = dot(self.orthogonal2, box.box.T).T
orthogonal2_limits = (min(projections), max(projections))
overlap_orthogonal2 = False
if (
self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]
):
overlap_orthogonal2 = True
return overlap_orthogonal1 and overlap_orthogonal2
def overlap(self, box):
return self.overlap_main(box) and self.overlap_orthogonal(box)
def overlap_volume(self, box):
projections = dot(self.orientation, box.box.T).T
orientation_limits = (min(projections), max(projections))
if not (
self.orientation_limits[0] <= orientation_limits[0] <= self.orientation_limits[1] or
self.orientation_limits[0] <= orientation_limits[1] <= self.orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[0] <= orientation_limits[1] or
orientation_limits[0] <= self.orientation_limits[1] <= orientation_limits[1]
):
return 0
overlap_main_length =\
min(orientation_limits[1], self.orientation_limits[1]) -\
max(orientation_limits[0], self.orientation_limits[0])
projections = dot(self.orthogonal1, box.box.T).T
orthogonal1_limits = (min(projections), max(projections))
if not\
(self.orthogonal1_limits[0] <= orthogonal1_limits[0] <= self.orthogonal1_limits[1] or
self.orthogonal1_limits[0] <= orthogonal1_limits[1] <= self.orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[0] <= orthogonal1_limits[1] or
orthogonal1_limits[0] <= self.orthogonal1_limits[1] <= orthogonal1_limits[1]):
return 0
overlap_orthogonal1_length = \
min(orthogonal1_limits[1], self.orthogonal1_limits[1]) -\
max(orthogonal1_limits[0], self.orthogonal1_limits[0])
projections = dot(self.orthogonal2, box.box.T)
orthogonal2_limits = (min(projections), max(projections))
if not\
(self.orthogonal2_limits[0] <= orthogonal2_limits[0] <= self.orthogonal2_limits[1] or
self.orthogonal2_limits[0] <= orthogonal2_limits[1] <= self.orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[0] <= orthogonal2_limits[1] or
orthogonal2_limits[0] <= self.orthogonal2_limits[1] <= orthogonal2_limits[1]):
return 0
overlap_orthogonal2_length = \
min(orthogonal2_limits[1], self.orthogonal2_limits[1]) -\
max(orthogonal2_limits[0], self.orthogonal2_limits[0])
return overlap_main_length * overlap_orthogonal1_length * overlap_orthogonal2_length
def orthogonal_vector(vector, tol=1e-8):
a_vector = abs(vector)
if len(vector) == 3:
if a_vector[0] > tol:
orthogonal = vector[::-1] * (1, 0, -1)
elif a_vector[2] > tol:
orthogonal = vector[::-1] * (-1, 0, 1)
elif a_vector[1] > tol:
# the cross product with the x-axis is exactly orthogonal and non-degenerate here
orthogonal = cross(vector, (1., 0., 0.))
else:
raise ValueError('vector must have non-null norm')
else:
if a_vector[0] > tol:
orthogonal = vector[::-1] * (-1, 1)
elif a_vector[1] > tol:
orthogonal = vector[::-1] * (1, -1)
else:
raise ValueError('vector must have non-null norm')
orthogonal /= norm(orthogonal)
return orthogonal
def box_cut(points, direction, mapped_points=None, max_main=None, min_main=None):
if mapped_points is None:
mapped_points = dot(direction, points.T)
if max_main is None:
max_main = mapped_points.max()
if min_main is None:
min_main = mapped_points.min()
mid_main = (max_main + min_main) / 2.
split1 = where(mapped_points <= mid_main)
split2 = where(mapped_points > mid_main)
return split1, split2
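# Minimal usage sketch for box_cut() (hypothetical data, not part of the original
# module): the all_obb_* helpers below call it to bisect a point cloud at the
# midpoint of its projection onto a chosen direction.
#
#   from numpy import array
#   pts = array([[0., 0.], [1., 0.], [2., 0.], [3., 0.]])
#   left, right = box_cut(pts, array([1., 0.]))
#   # left selects the points with projection <= 1.5, right the remaining ones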
def all_obb_2d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None):
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
# Box2D expects explicit point ids as its second argument
box_center = Box2D(points, arange(len(points)), vectors, labels, level)
level += 1
if len(unique(labels)) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
left, right = box_cut(points, box_center.orthogonal,
mapped_points=box_center.points_orthogonal)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) >= len(unique(labels)) * intersection_threshold:
split_along_fiber = True
else:
points_left = points[left]
vectors_left = vectors[left]
box_left = Box2D(points_left, arange(len(points_left)), vectors_left, labels_left)
points_right = points[right]
vectors_right = vectors[right]
box_right = Box2D(points_right, arange(len(points_right)), vectors_right, labels_right)
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_left)
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique(labels)) * intersection_threshold:
return [box_center]
points_left = points[left]
vectors_left = vectors[left]
left = all_obb_2d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold)
points_right = points[right]
vectors_right = vectors[right]
right = all_obb_2d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
def all_obb_3d_along_tract(
points, vectors, labels, tol=1e-8, level=0,
intersection_threshold=.8, split_threshold=.2,
box=None, clean=False, point_ids=None
):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
left, right = box_cut(
points, box_center.orientation,
mapped_points=box_center.points_orientation
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if clean:
total_labels_count = bincount(labels)
labels_count = {
side: bincount(split_labels[side])
for side in split_labels
}
labels_ratio = {
side: nan_to_num(
labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
) for side in labels_count
}
new_results = [box_center]
if (
(len(labels_both) <= len(unique_labels) * intersection_threshold) and
(box_center.points_orientation.ptp() / 2. < min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
if clean:
clean_labels = in1d(
new_labels,
intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
)
new_points = new_points[clean_labels]
new_point_ids = new_point_ids[clean_labels]
new_vectors = new_vectors[clean_labels]
new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d_along_tract(
new_points, new_vectors, new_labels,
tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean
)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, clean=False, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (
(box is not None) and (points is box.points) and
(vectors is box.vectors) and (labels is box.labels)
):
box_center = box
box.level = level
else:
box_center = Box3D(
points, point_ids, vectors,
labels, level
)
level += 1
if len(points) == 1:
return [box_center]
unique_labels = unique(labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
points, getattr(box_center, orientation),
mapped_points=getattr(box_center, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': labels[left],
'right': labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
if clean:
total_labels_count = bincount(labels)
labels_count = {
side: bincount(split_labels[side])
for side in split_labels
}
labels_ratio = {
side: nan_to_num(
labels_count[side] * 1. / total_labels_count[:len(labels_count[side])]
) for side in labels_count
}
new_results = [box_center]
print(level)
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
return new_results
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = points[mask]
new_point_ids = point_ids[mask]
new_vectors = vectors[mask]
if clean:
clean_labels = in1d(
new_labels,
intersect1d(labels_both, (labels_ratio[side] > .2).nonzero()[0]),
)
new_points = new_points[clean_labels]
new_point_ids = new_point_ids[clean_labels]
new_vectors = new_vectors[clean_labels]
new_labels = new_labels[clean_labels]
if len(new_points) > 1:
new_tree = all_obb_3d(
new_points, new_vectors, new_labels, tol=tol, level=level, point_ids=new_point_ids,
intersection_threshold=intersection_threshold, clean=clean)
setattr(box_center, side, new_tree[0])
getattr(box_center, side).parent = box_center
new_results += new_tree
return new_results
def all_obb_3d_nr(points_, vectors_, labels_, tol=1e-8, level_=0, intersection_threshold=.8, split_threshold=.2, robustify=None, point_ids_=None):
if point_ids_ is None:
point_ids_ = arange(len(points_))
# only Box3DRich accepts the robustify keyword
root = Box3DRich(points_, point_ids_, vectors_, labels_, level_, robustify=robustify)
stack = [root]
total_points = len(points_)
points_done = 0
while len(stack):
box = stack.pop()
level = box.level + 1
if len(box.points) == 1:
continue
unique_labels = unique(box.labels)
for orientation in ('orthogonal1', 'orthogonal2', 'orientation'):
left, right = box_cut(
box.points, getattr(box, orientation),
mapped_points=getattr(box, 'points_' + orientation)
)
masks = {
'left': left,
'right': right
}
split_labels = {
'left': box.labels[left],
'right': box.labels[right]
}
labels_both = intersect1d(split_labels['left'], split_labels['right'])
if len(labels_both) == 0:
break
print((level, len(unique_labels), len(box.points), total_points - points_done))
if (
orientation == 'orientation' and
(len(labels_both) <= len(unique_labels) * intersection_threshold) # and
#(box_center.points_orientation.ptp() / 2. > min((norm(v) for v in vectors)))
):
points_done += len(box.points)
continue
for side in ('left', 'right'):
mask = masks[side]
new_labels = split_labels[side]
if len(new_labels) > 0:
new_points = box.points[mask]
new_point_ids = box.point_ids[mask]
new_vectors = box.vectors[mask]
if len(new_points) > 1 and len(new_points) < len(box.points):
new_box = Box3DRich(new_points, new_point_ids, new_vectors, new_labels, level, robustify=robustify)
setattr(box, side, new_box)
getattr(box, side).parent = box
print(("\tAdded to stack ", side))
stack.append(new_box)
else:
points_done += len(new_points)
return root
def all_obb_3d_old(points, vectors, labels, tol=1e-8, level=0, intersection_threshold=.8, split_threshold=.2, box=None, point_ids=None):
if point_ids is None:
point_ids = arange(len(points))
if (box is not None) and (points is box.points) and (vectors is box.vectors) and (labels is box.labels):
box_center = box
box.level = level
else:
box_center = Box3D(points, point_ids, vectors, labels, level)
level += 1
if len(points) == 1:
return [box_center]
# First compute the splitting across the fibers
split_along_fiber = True
o1_left, o1_right = box_cut(
points, box_center.orthogonal1, mapped_points=box_center.points_orthogonal1)
o2_left, o2_right = box_cut(
points, box_center.orthogonal2, mapped_points=box_center.points_orthogonal2)
o1_labels_left = labels[o1_left]
o1_labels_right = labels[o1_right]
o2_labels_left = labels[o2_left]
o2_labels_right = labels[o2_right]
unique_labels = unique(labels)
if (
len(intersect1d(o1_labels_left, o1_labels_right)) > 0 and
len(intersect1d(o2_labels_left, o2_labels_right)) > 0
):
split_along_fiber = True
else:
o1_box_left = Box3D(points[o1_left], point_ids[o1_left], vectors[o1_left], o1_labels_left)
o1_box_right = Box3D(points[o1_right], point_ids[o1_right], vectors[o1_right], o1_labels_right)
o2_box_left = Box3D(points[o2_left], point_ids[o2_left], vectors[o2_left], o2_labels_left)
o2_box_right = Box3D(points[o2_right], point_ids[o2_right], vectors[o2_right], o2_labels_right)
if (o1_box_left.volume + o1_box_right.volume) < (o2_box_left.volume + o2_box_right.volume):
box_left = o1_box_left
box_right = o1_box_right
else:
box_left = o2_box_left
box_right = o2_box_right
if (box_left.volume + box_right.volume) < (1 - split_threshold) * box_center.volume:
split_along_fiber = False
left = all_obb_3d(
box_left.points, box_left.vectors, box_left.labels, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_left)
right = all_obb_3d(
box_right.points, box_right.vectors, box_right.labels, tol=tol, level=level,
intersection_threshold=intersection_threshold, box=box_right)
else:
split_along_fiber = True
if split_along_fiber: # If we could not split across we split along
left, right = box_cut(
points, box_center.orientation, mapped_points=box_center.points_orientation)
labels_left = labels[left]
labels_right = labels[right]
if len(intersect1d(labels_left, labels_right)) <= len(unique_labels) * intersection_threshold:
return [box_center]
points_left = points[left]
point_ids_left = point_ids[left]
vectors_left = vectors[left]
left = all_obb_3d(
points_left, vectors_left, labels_left, tol=tol, level=level,
intersection_threshold=intersection_threshold, point_ids=point_ids_left)
points_right = points[right]
point_ids_right = point_ids[right]
vectors_right = vectors[right]
right = all_obb_3d(
points_right, vectors_right, labels_right, tol=tol, level=level,
intersection_threshold=intersection_threshold, point_ids=point_ids_right)
box_center.left = left[0]
box_center.right = right[0]
left[0].parent = box_center
right[0].parent = box_center
return [box_center] + left + right
def point_coverage_by_level(obbs, points):
level = 0
points_level = [obb.points for obb in obbs if obb.level == level]
level_coverage = []
while len(points_level) > 0:
level_coverage.append(sum((len(
points) for points in points_level)) * 1. / len(points))
level += 1
points_level = [obb.points for obb in obbs if obb.level == level if len(
obb.points) > 0]
return array(level_coverage)
def draw_boxes_2d(obbs, level, color=None, **args):
from pylab import plot, cm
for i, obb in enumerate(obbs):
if obb.level != level:
continue
box = vstack([obb.box, obb.box[0]])
if color is None:
plot(box.T[0], box.T[1], lw=5, hold=True, **args)
else:
plot(box.T[0], box.T[
1], lw=5, hold=True, c=cm.jet(color[i]), **args)
def draw_box_2d(obbs, **args):
from pylab import plot, quiver
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
box = vstack([obb.box, obb.box[0]])
plot(box.T[0], box.T[1], lw=5, hold=True, **args)
quiver([obb.center[0]], [obb.center[1]], [obb.orientation[
0]], [obb.orientation[1]], pivot='middle', hold=True, **args)
def draw_box_3d(obbs, tube_radius=1, color=None, **kwargs):
from mayavi.mlab import plot3d
from numpy.random import rand
if isinstance(obbs, Box2D):
obbs = [obbs]
for obb in obbs:
if color is None:
color_ = tuple(rand(3))
else:
color_ = color
box = obb.box
b1 = vstack([box[:4], box[0]]).T
b2 = vstack([box[4:], box[4]]).T
es = [vstack([b1.T[i], b2.T[i]]).T for i in range(4)]
plot3d(b1[0], b1[1], b1[
2], tube_radius=tube_radius, color=color_, **kwargs)
plot3d(b2[0], b2[1], b2[
2], tube_radius=tube_radius, color=color_, **kwargs)
[plot3d(e[0], e[1], e[
2], tube_radius=tube_radius, color=color_, **kwargs) for e in es]
def oriented_trace(obb, positive=True, generations=2, angle_threshold=pi / 4):
tract = [obb]
center = obb
candidates = center.siblings(generations)
if positive:
sg = 1
else:
sg = -1
while len(candidates) > 0:
next_candidate_distance = inf
for c in candidates:
signed_distance = sg *\
sign(center.center_signed_orientational_distance(c)) *\
center.center_distance(c)
if (signed_distance <= 0) or\
not center.overlap_orthogonal(c) or\
arccos(dot(center.orientation, c.orientation)) > angle_threshold:
continue
if signed_distance < next_candidate_distance:
next_candidate_distance = signed_distance
next_candidate = c
if next_candidate_distance < inf:
if next_candidate in tract:
break
tract.append(next_candidate)
if dot(center.orientation, next_candidate.orientation) < 0:
next_candidate.swap_direction()
center = next_candidate
candidates = center.siblings(generations)
else:
break
return tract
def trace(obb, generations=2, angle_threshold=pi / 4):
trace_positive = oriented_trace(obb, True, generations, angle_threshold)
trace_negative = oriented_trace(obb, False, generations, angle_threshold)
return trace_negative[::-1] + trace_positive
def get_most_probable_trace(obbs, generations=2, angle_threshold=pi / 4, return_all=True):
traces_list = [trace(obb, generations=generations,
angle_threshold=angle_threshold) for obb in obbs]
traces_w_set = [(t, set(t)) for t in traces_list]
n = 1. * len(traces_w_set)
traces_with_frequency = []
while len(traces_w_set) > 0:
trace_ = traces_w_set.pop()
traces_w_set_new = []
count = 1
for t in traces_w_set:
if t[1] == trace_[1]:
count += 1
else:
traces_w_set_new.append(t)
traces_with_frequency.append((count / n, trace_[0]))
traces_w_set = traces_w_set_new
traces_with_frequency.sort(cmp=lambda x, y: int(sign(y[0] - x[0])))
return traces_with_frequency
def get_level(tree, level):
if tree is None or tree.level > level:
return []
elif tree.level == level:
return [tree]
else:
return get_level(tree.left, level) + get_level(tree.right, level)
def overlapping_boxes(tree, box, levels=None, threshold=0.):
if tree is None:
return []
overlap = tree.overlap_volume(box)
if overlap < threshold:
return []
else:
left = overlapping_boxes(
tree.left, box, levels=levels, threshold=threshold)
right = overlapping_boxes(
tree.right, box, levels=levels, threshold=threshold)
if levels is None or tree.level in levels:
return [tree] + left + right
else:
return left + right
def containing_boxes(tree, box, levels=None, threshold=1.):
if tree is None or tree.level > max(levels):
return []
normalized_overlap = tree.overlap_volume(box) / box.volume
if normalized_overlap < threshold:
return []
else:
left = overlapping_boxes(
tree.left, box, levels=levels, threshold=threshold)
right = overlapping_boxes(
tree.right, box, levels=levels, threshold=threshold)
if levels is None or tree.level in levels:
return [tree] + left + right
else:
return left + right
def min_max(vector, axis=None):
return array((vector.min(axis), vector.max(axis)))
def overlap_vtk(self, box):
a = self
b = box
axes_a = vstack((a.orientation, a.orthogonal1, a.orthogonal2))
axes_b = vstack((b.orientation, b.orthogonal1, b.orthogonal2))
a2b = b.center - a.center
a_a2b_limits = min_max(dot(a2b, a.box.T))
b_a2b_limits = min_max(dot(a2b, b.box.T))
if (
a_a2b_limits[0] < b_a2b_limits[1] or
b_a2b_limits[1] < a_a2b_limits[0]
):
return False
def obb_tree_dfs(obb_tree):
for obb in obb_tree:
if obb.level == 0:
root = obb
break
else:
raise ValueError('No root in the tree')
return obb_tree_dfs_recursive(root)
def obb_tree_dfs_recursive(obb_node):
if obb_node is None:
return []
if obb_node.left is None and obb_node.right is None:
return [obb_node]
return obb_tree_dfs_recursive(obb_node.left) + obb_tree_dfs_recursive(obb_node.right)
def prototype_tract(
tracts, obb_tree=None, intersection_threshold=.01, minimum_level=0,
clean=False, return_obb_tree=False, return_leave_centers=False
):
if obb_tree is None:
points = vstack([t[:-1] for t in tracts])
vectors = vstack([t[1:] - t[:-1] for t in tracts])
labels = hstack([repeat(i, len(t) - 1) for i, t in enumerate(tracts)])
obb_tree = all_obb_3d_along_tract(
points, vectors, labels,
intersection_threshold=intersection_threshold, clean=clean
)
if minimum_level < 0:
max_level = max((obb.level for obb in obb_tree))
minimum_level = max_level + 1 - minimum_level
leave_centers = array(
[obb.center for obb in obb_tree if obb.left is None and obb.right is None and obb.level >
minimum_level]
)
mse_tract = array([
((t[..., None] - leave_centers[..., None].T) ** 2).sum(1).min(0).sum()
for t in tracts
])
tract_index = mse_tract.argmin()
if return_obb_tree or return_leave_centers:
res = (tract_index,)
if return_obb_tree:
res += (obb_tree,)
if return_leave_centers:
res += (leave_centers,)
return res
else:
return tract_index
def obb_tree_level(obb_tree, level, include_superior_leaves=True):
if not isinstance(obb_tree, Box3D):
node = obb_tree[0]
for n in obb_tree:
if n.level < node.level:
node = n
else:
node = obb_tree
return obb_tree_level_dfs(node, level, include_superior_leaves=include_superior_leaves)
def obb_tree_level_dfs(obb_node, level, include_superior_leaves=True):
if obb_node is None or obb_node.level > level:
return []
if (
obb_node.level == level or
(
include_superior_leaves and
obb_node.level < level and
obb_node.left is None and obb_node.right is None
)
):
return [obb_node]
return (
obb_tree_level_dfs(obb_node.left, level, include_superior_leaves=include_superior_leaves) +
obb_tree_level_dfs(
obb_node.right, level, include_superior_leaves=include_superior_leaves)
)
def obb_from_tractography(tractography, *args, **kwargs):
# pop the flag so it is not forwarded to the obb builders, which do not accept it
along_tract = bool(kwargs.pop('along_tract', False))
fibers = tractography.tracts()
points = vstack([f[:-1] for f in fibers])
vectors = vstack([f[1:] - f[:-1] for f in fibers])
labels = hstack([repeat(i, len(f) - 1) for i, f in enumerate(fibers)])
if along_tract:
obbs3d = all_obb_3d_along_tract(
points, vectors, labels, **kwargs
)
else:
obbs3d = all_obb_3d_nr(
points, vectors, labels, **kwargs
)
return obbs3d
|
demianw/tract_querier
|
tract_querier/tract_math/tract_obb.py
|
Python
|
bsd-3-clause
| 46,294
|
[
"Mayavi"
] |
d68c74330a80c4c8d5d21415a4f2523bf8fe3222a8da641992e1ce4040c52df0
|
#!/usr/bin/python
import os, sys # low level handling, such as command line stuff
import string # string methods available
import re # regular expressions
import getopt # comand line argument handling
from low import * # custom functions, written by myself
import anydbm # index databases (file hash)
from Bio import SeqIO # biopython stuff, to parse fasta files for instance
# =============================================================================
def show_help( ):
""" displays the program parameter list and usage information """
stdout( "usage: " + sys.argv[0] + " -c <path> -o <path>" )
stdout( " " )
stdout( " option description" )
stdout( " -h help (this text here)" )
stdout( " -f blast.out file" )
stdout( " " )
sys.exit(1)
# =============================================================================
def handle_arguments():
""" verifies the presence of all necessary arguments and returns the data dir """
if len ( sys.argv ) == 1:
stderr( "no arguments provided." )
show_help()
try: # check for the right arguments
keys, values = getopt.getopt( sys.argv[1:], "hf:" )
except getopt.GetoptError:
stderr( "invalid arguments provided." )
show_help()
args = {}
for key, value in keys:
if key == '-f': args['file'] = value
if not args.has_key('file'):
stderr( "blast.out file missing." )
show_help()
if not file_exists( args.get('file') ):
stderr( "blast.out file does not exist." )
show_help()
return args
# =============================================================================
def parse_descr( text ):
hash = {}
if not re.search("GO:\d+.*evidence", text):
sys.stderr.write("return None.\n")
return hash
for match in re.finditer( '(GO:\d+)\s*\"([^"]+)\"\s*evidence', text ):
id = match.group(1)
description = match.group(2)
hash[ id ] = description
return hash
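# Worked example (hypothetical description string, for illustration only):
#   parse_descr('GO:0005634 "nucleus" evidence IEA; GO:0003677 "DNA binding" evidence IEA')
#   returns {'GO:0005634': 'nucleus', 'GO:0003677': 'DNA binding'}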
# =============================================================================
# =============================================================================
def main( args ):
fo = open( args.get('file') )
descr_index = None
for line in fo:
line = line.rstrip()
cols = line.split("\t")
if descr_index == None:
for index, col in enumerate(cols):
if re.search("GO:\d+", col):
descr_index = index
break
descr = cols[ descr_index ]
go_hash = parse_descr( descr )
for goterm, godescr in go_hash.iteritems():
L = []
for index, col in enumerate(cols):
if index == descr_index:
L.append(goterm)
L.append(godescr)
else:
L.append(col)
print string.join(L,"\t")
fo.close()
# =============================================================================
# === MAIN ====================================================================
# =============================================================================
args = handle_arguments( )
main( args )
|
lotharwissler/bioinformatics
|
python/geneontology/go-from-blastout.py
|
Python
|
mit
| 2,996
|
[
"BLAST",
"Biopython"
] |
01e8d67b42890c5d7520479c1c24dc7a8f45fa509e0d1c76281912a3aa6a7397
|
"""\
PGBF.py Perform basic operations over primitive
gaussian basis functions. The equations herein are based upon
'Gaussian Expansion Methods for Molecular Orbitals.' H. Taketa,
S. Huzinaga, and K. O-ohata. J. Phys. Soc. Japan, 21, 2313, 1966.
[THO paper].
For the purposes of this routine, a gaussian is defined as:
g(x,y,z) = A*(x^i)*(y^j)*(z^k)*exp{-a*(r-ro)^2}
The functions defined are:
overlap(g'): Compute the overlap matrix element of g with g': Int(g*g')
kinetic(g'): Compute the kinetic energy matrix element
between g and g' = Int(G*lapl(G')), where lapl is the Laplacian.
nuclear(g',r): Compute the nuclear attraction integral
Int(g*(1/r)*g'). Only programmed for 1s gaussians.
coulomb(g,g',g'',g'''): Compute the two-electron coulombic repulsion
integral Int(g(1)g'(1)(1/r12)g''(2)g'''(2)).
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
from math import sqrt,pi,pow,exp
from NumWrap import array
from PyQuante.cints import kinetic,overlap,nuclear_attraction,fact2,dist2
from PyQuante.cints import binomial, three_center_1D
#from PyQuante.Defaults import coulomb_repulsion
#added 2/8/07 by Hatem Helal hhh23@cam.ac.uk
#probably need to write the C version of grad_nuc_att in cints...
from PyQuante.pyints import grad_nuc_att
from primitive_gto import PrimitiveGTO
class PGBF(PrimitiveGTO):
"Class for Primitive Gaussian Basis Functions."
# Constructor
def __init__(self,exponent,origin,powers=(0,0,0),norm=1.):
PrimitiveGTO.__init__(self, exponent, origin, powers)
self.exp = float(exponent)
self.origin = tuple([float(i) for i in origin])
self.powers= powers
# It is already normalized (the PrimitiveGTO base class takes care of normalization)
#self.norm = float(norm)
#self.normalize()
self.coef = 1
# Public
def reset_powers(self,px,py,pz):
# Addition of the Cython PrimitiveGTO function killed the ability to do
# normalization, which means that resetting the powers is no longer
# reliable, and should be replaced with simply creating a new function.
raise Exception("Warning: reset powers no longer reliable")
self.powers = (px,py,pz)
return
def overlap(self,other):
"Compute overlap element with another PGBF"
return self.norm*other.norm*\
overlap(self.exp,self.powers,self.origin,
other.exp,other.powers,other.origin)
def kinetic(self,other):
"Overlap between two gaussians. THO eq. 2.14."
return self.norm*other.norm*\
kinetic(self.exp,self.powers,self.origin,
other.exp,other.powers,other.origin)
def multipole(self,other,i,j,k):
from pyints import multipole_ints
return self.norm*other.norm*\
multipole_ints((i,j,k),
self.exp,self.powers,self.origin,
other.exp,other.powers,other.origin)
#Need to rewrite this to:
# 1. pull the norm constants out front to be consistent
# with overlap() and kinetic()
# 2. reorder the arguments to be in the same order as overlap()
# and kinetic()
def nuclear(self,other,C):
"THO eq. 2.17 and 3.1"
return nuclear_attraction(self.origin,self.norm,
self.powers,self.exp,
other.origin,other.norm,
other.powers,other.exp,
C)
def nuclear_gradient(self,ia,ib,ic,other,C):
return self.norm*other.norm*\
array(grad_nuc_att(ia,ib,ic,self.origin,self.powers,self.exp,
other.origin,other.powers,other.exp,
C))
def amp(self,x,y,z):
"Compute the amplitude of the PGBF at point x,y,z"
i,j,k = self.powers
x0,y0,z0 = self.origin
return self.norm*self.coef*\
pow(x-x0,i)*pow(y-y0,j)*pow(z-z0,k)*\
exp(-self.exp*dist2((x,y,z),(x0,y0,z0)))
def move_center(self,dx,dy,dz):
self.origin = (self.origin[0]+dx,self.origin[1]+dy,self.origin[2]+dz)
return
# Private
def normalize(self):
"Normalize basis function. From THO eq. 2.2"
l,m,n = self.powers
alpha = self.exp
self.norm = sqrt(pow(2,2*(l+m+n)+1.5)*
pow(alpha,l+m+n+1.5)/
fact2(2*l-1)/fact2(2*m-1)/
fact2(2*n-1)/pow(pi,1.5))
# This is a hack to allow zero-valued exponents, for testing
if abs(alpha) < 1e-8: self.norm = 1.
return
# Other overloads
def __str__(self):
return "PGBF(%.2f," % self.exp +\
"(%.2f,%.2f,%.2f)," % self.origin +\
"(%d,%d,%d)," % self.powers +\
"%.2f)" % self.norm
def prim_str(self,topnorm=1):
return " <prim exp=\"%6.4f\" coeff=\"%6.4f\" ncoeff=\"%6.4f\"/>\n" \
% (self.exp,self.coef,topnorm*self.norm*self.coef)
def laplacian(self,pos):
amp = self.amp(pos[0],pos[1],pos[2])
alpha = self.exp
x = pos[0]-self.origin[0]
y = pos[1]-self.origin[1]
z = pos[2]-self.origin[2]
x2 = x*x
y2 = y*y
z2 = z*z
r2 = x2+y2+z2
L,M,N = self.powers
term = (L*(L-1)/x2 + M*(M-1)/y2 + N*(N-1)/z2) +\
4*alpha*alpha*r2 - 2*alpha*(2*(L+M+N)+3)
return self.norm*self.coef*amp*term
def grad_old(self,pos):
amp = self.amp(pos[0],pos[1],pos[2])
alpha = self.exp
L,M,N = self.powers
x = pos[0]-self.origin[0]
y = pos[1]-self.origin[1]
z = pos[2]-self.origin[2]
val = array([L/x - 2*x*alpha,M/y - 2*y*alpha,N/z-2*z*alpha])
return self.norm*self.coef*val*amp
def grad(self,x,y,z):
alpha = self.exp
I,J,K = self.powers
C = self.norm*self.coef
x0,y0,z0 = self.origin
fx = pow(x-x0,I)*exp(-alpha*pow(x-x0,2))
fy = pow(y-y0,J)*exp(-alpha*pow(y-y0,2))
fz = pow(z-z0,K)*exp(-alpha*pow(z-z0,2))
gx = -2*alpha*(x-x0)*fx
gy = -2*alpha*(y-y0)*fy
gz = -2*alpha*(z-z0)*fz
if I > 0: gx += pow(x-x0,I-1)*exp(-alpha*pow(x-x0,2))
if J > 0: gy += pow(y-y0,J-1)*exp(-alpha*pow(y-y0,2))
if K > 0: gz += pow(z-z0,K-1)*exp(-alpha*pow(z-z0,2))
return array([C*gx*fy*fz,C*fx*gy*fz,C*fx*fy*gz])
# Friend functions
def coulomb(gA,gB,gC,gD):
"""Coulomb interaction between four cartesian Gaussians; THO eq. 2.22"""
from PyQuante.cints import coulomb_repulsion
return coulomb_repulsion(gA.origin,gA.norm,gA.powers,
gA.exp,gB.origin,gB.norm,
gB.powers,gB.exp,gC.origin,
gC.norm,gC.powers,gC.exp,
gD.origin,gD.norm,gD.powers,
gD.exp)
def three_center(gA,gB,gC):
"Three-center integral between Gaussians"
na = gA.norm
nb = gB.norm
nc = gC.norm
ix = three_center_1D(gA.origin[0],gA.powers[0],gA.exp,
gB.origin[0],gB.powers[0],gB.exp,
gC.origin[0],gC.powers[0],gC.exp)
iy = three_center_1D(gA.origin[1],gA.powers[1],gA.exp,
gB.origin[1],gB.powers[1],gB.exp,
gC.origin[1],gC.powers[1],gC.exp)
iz = three_center_1D(gA.origin[2],gA.powers[2],gA.exp,
gB.origin[2],gB.powers[2],gB.exp,
gC.origin[2],gC.powers[2],gC.exp)
return na*nb*nc*ix*iy*iz
def test_3cent():
gA = PGBF(1,(0,0,0))
gB = PGBF(1,(1,0,0))
gC = PGBF(2.,(0,0,0))
gD = PGBF(1,(0,0,0),(1,0,0))
# Here we construct a "fake" function that has zero exponent, so that
# the three-center integrals that follow will be the same as a
# normal overlap integral
g0 = PGBF(0,(0,0,0))
for a,b in [(gA,gA),(gB,gB),(gA,gB),(gC,gC),(gA,gC),
(gD,gD),(gA,gD),(gB,gD)]: tester(a,b)
def tester(gA,gB):
    # ensure that the overlap integrals <ga|gb> and <gb|ga> are equal
# to the various three center integrals <ga|gb|g0> and its
# various permutations
g0 = PGBF(0,(0,0,0))
olab = gA.overlap(gB)
olba = gB.overlap(gA)
tcab0 = three_center(gA,gB,g0)
tcba0 = three_center(gB,gA,g0)
tca0b = three_center(gA,g0,gB)
tcb0a = three_center(gB,g0,gA)
tc0ab = three_center(g0,gA,gB)
tc0ba = three_center(g0,gB,gA)
diff = 0
vals = [olab,olba,tcab0,tcba0,tca0b,tcb0a,tc0ab,tc0ba]
for i in xrange(len(vals)):
for j in xrange(i):
diff = max(diff,abs(vals[i]-vals[j]))
print "For ints %s %s max diff is %f" % (gA,gB,diff)
if __name__ == '__main__': test_3cent()
|
berquist/PyQuante
|
PyQuante/PGBF.py
|
Python
|
bsd-3-clause
| 9,122
|
[
"Gaussian"
] |
dd0364858cf75e6112e9f246ccaad71a682e90e043f09953dd293f2650fa594a
|
# Storage filtering classes
#
# Copyright (C) 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Chris Lumens <clumens@redhat.com>
#
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from collections import namedtuple
from blivet import arch
from blivet.devices import DASDDevice, FcoeDiskDevice, iScsiDiskDevice, MultipathDevice, ZFCPDiskDevice
from blivet.fcoe import has_fcoe
from pyanaconda.flags import flags
from pyanaconda.i18n import CN_, CP_
from pyanaconda.storage_utils import try_populate_devicetree, on_disk_storage
from pyanaconda.ui.lib.disks import getDisks
from pyanaconda.ui.gui.utils import timed_action
from pyanaconda.ui.gui.spokes import NormalSpoke
from pyanaconda.ui.gui.spokes.advstorage.fcoe import FCoEDialog
from pyanaconda.ui.gui.spokes.advstorage.iscsi import ISCSIDialog
from pyanaconda.ui.gui.spokes.advstorage.zfcp import ZFCPDialog
from pyanaconda.ui.gui.spokes.advstorage.dasd import DASDDialog
from pyanaconda.ui.gui.spokes.lib.cart import SelectedDisksDialog
from pyanaconda.ui.categories.system import SystemCategory
__all__ = ["FilterSpoke"]
DiskStoreRow = namedtuple("DiskStoreRow", ["visible", "selected", "mutable",
"name", "type", "model", "capacity",
"vendor", "interconnect", "serial",
"wwid", "paths", "port", "target",
"lun", "ccw", "wwpn"])
class FilterPage(object):
"""A FilterPage is the logic behind one of the notebook tabs on the filter
UI spoke. Each page has its own specific filtered model overlaid on top
of a common model that holds all non-advanced disks.
A Page is created once, when the filter spoke is initialized. It is
setup multiple times - each time the spoke is revisited. When the Page
is setup, it is given a complete view of all disks that belong on this
Page. This is because certain pages may require populating a combo with
all vendor names, or other similar tasks.
This class is just a base class. One subclass should be created for each
more specialized type of page. Only one instance of each subclass should
ever be created.
"""
def __init__(self, storage, builder):
"""Create a new FilterPage instance.
Instance attributes:
builder -- A reference to the Gtk.Builder instance containing
this page's UI elements.
filterActive -- Whether the user has chosen to filter results down
on this page. If set, visible_func should take the
filter UI elements into account.
storage -- An instance of a blivet object.
"""
self.builder = builder
self.storage = storage
self.model = None
self.filterActive = False
    def ismember(self, device):
        """Does device belong on this page? This function should take into
account what kind of thing device is. It should not be concerned
with any sort of filtering settings. It only determines whether
device belongs.
"""
return True
def setup(self, store, selectedNames, disks):
"""Do whatever setup of the UI is necessary before this page can be
displayed. This function is called every time the filter spoke
is revisited, and thus must first do any cleanup that is necessary.
The setup function is passed a reference to the master store, a list
of names of disks the user has selected (either from a previous visit
or via kickstart), and a list of all disk objects that belong on this
page as determined from the ismember method.
At the least, this method should add all the disks to the store. It
may also need to populate combos and other lists as appropriate.
"""
pass
def clear(self):
"""Blank out any filtering-related fields on this page and return them
to their defaults. This is called when the Clear button is clicked.
"""
pass
def visible_func(self, model, itr, *args):
"""This method is called for every row (disk) in the store, in order to
determine if it should be displayed on this page or not. This method
should take into account whether filterActive is set, perhaps whether
something in pyanaconda.flags is setup, and other settings to make
a final decision. Because filtering can be complicated, many pages
will want to farm this decision out to another method.
The return value is a boolean indicating whether the row is visible
or not.
"""
return True
def setupCombo(self, combo, items):
"""Populate a given GtkComboBoxText instance with a list of items. The
combo will first be cleared, so this method is suitable for calling
repeatedly. The first item in the list will be empty to allow the
combo box criterion to be cleared. The first non-empty item in the
list will be selected by default.
"""
combo.remove_all()
combo.append_text('')
for i in sorted(set(items)):
combo.append_text(i)
if items:
combo.set_active(1)
def _long_identifier(self, disk):
# For iSCSI devices, we want the long ip-address:port-iscsi-tgtname-lun-XX
# identifier, but blivet doesn't expose that in any useful way and I don't
# want to go asking udev. Instead, we dig around in the deviceLinks and
# default to the name if we can't figure anything else out.
for link in disk.deviceLinks:
if "by-path" in link:
lastSlash = link.rindex("/")+1
return link[lastSlash:]
return disk.name
class SearchPage(FilterPage):
# Match these to searchTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_PORT_TARGET_LUN = 'PTL'
SEARCH_TYPE_WWID = 'WWID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("searchModel")
self.model.set_visible_func(self.visible_func)
self._lunEntry = self.builder.get_object("searchLUNEntry")
self._wwidEntry = self.builder.get_object("searchWWIDEntry")
self._combo = self.builder.get_object("searchTypeCombo")
self._portCombo = self.builder.get_object("searchPortCombo")
self._targetEntry = self.builder.get_object("searchTargetEntry")
def setup(self, store, selectedNames, disks):
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
ports = []
for disk in disks:
if hasattr(disk, "node"):
ports.append(str(disk.node.port))
self.setupCombo(self._portCombo, ports)
def clear(self):
self._lunEntry.set_text("")
self._portCombo.set_active(0)
self._targetEntry.set_text("")
self._wwidEntry.set_text("")
def _port_equal(self, device):
active = self._portCombo.get_active_text()
if active:
if hasattr(device, "node"):
return device.node.port == int(active)
else:
return False
else:
return True
def _target_equal(self, device):
active = self._targetEntry.get_text().strip()
if active:
return active in getattr(device, "initiator", "")
else:
return True
def _lun_equal(self, device):
active = self._lunEntry.get_text().strip()
if active:
if hasattr(device, "node"):
try:
return int(active) == device.node.tpgt
except ValueError:
return False
elif hasattr(device, "fcp_lun"):
return active in device.fcp_lun
else:
return True
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_PORT_TARGET_LUN:
return self._port_equal(device) and self._target_equal(device) and self._lun_equal(device)
elif filterBy == self.SEARCH_TYPE_WWID:
return self._wwidEntry.get_text() in getattr(device, "wwid", self._long_identifier(device))
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self._filter_func(device)
class MultipathPage(FilterPage):
# Match these to multipathTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_VENDOR = 'Vendor'
SEARCH_TYPE_INTERCONNECT = 'Interconnect'
SEARCH_TYPE_WWID = 'WWID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("multipathModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("multipathTypeCombo")
self._icCombo = self.builder.get_object("multipathInterconnectCombo")
self._vendorCombo = self.builder.get_object("multipathVendorCombo")
self._wwidEntry = self.builder.get_object("multipathWWIDEntry")
def ismember(self, device):
return isinstance(device, MultipathDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
disk.wwid, "\n".join(paths), "", "",
"", "", ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._vendorCombo.set_active(0)
self._wwidEntry.set_text("")
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_VENDOR:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_INTERCONNECT:
return device.bus == self._icCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_WWID:
return self._wwidEntry.get_text() in device.wwid
def visible_func(self, model, itr, *args):
if not flags.mpath:
return False
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class OtherPage(FilterPage):
# Match these to otherTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_VENDOR = 'Vendor'
SEARCH_TYPE_INTERCONNECT = 'Interconnect'
SEARCH_TYPE_ID = 'ID'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("otherModel")
self.model.set_visible_func(self.visible_func)
self._combo = self.builder.get_object("otherTypeCombo")
self._icCombo = self.builder.get_object("otherInterconnectCombo")
self._idEntry = self.builder.get_object("otherIDEntry")
self._vendorCombo = self.builder.get_object("otherVendorCombo")
def ismember(self, device):
return isinstance(device, iScsiDiskDevice) or isinstance(device, FcoeDiskDevice)
def setup(self, store, selectedNames, disks):
vendors = []
interconnects = []
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
if hasattr(disk, "node"):
port = str(disk.node.port)
lun = str(disk.node.tpgt)
else:
port = ""
lun = ""
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial,
self._long_identifier(disk), "\n".join(paths), port, getattr(disk, "initiator", ""),
lun, "", ""])
if not disk.vendor in vendors:
vendors.append(disk.vendor)
if not disk.bus in interconnects:
interconnects.append(disk.bus)
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
self.setupCombo(self._vendorCombo, vendors)
self.setupCombo(self._icCombo, interconnects)
def clear(self):
self._icCombo.set_active(0)
self._idEntry.set_text("")
self._vendorCombo.set_active(0)
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_VENDOR:
return device.vendor == self._vendorCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_INTERCONNECT:
return device.bus == self._icCombo.get_active_text()
elif filterBy == self.SEARCH_TYPE_ID:
for link in device.deviceLinks:
if "by-path" in link:
return self._idEntry.get_text().strip() in link
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class ZPage(FilterPage):
# Match these to zTypeCombo ids in glade
SEARCH_TYPE_NONE = 'None'
SEARCH_TYPE_CCW = 'CCW'
SEARCH_TYPE_WWPN = 'WWPN'
SEARCH_TYPE_LUN = 'LUN'
def __init__(self, storage, builder):
FilterPage.__init__(self, storage, builder)
self.model = self.builder.get_object("zModel")
self.model.set_visible_func(self.visible_func)
self._ccwEntry = self.builder.get_object("zCCWEntry")
self._wwpnEntry = self.builder.get_object("zWWPNEntry")
self._lunEntry = self.builder.get_object("zLUNEntry")
self._combo = self.builder.get_object("zTypeCombo")
self._isS390 = arch.isS390()
def clear(self):
self._lunEntry.set_text("")
self._ccwEntry.set_text("")
self._wwpnEntry.set_text("")
def ismember(self, device):
return isinstance(device, ZFCPDiskDevice) or isinstance(device, DASDDevice)
def setup(self, store, selectedNames, disks):
""" Set up our Z-page, but only if we're running on s390x. """
if not self._isS390:
return
else:
ccws = []
wwpns = []
luns = []
self._combo.set_active_id(self.SEARCH_TYPE_NONE)
self._combo.emit("changed")
for disk in disks:
paths = [d.name for d in disk.parents]
selected = disk.name in selectedNames
if getattr(disk, "type") == "zfcp":
# remember to store all of the zfcp-related junk so we can
# see it in the UI
if not disk.fcp_lun in luns:
luns.append(disk.fcp_lun)
if not disk.wwpn in wwpns:
wwpns.append(disk.wwpn)
if not disk.hba_id in ccws:
ccws.append(disk.hba_id)
# now add it to our store
store.append([True, selected, not disk.protected,
disk.name, "", disk.model, str(disk.size),
disk.vendor, disk.bus, disk.serial, "", "\n".join(paths),
"", "", disk.fcp_lun, disk.hba_id, disk.wwpn])
def _filter_func(self, device):
if not self.filterActive:
return True
filterBy = self._combo.get_active_id()
if filterBy == self.SEARCH_TYPE_NONE:
return True
elif filterBy == self.SEARCH_TYPE_CCW:
return self._ccwEntry.get_text() in device.hba_id
elif filterBy == self.SEARCH_TYPE_WWPN:
return self._wwpnEntry.get_text() in device.wwpn
elif filterBy == self.SEARCH_TYPE_LUN:
return self._lunEntry.get_text() in device.fcp_lun
return False
def visible_func(self, model, itr, *args):
obj = DiskStoreRow(*model[itr])
device = self.storage.devicetree.getDeviceByName(obj.name, hidden=True)
return self.ismember(device) and self._filter_func(device)
class FilterSpoke(NormalSpoke):
"""
.. inheritance-diagram:: FilterSpoke
:parts: 3
"""
builderObjects = ["diskStore", "filterWindow",
"searchModel", "multipathModel", "otherModel", "zModel"]
mainWidgetName = "filterWindow"
uiFile = "spokes/filter.glade"
helpFile = "FilterSpoke.xml"
category = SystemCategory
title = CN_("GUI|Spoke", "_INSTALLATION DESTINATION")
def __init__(self, *args):
NormalSpoke.__init__(self, *args)
self.applyOnSkip = True
self.ancestors = []
self.disks = []
self.selected_disks = []
@property
def indirect(self):
return True
# This spoke has no status since it's not in a hub
@property
def status(self):
return None
def apply(self):
onlyuse = self.selected_disks[:]
for disk in [d for d in self.storage.disks if d.name in onlyuse]:
onlyuse.extend([d.name for d in disk.ancestors
if d.name not in onlyuse])
self.data.ignoredisk.onlyuse = onlyuse
self.data.clearpart.drives = self.selected_disks[:]
# some disks may have been added in this spoke, we need to recreate the
# snapshot of on-disk storage
if on_disk_storage.created:
on_disk_storage.dispose_snapshot()
on_disk_storage.create_snapshot(self.storage)
def initialize(self):
NormalSpoke.initialize(self)
self.pages = [SearchPage(self.storage, self.builder),
MultipathPage(self.storage, self.builder),
OtherPage(self.storage, self.builder),
ZPage(self.storage, self.builder)]
self._notebook = self.builder.get_object("advancedNotebook")
if not arch.isS390():
self._notebook.remove_page(-1)
self.builder.get_object("addZFCPButton").destroy()
self.builder.get_object("addDASDButton").destroy()
if not has_fcoe():
self.builder.get_object("addFCOEButton").destroy()
self._store = self.builder.get_object("diskStore")
self._addDisksButton = self.builder.get_object("addDisksButton")
def _real_ancestors(self, disk):
# Return a list of all the ancestors of a disk, but remove the disk
# itself from this list.
return [d for d in disk.ancestors if d.name != disk.name]
def refresh(self):
NormalSpoke.refresh(self)
self.disks = getDisks(self.storage.devicetree)
self.selected_disks = self.data.ignoredisk.onlyuse[:]
self.ancestors = [d.name for disk in self.disks for d in self._real_ancestors(disk)]
self._store.clear()
allDisks = []
multipathDisks = []
otherDisks = []
zDisks = []
        # Now add all the non-local disks to the store. Everything has been set up
# ahead of time, so there's no need to configure anything. We first make
# these lists of disks, then call setup on each individual page. This is
# because there could be page-specific setup to do that requires a complete
# view of all the disks on that page.
for disk in self.disks:
if self.pages[1].ismember(disk):
multipathDisks.append(disk)
elif self.pages[2].ismember(disk):
otherDisks.append(disk)
elif self.pages[3].ismember(disk):
zDisks.append(disk)
allDisks.append(disk)
self.pages[0].setup(self._store, self.selected_disks, allDisks)
self.pages[1].setup(self._store, self.selected_disks, multipathDisks)
self.pages[2].setup(self._store, self.selected_disks, otherDisks)
self.pages[3].setup(self._store, self.selected_disks, zDisks)
self._update_summary()
def _update_summary(self):
summaryButton = self.builder.get_object("summary_button")
label = self.builder.get_object("summary_button_label")
# We need to remove ancestor devices from the count. Otherwise, we'll
# end up in a situation where selecting one multipath device could
# potentially show three devices selected (mpatha, sda, sdb for instance).
count = len([disk for disk in self.selected_disks if disk not in self.ancestors])
summary = CP_("GUI|Installation Destination|Filter",
"%d _storage device selected",
"%d _storage devices selected",
count) % count
label.set_text(summary)
label.set_use_underline(True)
summaryButton.set_visible(count > 0)
label.set_sensitive(count > 0)
def on_back_clicked(self, button):
self.skipTo = "StorageSpoke"
NormalSpoke.on_back_clicked(self, button)
def on_summary_clicked(self, button):
dialog = SelectedDisksDialog(self.data)
# Include any disks selected in the initial storage spoke, plus any
# selected in this filter UI.
disks = [disk for disk in self.disks if disk.name in self.selected_disks]
free_space = self.storage.getFreeSpace(disks=disks)
with self.main_window.enlightbox(dialog.window):
dialog.refresh(disks, free_space, showRemove=False, setBoot=False)
dialog.run()
@timed_action(delay=1200, busy_cursor=False)
def on_filter_changed(self, *args):
n = self._notebook.get_current_page()
self.pages[n].filterActive = True
self.pages[n].model.refilter()
def on_clear_icon_clicked(self, entry, icon_pos, event):
if icon_pos == Gtk.EntryIconPosition.SECONDARY:
entry.set_text("")
def on_page_switched(self, notebook, newPage, newPageNum, *args):
self.pages[newPageNum].model.refilter()
notebook.get_nth_page(newPageNum).show_all()
def on_row_toggled(self, button, path):
if not path:
return
page_index = self._notebook.get_current_page()
filter_model = self.pages[page_index].model
model_itr = filter_model.get_iter(path)
itr = filter_model.convert_iter_to_child_iter(model_itr)
self._store[itr][1] = not self._store[itr][1]
if self._store[itr][1] and self._store[itr][3] not in self.selected_disks:
self.selected_disks.append(self._store[itr][3])
elif not self._store[itr][1] and self._store[itr][3] in self.selected_disks:
self.selected_disks.remove(self._store[itr][3])
self._update_summary()
@timed_action(delay=50, threshold=100)
def on_refresh_clicked(self, widget, *args):
try_populate_devicetree(self.storage.devicetree)
self.refresh()
def on_add_iscsi_clicked(self, widget, *args):
dialog = ISCSIDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_fcoe_clicked(self, widget, *args):
dialog = FCoEDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_zfcp_clicked(self, widget, *args):
dialog = ZFCPDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
rc = dialog.run()
if rc == 1:
self.skipTo = "StorageSpoke"
self.on_back_clicked(rc)
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
def on_add_dasd_clicked(self, widget, *args):
dialog = DASDDialog(self.data, self.storage)
with self.main_window.enlightbox(dialog.window):
dialog.refresh()
dialog.run()
# We now need to refresh so any new disks picked up by adding advanced
# storage are displayed in the UI.
self.refresh()
##
## SEARCH TAB SIGNAL HANDLERS
##
def on_search_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("searchTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## MULTIPATH TAB SIGNAL HANDLERS
##
def on_multipath_type_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("multipathTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## OTHER TAB SIGNAL HANDLERS
##
def on_other_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("otherTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
##
## Z TAB SIGNAL HANDLERS
##
def on_z_type_combo_changed(self, combo):
ndx = combo.get_active()
notebook = self.builder.get_object("zTypeNotebook")
notebook.set_current_page(ndx)
self.on_filter_changed()
|
wgwoods/anaconda
|
pyanaconda/ui/gui/spokes/filter.py
|
Python
|
gpl-2.0
| 27,975
|
[
"VisIt"
] |
8fe7117ac429c1b4ac7c0e12e635a2d8d1d74302de5ca0953086e6d252998dc4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import tempfile
import os
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot, vtkGetTempDir
VTK_DATA_ROOT = vtkGetDataRoot()
VTK_TEMP_DIR = vtkGetTempDir()
class cells(vtk.test.Testing.vtkTest):
def testCells(self):
# Demonstrates all cell types
#
# NOTE: the use of NewInstance/DeepCopy is included to increase
# regression coverage. It is not required in most applications.
ren = vtk.vtkRenderer()
# turn off all cullers
ren.GetCullers().RemoveAllItems()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
renWin.SetSize(300, 150)
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin);
# create a scene with one of each cell type
# Voxel
voxelPoints = vtk.vtkPoints()
voxelPoints.SetNumberOfPoints(8)
voxelPoints.InsertPoint(0, 0, 0, 0)
voxelPoints.InsertPoint(1, 1, 0, 0)
voxelPoints.InsertPoint(2, 0, 1, 0)
voxelPoints.InsertPoint(3, 1, 1, 0)
voxelPoints.InsertPoint(4, 0, 0, 1)
voxelPoints.InsertPoint(5, 1, 0, 1)
voxelPoints.InsertPoint(6, 0, 1, 1)
voxelPoints.InsertPoint(7, 1, 1, 1)
aVoxel = vtk.vtkVoxel()
aVoxel.GetPointIds().SetId(0, 0)
aVoxel.GetPointIds().SetId(1, 1)
aVoxel.GetPointIds().SetId(2, 2)
aVoxel.GetPointIds().SetId(3, 3)
aVoxel.GetPointIds().SetId(4, 4)
aVoxel.GetPointIds().SetId(5, 5)
aVoxel.GetPointIds().SetId(6, 6)
aVoxel.GetPointIds().SetId(7, 7)
bVoxel = aVoxel.NewInstance()
bVoxel.DeepCopy(aVoxel)
aVoxelGrid = vtk.vtkUnstructuredGrid()
aVoxelGrid.Allocate(1, 1)
aVoxelGrid.InsertNextCell(aVoxel.GetCellType(), aVoxel.GetPointIds())
aVoxelGrid.SetPoints(voxelPoints)
aVoxelMapper = vtk.vtkDataSetMapper()
aVoxelMapper.SetInputData(aVoxelGrid)
aVoxelActor = vtk.vtkActor()
aVoxelActor.SetMapper(aVoxelMapper)
aVoxelActor.GetProperty().BackfaceCullingOn()
# Hexahedron
hexahedronPoints = vtk.vtkPoints()
hexahedronPoints.SetNumberOfPoints(8)
hexahedronPoints.InsertPoint(0, 0, 0, 0)
hexahedronPoints.InsertPoint(1, 1, 0, 0)
hexahedronPoints.InsertPoint(2, 1, 1, 0)
hexahedronPoints.InsertPoint(3, 0, 1, 0)
hexahedronPoints.InsertPoint(4, 0, 0, 1)
hexahedronPoints.InsertPoint(5, 1, 0, 1)
hexahedronPoints.InsertPoint(6, 1, 1, 1)
hexahedronPoints.InsertPoint(7, 0, 1, 1)
aHexahedron = vtk.vtkHexahedron()
aHexahedron.GetPointIds().SetId(0, 0)
aHexahedron.GetPointIds().SetId(1, 1)
aHexahedron.GetPointIds().SetId(2, 2)
aHexahedron.GetPointIds().SetId(3, 3)
aHexahedron.GetPointIds().SetId(4, 4)
aHexahedron.GetPointIds().SetId(5, 5)
aHexahedron.GetPointIds().SetId(6, 6)
aHexahedron.GetPointIds().SetId(7, 7)
bHexahedron = aHexahedron.NewInstance()
bHexahedron.DeepCopy(aHexahedron)
aHexahedronGrid = vtk.vtkUnstructuredGrid()
aHexahedronGrid.Allocate(1, 1)
aHexahedronGrid.InsertNextCell(aHexahedron.GetCellType(), aHexahedron.GetPointIds())
aHexahedronGrid.SetPoints(hexahedronPoints)
aHexahedronMapper = vtk.vtkDataSetMapper()
aHexahedronMapper.SetInputData(aHexahedronGrid)
aHexahedronActor = vtk.vtkActor()
aHexahedronActor.SetMapper(aHexahedronMapper)
aHexahedronActor.AddPosition(2, 0, 0)
aHexahedronActor.GetProperty().BackfaceCullingOn()
# Tetra
tetraPoints = vtk.vtkPoints()
tetraPoints.SetNumberOfPoints(4)
tetraPoints.InsertPoint(0, 0, 0, 0)
tetraPoints.InsertPoint(1, 1, 0, 0)
tetraPoints.InsertPoint(2, .5, 1, 0)
tetraPoints.InsertPoint(3, .5, .5, 1)
aTetra = vtk.vtkTetra()
aTetra.GetPointIds().SetId(0, 0)
aTetra.GetPointIds().SetId(1, 1)
aTetra.GetPointIds().SetId(2, 2)
aTetra.GetPointIds().SetId(3, 3)
bTetra = aTetra.NewInstance()
bTetra.DeepCopy(aTetra)
aTetraGrid = vtk.vtkUnstructuredGrid()
aTetraGrid.Allocate(1, 1)
aTetraGrid.InsertNextCell(aTetra.GetCellType(), aTetra.GetPointIds())
aTetraGrid.SetPoints(tetraPoints)
aTetraCopy = vtk.vtkUnstructuredGrid()
aTetraCopy.ShallowCopy(aTetraGrid)
aTetraMapper = vtk.vtkDataSetMapper()
aTetraMapper.SetInputData(aTetraCopy)
aTetraActor = vtk.vtkActor()
aTetraActor.SetMapper(aTetraMapper)
aTetraActor.AddPosition(4, 0, 0)
aTetraActor.GetProperty().BackfaceCullingOn()
# Wedge
wedgePoints = vtk.vtkPoints()
wedgePoints.SetNumberOfPoints(6)
wedgePoints.InsertPoint(0, 0, 1, 0)
wedgePoints.InsertPoint(1, 0, 0, 0)
wedgePoints.InsertPoint(2, 0, .5, .5)
wedgePoints.InsertPoint(3, 1, 1, 0)
wedgePoints.InsertPoint(4, 1, 0, 0)
wedgePoints.InsertPoint(5, 1, .5, .5)
aWedge = vtk.vtkWedge()
aWedge.GetPointIds().SetId(0, 0)
aWedge.GetPointIds().SetId(1, 1)
aWedge.GetPointIds().SetId(2, 2)
aWedge.GetPointIds().SetId(3, 3)
aWedge.GetPointIds().SetId(4, 4)
aWedge.GetPointIds().SetId(5, 5)
bWedge = aWedge.NewInstance()
bWedge.DeepCopy(aWedge)
aWedgeGrid = vtk.vtkUnstructuredGrid()
aWedgeGrid.Allocate(1, 1)
aWedgeGrid.InsertNextCell(aWedge.GetCellType(), aWedge.GetPointIds())
aWedgeGrid.SetPoints(wedgePoints)
aWedgeCopy = vtk.vtkUnstructuredGrid()
aWedgeCopy.DeepCopy(aWedgeGrid)
aWedgeMapper = vtk.vtkDataSetMapper()
aWedgeMapper.SetInputData(aWedgeCopy)
aWedgeActor = vtk.vtkActor()
aWedgeActor.SetMapper(aWedgeMapper)
aWedgeActor.AddPosition(6, 0, 0)
aWedgeActor.GetProperty().BackfaceCullingOn()
# Pyramid
pyramidPoints = vtk.vtkPoints()
pyramidPoints.SetNumberOfPoints(5)
pyramidPoints.InsertPoint(0, 0, 0, 0)
pyramidPoints.InsertPoint(1, 1, 0, 0)
pyramidPoints.InsertPoint(2, 1, 1, 0)
pyramidPoints.InsertPoint(3, 0, 1, 0)
pyramidPoints.InsertPoint(4, .5, .5, 1)
aPyramid = vtk.vtkPyramid()
aPyramid.GetPointIds().SetId(0, 0)
aPyramid.GetPointIds().SetId(1, 1)
aPyramid.GetPointIds().SetId(2, 2)
aPyramid.GetPointIds().SetId(3, 3)
aPyramid.GetPointIds().SetId(4, 4)
bPyramid = aPyramid.NewInstance()
bPyramid.DeepCopy(aPyramid)
aPyramidGrid = vtk.vtkUnstructuredGrid()
aPyramidGrid.Allocate(1, 1)
aPyramidGrid.InsertNextCell(aPyramid.GetCellType(), aPyramid.GetPointIds())
aPyramidGrid.SetPoints(pyramidPoints)
aPyramidMapper = vtk.vtkDataSetMapper()
aPyramidMapper.SetInputData(aPyramidGrid)
aPyramidActor = vtk.vtkActor()
aPyramidActor.SetMapper(aPyramidMapper)
aPyramidActor.AddPosition(8, 0, 0)
aPyramidActor.GetProperty().BackfaceCullingOn()
# Pixel
pixelPoints = vtk.vtkPoints()
pixelPoints.SetNumberOfPoints(4)
pixelPoints.InsertPoint(0, 0, 0, 0)
pixelPoints.InsertPoint(1, 1, 0, 0)
pixelPoints.InsertPoint(2, 0, 1, 0)
pixelPoints.InsertPoint(3, 1, 1, 0)
aPixel = vtk.vtkPixel()
aPixel.GetPointIds().SetId(0, 0)
aPixel.GetPointIds().SetId(1, 1)
aPixel.GetPointIds().SetId(2, 2)
aPixel.GetPointIds().SetId(3, 3)
bPixel = aPixel.NewInstance()
bPixel.DeepCopy(aPixel)
aPixelGrid = vtk.vtkUnstructuredGrid()
aPixelGrid.Allocate(1, 1)
aPixelGrid.InsertNextCell(aPixel.GetCellType(), aPixel.GetPointIds())
aPixelGrid.SetPoints(pixelPoints)
aPixelMapper = vtk.vtkDataSetMapper()
aPixelMapper.SetInputData(aPixelGrid)
aPixelActor = vtk.vtkActor()
aPixelActor.SetMapper(aPixelMapper)
aPixelActor.AddPosition(0, 0, 2)
aPixelActor.GetProperty().BackfaceCullingOn()
# Quad
quadPoints = vtk.vtkPoints()
quadPoints.SetNumberOfPoints(4)
quadPoints.InsertPoint(0, 0, 0, 0)
quadPoints.InsertPoint(1, 1, 0, 0)
quadPoints.InsertPoint(2, 1, 1, 0)
quadPoints.InsertPoint(3, 0, 1, 0)
aQuad = vtk.vtkQuad()
aQuad.GetPointIds().SetId(0, 0)
aQuad.GetPointIds().SetId(1, 1)
aQuad.GetPointIds().SetId(2, 2)
aQuad.GetPointIds().SetId(3, 3)
bQuad = aQuad.NewInstance()
bQuad.DeepCopy(aQuad)
aQuadGrid = vtk.vtkUnstructuredGrid()
aQuadGrid.Allocate(1, 1)
aQuadGrid.InsertNextCell(aQuad.GetCellType(), aQuad.GetPointIds())
aQuadGrid.SetPoints(quadPoints)
aQuadMapper = vtk.vtkDataSetMapper()
aQuadMapper.SetInputData(aQuadGrid)
aQuadActor = vtk.vtkActor()
aQuadActor.SetMapper(aQuadMapper)
aQuadActor.AddPosition(2, 0, 2)
aQuadActor.GetProperty().BackfaceCullingOn()
# Triangle
trianglePoints = vtk.vtkPoints()
trianglePoints.SetNumberOfPoints(3)
trianglePoints.InsertPoint(0, 0, 0, 0)
trianglePoints.InsertPoint(1, 1, 0, 0)
trianglePoints.InsertPoint(2, .5, .5, 0)
triangleTCoords = vtk.vtkFloatArray()
triangleTCoords.SetNumberOfComponents(2)
triangleTCoords.SetNumberOfTuples(3)
triangleTCoords.InsertTuple2(0, 1, 1)
triangleTCoords.InsertTuple2(1, 2, 2)
triangleTCoords.InsertTuple2(2, 3, 3)
aTriangle = vtk.vtkTriangle()
aTriangle.GetPointIds().SetId(0, 0)
aTriangle.GetPointIds().SetId(1, 1)
aTriangle.GetPointIds().SetId(2, 2)
bTriangle = aTriangle.NewInstance()
bTriangle.DeepCopy(aTriangle)
aTriangleGrid = vtk.vtkUnstructuredGrid()
aTriangleGrid.Allocate(1, 1)
aTriangleGrid.InsertNextCell(aTriangle.GetCellType(), aTriangle.GetPointIds())
aTriangleGrid.SetPoints(trianglePoints)
aTriangleGrid.GetPointData().SetTCoords(triangleTCoords)
aTriangleMapper = vtk.vtkDataSetMapper()
aTriangleMapper.SetInputData(aTriangleGrid)
aTriangleActor = vtk.vtkActor()
aTriangleActor.SetMapper(aTriangleMapper)
aTriangleActor.AddPosition(4, 0, 2)
aTriangleActor.GetProperty().BackfaceCullingOn()
# Polygon
polygonPoints = vtk.vtkPoints()
polygonPoints.SetNumberOfPoints(4)
polygonPoints.InsertPoint(0, 0, 0, 0)
polygonPoints.InsertPoint(1, 1, 0, 0)
polygonPoints.InsertPoint(2, 1, 1, 0)
polygonPoints.InsertPoint(3, 0, 1, 0)
aPolygon = vtk.vtkPolygon()
aPolygon.GetPointIds().SetNumberOfIds(4)
aPolygon.GetPointIds().SetId(0, 0)
aPolygon.GetPointIds().SetId(1, 1)
aPolygon.GetPointIds().SetId(2, 2)
aPolygon.GetPointIds().SetId(3, 3)
bPolygon = aPolygon.NewInstance()
bPolygon.DeepCopy(aPolygon)
aPolygonGrid = vtk.vtkUnstructuredGrid()
aPolygonGrid.Allocate(1, 1)
aPolygonGrid.InsertNextCell(aPolygon.GetCellType(), aPolygon.GetPointIds())
aPolygonGrid.SetPoints(polygonPoints)
aPolygonMapper = vtk.vtkDataSetMapper()
aPolygonMapper.SetInputData(aPolygonGrid)
aPolygonActor = vtk.vtkActor()
aPolygonActor.SetMapper(aPolygonMapper)
aPolygonActor.AddPosition(6, 0, 2)
aPolygonActor.GetProperty().BackfaceCullingOn()
# Triangle Strip
triangleStripPoints = vtk.vtkPoints()
triangleStripPoints.SetNumberOfPoints(5)
triangleStripPoints.InsertPoint(0, 0, 1, 0)
triangleStripPoints.InsertPoint(1, 0, 0, 0)
triangleStripPoints.InsertPoint(2, 1, 1, 0)
triangleStripPoints.InsertPoint(3, 1, 0, 0)
triangleStripPoints.InsertPoint(4, 2, 1, 0)
triangleStripTCoords = vtk.vtkFloatArray()
triangleStripTCoords.SetNumberOfComponents(2)
triangleStripTCoords.SetNumberOfTuples(3)
triangleStripTCoords.InsertTuple2(0, 1, 1)
triangleStripTCoords.InsertTuple2(1, 2, 2)
triangleStripTCoords.InsertTuple2(2, 3, 3)
triangleStripTCoords.InsertTuple2(3, 4, 4)
triangleStripTCoords.InsertTuple2(4, 5, 5)
aTriangleStrip = vtk.vtkTriangleStrip()
aTriangleStrip.GetPointIds().SetNumberOfIds(5)
aTriangleStrip.GetPointIds().SetId(0, 0)
aTriangleStrip.GetPointIds().SetId(1, 1)
aTriangleStrip.GetPointIds().SetId(2, 2)
aTriangleStrip.GetPointIds().SetId(3, 3)
aTriangleStrip.GetPointIds().SetId(4, 4)
bTriangleStrip = aTriangleStrip.NewInstance()
bTriangleStrip.DeepCopy(aTriangleStrip)
aTriangleStripGrid = vtk.vtkUnstructuredGrid()
aTriangleStripGrid.Allocate(1, 1)
aTriangleStripGrid.InsertNextCell(aTriangleStrip.GetCellType(), aTriangleStrip.GetPointIds())
aTriangleStripGrid.SetPoints(triangleStripPoints)
aTriangleStripGrid.GetPointData().SetTCoords(triangleStripTCoords)
aTriangleStripMapper = vtk.vtkDataSetMapper()
aTriangleStripMapper.SetInputData(aTriangleStripGrid)
aTriangleStripActor = vtk.vtkActor()
aTriangleStripActor.SetMapper(aTriangleStripMapper)
aTriangleStripActor.AddPosition(8, 0, 2)
aTriangleStripActor.GetProperty().BackfaceCullingOn()
# Line
linePoints = vtk.vtkPoints()
linePoints.SetNumberOfPoints(2)
linePoints.InsertPoint(0, 0, 0, 0)
linePoints.InsertPoint(1, 1, 1, 0)
aLine = vtk.vtkLine()
aLine.GetPointIds().SetId(0, 0)
aLine.GetPointIds().SetId(1, 1)
bLine = aLine.NewInstance()
bLine.DeepCopy(aLine)
aLineGrid = vtk.vtkUnstructuredGrid()
aLineGrid.Allocate(1, 1)
aLineGrid.InsertNextCell(aLine.GetCellType(), aLine.GetPointIds())
aLineGrid.SetPoints(linePoints)
aLineMapper = vtk.vtkDataSetMapper()
aLineMapper.SetInputData(aLineGrid)
aLineActor = vtk.vtkActor()
aLineActor.SetMapper(aLineMapper)
aLineActor.AddPosition(0, 0, 4)
aLineActor.GetProperty().BackfaceCullingOn()
# Poly line
polyLinePoints = vtk.vtkPoints()
polyLinePoints.SetNumberOfPoints(3)
polyLinePoints.InsertPoint(0, 0, 0, 0)
polyLinePoints.InsertPoint(1, 1, 1, 0)
polyLinePoints.InsertPoint(2, 1, 0, 0)
aPolyLine = vtk.vtkPolyLine()
aPolyLine.GetPointIds().SetNumberOfIds(3)
aPolyLine.GetPointIds().SetId(0, 0)
aPolyLine.GetPointIds().SetId(1, 1)
aPolyLine.GetPointIds().SetId(2, 2)
bPolyLine = aPolyLine.NewInstance()
bPolyLine.DeepCopy(aPolyLine)
aPolyLineGrid = vtk.vtkUnstructuredGrid()
aPolyLineGrid.Allocate(1, 1)
aPolyLineGrid.InsertNextCell(aPolyLine.GetCellType(), aPolyLine.GetPointIds())
aPolyLineGrid.SetPoints(polyLinePoints)
aPolyLineMapper = vtk.vtkDataSetMapper()
aPolyLineMapper.SetInputData(aPolyLineGrid)
aPolyLineActor = vtk.vtkActor()
aPolyLineActor.SetMapper(aPolyLineMapper)
aPolyLineActor.AddPosition(2, 0, 4)
aPolyLineActor.GetProperty().BackfaceCullingOn()
# Vertex
vertexPoints = vtk.vtkPoints()
vertexPoints.SetNumberOfPoints(1)
vertexPoints.InsertPoint(0, 0, 0, 0)
aVertex = vtk.vtkVertex()
aVertex.GetPointIds().SetId(0, 0)
bVertex = aVertex.NewInstance()
bVertex.DeepCopy(aVertex)
aVertexGrid = vtk.vtkUnstructuredGrid()
aVertexGrid.Allocate(1, 1)
aVertexGrid.InsertNextCell(aVertex.GetCellType(), aVertex.GetPointIds())
aVertexGrid.SetPoints(vertexPoints)
aVertexMapper = vtk.vtkDataSetMapper()
aVertexMapper.SetInputData(aVertexGrid)
aVertexActor = vtk.vtkActor()
aVertexActor.SetMapper(aVertexMapper)
aVertexActor.AddPosition(0, 0, 6)
aVertexActor.GetProperty().BackfaceCullingOn()
# Poly Vertex
polyVertexPoints = vtk.vtkPoints()
polyVertexPoints.SetNumberOfPoints(3)
polyVertexPoints.InsertPoint(0, 0, 0, 0)
polyVertexPoints.InsertPoint(1, 1, 0, 0)
polyVertexPoints.InsertPoint(2, 1, 1, 0)
aPolyVertex = vtk.vtkPolyVertex()
aPolyVertex.GetPointIds().SetNumberOfIds(3)
aPolyVertex.GetPointIds().SetId(0, 0)
aPolyVertex.GetPointIds().SetId(1, 1)
aPolyVertex.GetPointIds().SetId(2, 2)
bPolyVertex = aPolyVertex.NewInstance()
bPolyVertex.DeepCopy(aPolyVertex)
aPolyVertexGrid = vtk.vtkUnstructuredGrid()
aPolyVertexGrid.Allocate(1, 1)
aPolyVertexGrid.InsertNextCell(aPolyVertex.GetCellType(), aPolyVertex.GetPointIds())
aPolyVertexGrid.SetPoints(polyVertexPoints)
aPolyVertexMapper = vtk.vtkDataSetMapper()
aPolyVertexMapper.SetInputData(aPolyVertexGrid)
aPolyVertexActor = vtk.vtkActor()
aPolyVertexActor.SetMapper(aPolyVertexMapper)
aPolyVertexActor.AddPosition(2, 0, 6)
aPolyVertexActor.GetProperty().BackfaceCullingOn()
# Pentagonal prism
pentaPoints = vtk.vtkPoints()
pentaPoints.SetNumberOfPoints(10)
pentaPoints.InsertPoint(0, 0.25, 0.0, 0.0)
pentaPoints.InsertPoint(1, 0.75, 0.0, 0.0)
pentaPoints.InsertPoint(2, 1.0, 0.5, 0.0)
pentaPoints.InsertPoint(3, 0.5, 1.0, 0.0)
pentaPoints.InsertPoint(4, 0.0, 0.5, 0.0)
pentaPoints.InsertPoint(5, 0.25, 0.0, 1.0)
pentaPoints.InsertPoint(6, 0.75, 0.0, 1.0)
pentaPoints.InsertPoint(7, 1.0, 0.5, 1.0)
pentaPoints.InsertPoint(8, 0.5, 1.0, 1.0)
pentaPoints.InsertPoint(9, 0.0, 0.5, 1.0)
aPenta = vtk.vtkPentagonalPrism()
aPenta.GetPointIds().SetId(0, 0)
aPenta.GetPointIds().SetId(1, 1)
aPenta.GetPointIds().SetId(2, 2)
aPenta.GetPointIds().SetId(3, 3)
aPenta.GetPointIds().SetId(4, 4)
aPenta.GetPointIds().SetId(5, 5)
aPenta.GetPointIds().SetId(6, 6)
aPenta.GetPointIds().SetId(7, 7)
aPenta.GetPointIds().SetId(8, 8)
aPenta.GetPointIds().SetId(9, 9)
bPenta = aPenta.NewInstance()
bPenta.DeepCopy(aPenta)
aPentaGrid = vtk.vtkUnstructuredGrid()
aPentaGrid.Allocate(1, 1)
aPentaGrid.InsertNextCell(aPenta.GetCellType(), aPenta.GetPointIds())
aPentaGrid.SetPoints(pentaPoints)
aPentaCopy = vtk.vtkUnstructuredGrid()
aPentaCopy.DeepCopy(aPentaGrid)
aPentaMapper = vtk.vtkDataSetMapper()
aPentaMapper.SetInputData(aPentaCopy)
aPentaActor = vtk.vtkActor()
aPentaActor.SetMapper(aPentaMapper)
aPentaActor.AddPosition(10, 0, 0)
aPentaActor.GetProperty().BackfaceCullingOn()
# Hexagonal prism
hexaPoints = vtk.vtkPoints()
hexaPoints.SetNumberOfPoints(12)
hexaPoints.InsertPoint(0, 0.0, 0.0, 0.0)
hexaPoints.InsertPoint(1, 0.5, 0.0, 0.0)
hexaPoints.InsertPoint(2, 1.0, 0.5, 0.0)
hexaPoints.InsertPoint(3, 1.0, 1.0, 0.0)
hexaPoints.InsertPoint(4, 0.5, 1.0, 0.0)
hexaPoints.InsertPoint(5, 0.0, 0.5, 0.0)
hexaPoints.InsertPoint(6, 0.0, 0.0, 1.0)
hexaPoints.InsertPoint(7, 0.5, 0.0, 1.0)
hexaPoints.InsertPoint(8, 1.0, 0.5, 1.0)
hexaPoints.InsertPoint(9, 1.0, 1.0, 1.0)
hexaPoints.InsertPoint(10, 0.5, 1.0, 1.0)
hexaPoints.InsertPoint(11, 0.0, 0.5, 1.0)
aHexa = vtk.vtkHexagonalPrism()
aHexa.GetPointIds().SetId(0, 0)
aHexa.GetPointIds().SetId(1, 1)
aHexa.GetPointIds().SetId(2, 2)
aHexa.GetPointIds().SetId(3, 3)
aHexa.GetPointIds().SetId(4, 4)
aHexa.GetPointIds().SetId(5, 5)
aHexa.GetPointIds().SetId(6, 6)
aHexa.GetPointIds().SetId(7, 7)
aHexa.GetPointIds().SetId(8, 8)
aHexa.GetPointIds().SetId(9, 9)
aHexa.GetPointIds().SetId(10, 10)
aHexa.GetPointIds().SetId(11, 11)
bHexa = aHexa.NewInstance()
bHexa.DeepCopy(aHexa)
aHexaGrid = vtk.vtkUnstructuredGrid()
aHexaGrid.Allocate(1, 1)
aHexaGrid.InsertNextCell(aHexa.GetCellType(), aHexa.GetPointIds())
aHexaGrid.SetPoints(hexaPoints)
aHexaCopy = vtk.vtkUnstructuredGrid()
aHexaCopy.DeepCopy(aHexaGrid)
aHexaMapper = vtk.vtkDataSetMapper()
aHexaMapper.SetInputData(aHexaCopy)
aHexaActor = vtk.vtkActor()
aHexaActor.SetMapper(aHexaMapper)
aHexaActor.AddPosition(12, 0, 0)
aHexaActor.GetProperty().BackfaceCullingOn()
# RIB property
if hasattr(vtk, 'vtkRIBProperty'):
aRIBProperty = vtk.vtkRIBProperty()
aRIBProperty.SetVariable("Km", "float")
aRIBProperty.SetSurfaceShader("LGVeinedmarble")
aRIBProperty.SetVariable("veinfreq", "float")
aRIBProperty.AddVariable("warpfreq", "float")
aRIBProperty.AddVariable("veincolor", "color")
aRIBProperty.AddSurfaceShaderParameter("veinfreq", " 2")
aRIBProperty.AddSurfaceShaderParameter("veincolor", "1.0000 1.0000 0.9412")
bRIBProperty = vtk.vtkRIBProperty()
bRIBProperty.SetVariable("Km", "float")
bRIBProperty.SetSurfaceShaderParameter("Km", "1.0")
bRIBProperty.SetDisplacementShader("dented")
bRIBProperty.SetSurfaceShader("plastic")
aProperty = vtk.vtkProperty()
bProperty = vtk.vtkProperty()
aTriangleActor.SetProperty(aProperty)
aTriangleStripActor.SetProperty(bProperty)
ren.SetBackground(.1, .2, .4)
ren.AddActor(aVoxelActor);aVoxelActor.GetProperty().SetDiffuseColor(1, 0, 0)
ren.AddActor(aHexahedronActor);aHexahedronActor.GetProperty().SetDiffuseColor(1, 1, 0)
ren.AddActor(aTetraActor);aTetraActor.GetProperty().SetDiffuseColor(0, 1, 0)
ren.AddActor(aWedgeActor);aWedgeActor.GetProperty().SetDiffuseColor(0, 1, 1)
ren.AddActor(aPyramidActor);aPyramidActor.GetProperty().SetDiffuseColor(1, 0, 1)
ren.AddActor(aPixelActor);aPixelActor.GetProperty().SetDiffuseColor(0, 1, 1)
ren.AddActor(aQuadActor);aQuadActor.GetProperty().SetDiffuseColor(1, 0, 1)
ren.AddActor(aTriangleActor);aTriangleActor.GetProperty().SetDiffuseColor(.3, 1, .5)
ren.AddActor(aPolygonActor);aPolygonActor.GetProperty().SetDiffuseColor(1, .4, .5)
ren.AddActor(aTriangleStripActor);aTriangleStripActor.GetProperty().SetDiffuseColor(.3, .7, 1)
ren.AddActor(aLineActor);aLineActor.GetProperty().SetDiffuseColor(.2, 1, 1)
ren.AddActor(aPolyLineActor);aPolyLineActor.GetProperty().SetDiffuseColor(1, 1, 1)
ren.AddActor(aVertexActor);aVertexActor.GetProperty().SetDiffuseColor(1, 1, 1)
ren.AddActor(aPolyVertexActor);aPolyVertexActor.GetProperty().SetDiffuseColor(1, 1, 1)
ren.AddActor(aPentaActor);aPentaActor.GetProperty().SetDiffuseColor(.2, .4, .7)
ren.AddActor(aHexaActor);aHexaActor.GetProperty().SetDiffuseColor(.7, .5, 1)
if hasattr(vtk, 'vtkRIBLight'):
aRIBLight = vtk.vtkRIBLight()
ren.AddLight(aRIBLight)
aLight = vtk.vtkLight()
aLight.PositionalOn()
aLight.SetConeAngle(10.0)
aLight.SetIntensity(20.0)
ren.AddLight(aLight)
ren.ResetCamera()
ren.GetActiveCamera().Azimuth(30)
ren.GetActiveCamera().Elevation(20)
ren.GetActiveCamera().Dolly(2.8)
ren.ResetCameraClippingRange()
dir = VTK_TEMP_DIR
atext = vtk.vtkTexture()
pnmReader = vtk.vtkBMPReader()
pnmReader.SetFileName(VTK_DATA_ROOT + "/Data/masonry.bmp")
atext.SetInputConnection(pnmReader.GetOutputPort())
atext.InterpolateOff()
aTriangleActor.SetTexture(atext)
aRIBLight.SetFocalPoint(ren.GetActiveCamera().GetFocalPoint())
aRIBLight.SetPosition(ren.GetActiveCamera().GetPosition())
aLight.SetFocalPoint(ren.GetActiveCamera().GetFocalPoint())
aLight.SetPosition(ren.GetActiveCamera().GetPosition())
        # basically: do we have IO/Export?
if hasattr(vtk, 'vtkRIBExporter'):
rib = vtk.vtkRIBExporter()
rib.SetInput(renWin)
rib.SetFilePrefix(dir + '/cells')
rib.SetTexturePrefix(dir + '/cells')
rib.Write()
iv = vtk.vtkIVExporter()
iv.SetInput(renWin)
iv.SetFileName(dir + "/cells.iv")
iv.Write()
os.remove(dir + '/cells.iv')
obj = vtk.vtkOBJExporter()
obj.SetInput(renWin)
obj.SetFilePrefix(dir + "/cells")
obj.Write()
os.remove(dir + '/cells.obj')
os.remove(dir + '/cells.mtl')
vrml = vtk.vtkVRMLExporter()
vrml.SetInput(renWin)
#vrml.SetStartWrite(vrml.SetFileName(dir + "/cells.wrl"))
#vrml.SetEndWrite(vrml.SetFileName("/a/acells.wrl"))
vrml.SetFileName(dir + "/cells.wrl")
vrml.SetSpeed(5.5)
vrml.Write()
os.remove(dir + '/cells.wrl')
oogl = vtk.vtkOOGLExporter()
oogl.SetInput(renWin)
oogl.SetFileName(dir + "/cells.oogl")
oogl.Write()
os.remove(dir + '/cells.oogl')
# the UnRegister calls are because make object is the same as New,
# and causes memory leaks. (Python does not treat NewInstance the same as New).
def DeleteCopies():
bVoxel.UnRegister(None)
bHexahedron.UnRegister(None)
bTetra.UnRegister(None)
bWedge.UnRegister(None)
bPyramid.UnRegister(None)
bPixel.UnRegister(None)
bQuad.UnRegister(None)
bTriangle.UnRegister(None)
bPolygon.UnRegister(None)
bTriangleStrip.UnRegister(None)
bLine.UnRegister(None)
bPolyLine.UnRegister(None)
bVertex.UnRegister(None)
bPolyVertex.UnRegister(None)
bPenta.UnRegister(None)
bHexa.UnRegister(None)
DeleteCopies()
# render and interact with data
renWin.Render()
img_file = "cells.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(cells, 'test')])
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/VTK/Rendering/Core/Testing/Python/cells.py
|
Python
|
gpl-3.0
| 27,469
|
[
"VTK"
] |
ace6e5aa12af641603004e402ebd88f9839ae8e27b2b1b8e0b3334518a7619f2
|
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar, Evan Feinberg, and Karl Leswing"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import os
import shutil
from warnings import warn
import time
import tempfile
import hashlib
from collections import Counter
from rdkit import Chem
from rdkit.Chem import AllChem
from deepchem.utils.rdkit_util import load_molecule
import numpy as np
from scipy.spatial.distance import cdist
from copy import deepcopy
from deepchem.feat import ComplexFeaturizer
from deepchem.utils.save import log
"""
TODO(LESWING) add sanitization with rdkit upgrade to 2017.*
"""
def get_ligand_filetype(ligand_filename):
"""Returns the filetype of ligand."""
if ".mol2" in ligand_filename:
return "mol2"
elif ".sdf" in ligand_filename:
return "sdf"
elif ".pdbqt" in ligand_filename:
return "pdbqt"
elif ".pdb" in ligand_filename:
return "pdb"
else:
raise ValueError("Unrecognized_filename")
def compute_centroid(coordinates):
"""Compute the x,y,z centroid of provided coordinates
coordinates: np.ndarray
Shape (N, 3), where N is number atoms.
"""
centroid = np.mean(coordinates, axis=0)
return (centroid)
def generate_random__unit_vector():
"""Generate a random unit vector on the 3-sphere.
citation:
http://mathworld.wolfram.com/SpherePointPicking.html
  a. Choose a random theta in [0, 2*pi]
  b. Choose a random z in [-1, 1]
  c. Compute the output vector u: (x,y,z) = (sqrt(1-z^2)*cos(theta), sqrt(1-z^2)*sin(theta), z)
"""
theta = np.random.uniform(low=0.0, high=2 * np.pi)
z = np.random.uniform(low=-1.0, high=1.0)
u = np.array(
[np.sqrt(1 - z**2) * np.cos(theta),
np.sqrt(1 - z**2) * np.sin(theta), z])
return (u)
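# Illustrative sketch (not part of the original module): sampled vectors are
# 3-dimensional and have unit length, per the construction documented above.
def _check_random_unit_vector(n_trials=10, tol=1e-8):
  for _ in range(n_trials):
    u = generate_random__unit_vector()
    assert u.shape == (3,)
    assert abs(np.linalg.norm(u) - 1.0) < tol
  return True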
def generate_random_rotation_matrix():
"""
1. Generate a random unit vector u, randomly sampled from the unit
3-sphere (see function generate_random__unit_vector() for details)
2. Generate a second random unit vector v
a. If absolute value of u \dot v > 0.99, repeat.
(This is important for numerical stability. Intuition: we want them to
be as linearly independent as possible or else the orthogonalized
version of v will be much shorter in magnitude compared to u. I assume
in Stack they took this from Gram-Schmidt orthogonalization?)
  b. v' = v - (u \dot v)*u, i.e. subtract out the component of v that's in
  u's direction
  c. normalize v' (this isn't in Stack but I assume it must be done)
  3. find w = u \cross v'
  4. u, v', and w will form the columns of a rotation matrix, R. The
  intuition is that u, v' and w are, respectively, what the standard basis
vectors e1, e2, and e3 will be mapped to under the transformation.
"""
u = generate_random__unit_vector()
v = generate_random__unit_vector()
while np.abs(np.dot(u, v)) >= 0.99:
v = generate_random__unit_vector()
vp = v - (np.dot(u, v) * u)
vp /= np.linalg.norm(vp)
w = np.cross(u, vp)
R = np.column_stack((u, vp, w))
return (R)
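# Illustrative sketch (not part of the original module): the matrices built by
# generate_random_rotation_matrix() should be orthonormal with determinant +1
# (proper rotations), since the columns u, v', w = u x v' form a right-handed
# orthonormal triple.
def _check_rotation_matrix(n_trials=10, tol=1e-8):
  for _ in range(n_trials):
    R = generate_random_rotation_matrix()
    assert np.allclose(np.dot(R, R.T), np.eye(3), atol=tol)
    assert np.isclose(np.linalg.det(R), 1.0, atol=tol)
  return True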
def rotate_molecules(mol_coordinates_list):
"""Rotates provided molecular coordinates.
Pseudocode:
1. Generate random rotation matrix. This matrix applies a random
transformation to any 3-vector such that, were the random transformation
repeatedly applied, it would randomly sample along the surface of a sphere
with radius equal to the norm of the given 3-vector cf.
_generate_random_rotation_matrix() for details
  2. Apply R to all atomic coordinates.
3. Return rotated molecule
"""
R = generate_random_rotation_matrix()
rotated_coordinates_list = []
for mol_coordinates in mol_coordinates_list:
coordinates = deepcopy(mol_coordinates)
rotated_coordinates = np.transpose(np.dot(R, np.transpose(coordinates)))
rotated_coordinates_list.append(rotated_coordinates)
return (rotated_coordinates_list)
def compute_pairwise_distances(protein_xyz, ligand_xyz):
"""Takes an input m x 3 and n x 3 np arrays of 3D coords of protein and ligand,
respectively, and outputs an m x n np array of pairwise distances in Angstroms
  between protein and ligand atoms. Entry (i,j) is the distance between the i-th
  protein atom and the j-th ligand atom.
"""
pairwise_distances = cdist(protein_xyz, ligand_xyz, metric='euclidean')
return (pairwise_distances)
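# Illustrative sketch (not part of the original module): a toy 2 x 3 distance
# matrix; the coordinates are arbitrary and in Angstroms.
def _example_pairwise_distances():
  protein_xyz = np.array([[0., 0., 0.], [1., 0., 0.]])
  ligand_xyz = np.array([[0., 3., 0.], [0., 4., 0.], [0., 0., 5.]])
  dists = compute_pairwise_distances(protein_xyz, ligand_xyz)
  assert dists.shape == (2, 3)
  assert np.isclose(dists[0, 0], 3.0)
  return dists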
"""following two functions adapted from:
http://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
"""
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(vector_i, vector_j):
"""Returns the angle in radians between vectors "vector_i" and "vector_j"::
>>> print("%0.06f" % angle_between((1, 0, 0), (0, 1, 0)))
1.570796
>>> print("%0.06f" % angle_between((1, 0, 0), (1, 0, 0)))
0.000000
>>> print("%0.06f" % angle_between((1, 0, 0), (-1, 0, 0)))
3.141593
Note that this function always returns the smaller of the two angles between
the vectors (value between 0 and pi).
"""
vector_i_u = unit_vector(vector_i)
vector_j_u = unit_vector(vector_j)
angle = np.arccos(np.dot(vector_i_u, vector_j_u))
if np.isnan(angle):
if np.allclose(vector_i_u, vector_j_u):
return 0.0
else:
return np.pi
return angle
def hash_sybyl(sybyl, sybyl_types):
return (sybyl_types.index(sybyl))
def hash_ecfp(ecfp, power):
"""
Returns an int of size 2^power representing that
ECFP fragment. Input must be a string.
"""
ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
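# Illustrative sketch (not part of the original module): the hash is
# deterministic and always lands in [0, 2**power); the fragment string below is
# an arbitrary example of the "atomic-number,SMILES" format used further down.
def _example_hash_ecfp(power=11):
  h1 = hash_ecfp("6,CCO", power)
  h2 = hash_ecfp("6,CCO", power)
  assert h1 == h2
  assert 0 <= h1 < 2**power
  return h1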
def hash_ecfp_pair(ecfp_pair, power):
"""Returns an int of size 2^power representing that ECFP pair. Input must be
a tuple of strings.
"""
ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
ecfp = ecfp.encode('utf-8')
md5 = hashlib.md5()
md5.update(ecfp)
digest = md5.hexdigest()
ecfp_hash = int(digest, 16) % (2**power)
return (ecfp_hash)
def compute_all_ecfp(mol, indices=None, degree=2):
"""Obtain molecular fragment for all atoms emanating outward to given degree.
For each fragment, compute SMILES string (for now) and hash to an int.
Return a dictionary mapping atom index to hashed SMILES.
"""
ecfp_dict = {}
for i in range(mol.GetNumAtoms()):
if indices is not None and i not in indices:
continue
env = Chem.FindAtomEnvironmentOfRadiusN(mol, degree, i, useHs=True)
submol = Chem.PathToSubmol(mol, env)
smile = Chem.MolToSmiles(submol)
ecfp_dict[i] = "%s,%s" % (mol.GetAtoms()[i].GetAtomicNum(), smile)
return ecfp_dict
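# Illustrative sketch (not part of the original module): fragment dictionary
# for ethanol, using the rdkit import above. Every atom index maps to an
# "atomic-number,SMILES-environment" string.
def _example_compute_all_ecfp():
  mol = Chem.MolFromSmiles("CCO")
  ecfp_dict = compute_all_ecfp(mol, degree=2)
  assert set(ecfp_dict.keys()) == set(range(mol.GetNumAtoms()))
  return ecfp_dict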
def compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11):
"""Computes ECFP features for provided rdkit molecule.
Parameters:
-----------
mol: rdkit molecule
Molecule to featurize.
ecfp_degree: int
ECFP radius
ecfp_power: int
Number of bits to store ECFP features (2^ecfp_power will be length of
ECFP array)
Returns:
--------
ecfp_array: np.ndarray
Returns an array of size 2^ecfp_power where array at index i has a 1 if
that ECFP fragment is found in the molecule and array at index j has a 0
if ECFP fragment not in molecule.
"""
bv = AllChem.GetMorganFingerprintAsBitVect(
mol, ecfp_degree, nBits=2**ecfp_power)
return np.array(bv)
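# Illustrative sketch (not part of the original module): a 2**11-bit Morgan
# fingerprint for ethanol, returned as a 0/1 numpy array.
def _example_compute_ecfp_features():
  mol = Chem.MolFromSmiles("CCO")
  arr = compute_ecfp_features(mol, ecfp_degree=2, ecfp_power=11)
  assert arr.shape == (2**11,)
  assert arr.max() <= 1
  return arr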
def featurize_binding_pocket_ecfp(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=4.5,
ecfp_degree=2):
"""Computes ECFP dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: rdkit.rdchem.Mol
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: rdkit.rdchem.Mol
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
ecfp_degree: int
ECFP radius
"""
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
return (protein_ecfp_dict, ligand_ecfp_dict)
def compute_all_sybyl(mol, indices=None):
"""Computes Sybyl atom types for atoms in molecule."""
raise NotImplementedError("This function is not implemented yet")
def featurize_binding_pocket_sybyl(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances=None,
cutoff=7.0):
"""Computes Sybyl dicts for ligand and binding pocket of the protein.
Parameters
----------
protein_xyz: np.ndarray
Of shape (N_protein_atoms, 3)
protein: Rdkit Molecule
Contains more metadata.
ligand_xyz: np.ndarray
Of shape (N_ligand_atoms, 3)
ligand: Rdkit Molecule
Contains more metadata
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration.
"""
features_dict = {}
if pairwise_distances is None:
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
contacts = np.nonzero((pairwise_distances < cutoff))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
protein_sybyl_dict = compute_all_sybyl(protein, indices=protein_atoms)
ligand_sybyl_dict = compute_all_sybyl(ligand)
return (protein_sybyl_dict, ligand_sybyl_dict)
def compute_splif_features_in_range(protein,
ligand,
pairwise_distances,
contact_bin,
ecfp_degree=2):
"""Computes SPLIF features for protein atoms close to ligand atoms.
Finds all protein atoms that are > contact_bin[0] and < contact_bin[1] away
from ligand atoms. Then, finds the ECFP fingerprints for the contacting
atoms. Returns a dictionary mapping (protein_index_i, ligand_index_j) -->
(protein_ecfp_i, ligand_ecfp_j)
"""
contacts = np.nonzero((pairwise_distances > contact_bin[0]) &
(pairwise_distances < contact_bin[1]))
protein_atoms = set([int(c) for c in contacts[0].tolist()])
contacts = zip(contacts[0], contacts[1])
protein_ecfp_dict = compute_all_ecfp(
protein, indices=protein_atoms, degree=ecfp_degree)
ligand_ecfp_dict = compute_all_ecfp(ligand, degree=ecfp_degree)
splif_dict = {
contact: (protein_ecfp_dict[contact[0]], ligand_ecfp_dict[contact[1]])
for contact in contacts
}
return (splif_dict)
def featurize_splif(protein_xyz, protein, ligand_xyz, ligand, contact_bins,
pairwise_distances, ecfp_degree):
"""Computes SPLIF featurization of protein-ligand binding pocket.
For each contact range (i.e. 1 A to 2 A, 2 A to 3 A, etc.) compute a
dictionary mapping (protein_index_i, ligand_index_j) tuples -->
(protein_ecfp_i, ligand_ecfp_j) tuples. Return a list of such splif
dictionaries.
"""
splif_dicts = []
for i, contact_bin in enumerate(contact_bins):
splif_dicts.append(
compute_splif_features_in_range(protein, ligand, pairwise_distances,
contact_bin, ecfp_degree))
return (splif_dicts)
def compute_ring_center(mol, ring_indices):
"""Computes 3D coordinates of a center of a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
ring_centroid: np.ndarray
Position of a ring center
"""
conformer = mol.GetConformer()
ring_xyz = np.zeros((len(ring_indices), 3))
for i, atom_idx in enumerate(ring_indices):
atom_position = conformer.GetAtomPosition(atom_idx)
ring_xyz[i] = np.array(atom_position)
ring_centroid = compute_centroid(ring_xyz)
return ring_centroid
def compute_ring_normal(mol, ring_indices):
"""Computes normal to a plane determined by a given ring.
Parameters:
-----------
mol: rdkit.rdchem.Mol
Molecule containing a ring
ring_indices: array-like
Indices of atoms forming a ring
Returns:
--------
normal: np.ndarray
Normal vector
"""
conformer = mol.GetConformer()
points = np.zeros((3, 3))
for i, atom_idx in enumerate(ring_indices[:3]):
atom_position = conformer.GetAtomPosition(atom_idx)
points[i] = np.array(atom_position)
v1 = points[1] - points[0]
v2 = points[2] - points[0]
normal = np.cross(v1, v2)
return normal
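# Minimal sketch of the two ring helpers above on a benzene ring (comment-only;
# assumes RDKit is available and a 3D conformer has been generated):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   benzene = Chem.AddHs(Chem.MolFromSmiles('c1ccccc1'))
#   AllChem.EmbedMolecule(benzene, randomSeed=42)   # create a conformer
#   ring = list(Chem.GetSymmSSSR(benzene))[0]       # the six aromatic carbons
#   center = compute_ring_center(benzene, ring)     # np.ndarray of shape (3,)
#   normal = compute_ring_normal(benzene, ring)     # not normalized to unit length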
def is_pi_parallel(ring1_center,
ring1_normal,
ring2_center,
ring2_normal,
dist_cutoff=8.0,
angle_cutoff=30.0):
"""Check if two aromatic rings form a parallel pi-pi contact.
Parameters:
-----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
    Distance cutoff. Max allowed distance between the ring centers (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
the rings (in degrees).
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
dist < dist_cutoff):
return True
return False
def is_pi_t(ring1_center,
ring1_normal,
ring2_center,
ring2_normal,
dist_cutoff=5.5,
angle_cutoff=30.0):
"""Check if two aromatic rings form a T-shaped pi-pi contact.
Parameters:
-----------
ring1_center, ring2_center: np.ndarray
Positions of centers of the two rings. Can be computed with the
compute_ring_center function.
ring1_normal, ring2_normal: np.ndarray
Normals of the two rings. Can be computed with the compute_ring_normal
function.
dist_cutoff: float
    Distance cutoff. Max allowed distance between the ring centers (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (90deg) angle between
the rings (in degrees).
"""
dist = np.linalg.norm(ring1_center - ring2_center)
angle = angle_between(ring1_normal, ring2_normal) * 180 / np.pi
if ((90.0 - angle_cutoff < angle < 90.0 + angle_cutoff) and
dist < dist_cutoff):
return True
return False
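# Small synthetic sketch of the two geometric checks above (distances in
# Angstroms, default cutoffs):
#
#   ring1_center = np.array([0.0, 0.0, 0.0])
#   ring2_center = np.array([0.0, 0.0, 3.5])    # stacked 3.5 A above ring1
#   normal_z = np.array([0.0, 0.0, 1.0])
#   normal_x = np.array([1.0, 0.0, 0.0])
#   is_pi_parallel(ring1_center, normal_z, ring2_center, normal_z)  # True, 0 deg
#   is_pi_t(ring1_center, normal_z, ring2_center, normal_x)         # True, 90 deg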
def compute_pi_stack(protein,
ligand,
pairwise_distances=None,
dist_cutoff=4.4,
angle_cutoff=30.):
"""Find aromatic rings in protein and ligand that form pi-pi contacts.
For each atom in the contact, count number of atoms in the other molecule
that form this contact.
Pseudocode:
for each aromatic ring in protein:
for each aromatic ring in ligand:
compute distance between centers
compute angle between normals
if it counts as parallel pi-pi:
count interacting atoms
if it counts as pi-T:
count interacting atoms
Parameters:
-----------
protein, ligand: rdkit.rdchem.Mol
Two interacting molecules.
pairwise_distances: np.ndarray (optional)
Array of pairwise protein-ligand distances (Angstroms)
dist_cutoff: float
    Distance cutoff. Max allowed distance between the ring centers (Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal angle between rings.
Returns:
--------
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel: dict
Dictionaries mapping atom indices to number of atoms they interact with.
Separate dictionary is created for each type of pi stacking (parallel and
T-shaped) and each molecule (protein and ligand).
"""
protein_pi_parallel = Counter()
protein_pi_t = Counter()
ligand_pi_parallel = Counter()
ligand_pi_t = Counter()
protein_aromatic_rings = []
ligand_aromatic_rings = []
for mol, ring_list in ((protein, protein_aromatic_rings),
(ligand, ligand_aromatic_rings)):
aromatic_atoms = {atom.GetIdx() for atom in mol.GetAromaticAtoms()}
for ring in Chem.GetSymmSSSR(mol):
# if ring is aromatic
if set(ring).issubset(aromatic_atoms):
# save its indices, center, and normal
ring_center = compute_ring_center(mol, ring)
ring_normal = compute_ring_normal(mol, ring)
ring_list.append((ring, ring_center, ring_normal))
# remember protein-ligand pairs we already counted
counted_pairs_parallel = set()
counted_pairs_t = set()
for prot_ring, prot_ring_center, prot_ring_normal in protein_aromatic_rings:
for lig_ring, lig_ring_center, lig_ring_normal in ligand_aromatic_rings:
if is_pi_parallel(
prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx, lig_atom_idx) not in counted_pairs_parallel:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_parallel.add((prot_atom_idx, lig_atom_idx))
protein_pi_parallel.update(prot_to_update)
ligand_pi_parallel.update(lig_to_update)
if is_pi_t(
prot_ring_center,
prot_ring_normal,
lig_ring_center,
lig_ring_normal,
angle_cutoff=angle_cutoff,
dist_cutoff=dist_cutoff):
prot_to_update = set()
lig_to_update = set()
for prot_atom_idx in prot_ring:
for lig_atom_idx in lig_ring:
if (prot_atom_idx, lig_atom_idx) not in counted_pairs_t:
# if this pair is new, count atoms forming a contact
prot_to_update.add(prot_atom_idx)
lig_to_update.add(lig_atom_idx)
counted_pairs_t.add((prot_atom_idx, lig_atom_idx))
protein_pi_t.update(prot_to_update)
ligand_pi_t.update(lig_to_update)
return (protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel)
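# Hedged usage sketch for compute_pi_stack (comment-only). Both molecules must
# be sanitized so that GetAromaticAtoms() reports aromaticity correctly:
#
#   prot_pi_t, prot_pi_parallel, lig_pi_t, lig_pi_parallel = compute_pi_stack(
#       protein_rdk, ligand_rdk, dist_cutoff=4.4, angle_cutoff=30.)
#   # each returned Counter maps an atom index to the number of atoms in the
#   # other molecule with which it forms the given pi-pi contact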
def is_cation_pi(cation_position,
ring_center,
ring_normal,
dist_cutoff=6.5,
angle_cutoff=30.0):
"""Check if a cation and an aromatic ring form contact.
Parameters:
-----------
ring_center: np.ndarray
Positions of ring center. Can be computed with the compute_ring_center
function.
ring_normal: np.ndarray
Normal of ring. Can be computed with the compute_ring_normal function.
dist_cutoff: float
Distance cutoff. Max allowed distance between ring center and cation
(in Angstroms).
angle_cutoff: float
Angle cutoff. Max allowed deviation from the ideal (0deg) angle between
ring normal and vector pointing from ring center to cation (in degrees).
"""
cation_to_ring_vec = cation_position - ring_center
dist = np.linalg.norm(cation_to_ring_vec)
angle = angle_between(cation_to_ring_vec, ring_normal) * 180. / np.pi
if ((angle < angle_cutoff or angle > 180.0 - angle_cutoff) and
(dist < dist_cutoff)):
return True
return False
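# Synthetic sketch for is_cation_pi: a cation sitting 4 A directly above the
# ring center (0 deg off the normal) satisfies the default cutoffs:
#
#   cation_position = np.array([0.0, 0.0, 4.0])
#   ring_center = np.array([0.0, 0.0, 0.0])
#   ring_normal = np.array([0.0, 0.0, 1.0])
#   is_cation_pi(cation_position, ring_center, ring_normal)   # True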
def compute_cation_pi(mol1, mol2, charge_tolerance=0.01, **kwargs):
"""Finds aromatic rings in mo1 and cations in mol2 that interact with each
other.
Parameters:
-----------
mol1: rdkit.rdchem.Mol
Molecule to look for interacting rings
mol2: rdkit.rdchem.Mol
Molecule to look for interacting cations
charge_tolerance: float
Atom is considered a cation if its formal charge is greater than
1 - charge_tolerance
**kwargs:
Arguments that are passed to is_cation_pi function
Returns:
--------
mol1_pi: dict
Dictionary that maps atom indices (from mol1) to the number of cations
(in mol2) they interact with
mol2_cation: dict
Dictionary that maps atom indices (from mol2) to the number of aromatic
atoms (in mol1) they interact with
"""
mol1_pi = Counter()
mol2_cation = Counter()
conformer = mol2.GetConformer()
aromatic_atoms = set(atom.GetIdx() for atom in mol1.GetAromaticAtoms())
rings = [list(r) for r in Chem.GetSymmSSSR(mol1)]
for ring in rings:
# if ring from mol1 is aromatic
if set(ring).issubset(aromatic_atoms):
ring_center = compute_ring_center(mol1, ring)
ring_normal = compute_ring_normal(mol1, ring)
for atom in mol2.GetAtoms():
# ...and atom from mol2 is a cation
if atom.GetFormalCharge() > 1.0 - charge_tolerance:
cation_position = np.array(conformer.GetAtomPosition(atom.GetIdx()))
# if angle and distance are correct
if is_cation_pi(cation_position, ring_center, ring_normal, **kwargs):
# count atoms forming a contact
mol1_pi.update(ring)
            mol2_cation.update([atom.GetIdx()])
return mol1_pi, mol2_cation
def compute_binding_pocket_cation_pi(protein, ligand, **kwargs):
"""Finds cation-pi interactions between protein and ligand.
Parameters:
-----------
protein, ligand: rdkit.rdchem.Mol
Interacting molecules
**kwargs:
Arguments that are passed to compute_cation_pi function
Returns:
--------
protein_cation_pi, ligand_cation_pi: dict
Dictionaries that maps atom indices to the number of cations/aromatic
atoms they interact with
"""
# find interacting rings from protein and cations from ligand
protein_pi, ligand_cation = compute_cation_pi(protein, ligand, **kwargs)
# find interacting cations from protein and rings from ligand
ligand_pi, protein_cation = compute_cation_pi(ligand, protein, **kwargs)
# merge counters
protein_cation_pi = Counter()
protein_cation_pi.update(protein_pi)
protein_cation_pi.update(protein_cation)
ligand_cation_pi = Counter()
ligand_cation_pi.update(ligand_pi)
ligand_cation_pi.update(ligand_cation)
return protein_cation_pi, ligand_cation_pi
def get_partial_charge(atom):
"""Get partial charge of a given atom (rdkit Atom object)"""
try:
value = atom.GetProp(str("_GasteigerCharge"))
if value == '-nan':
return 0
return float(value)
except KeyError:
return 0
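# Hedged sketch: get_partial_charge reads the "_GasteigerCharge" atom property,
# so partial charges must be assigned first, e.g. with RDKit's Gasteiger
# implementation (comment-only):
#
#   from rdkit import Chem
#   from rdkit.Chem import AllChem
#   mol = Chem.MolFromSmiles('CC(=O)[O-]')
#   AllChem.ComputeGasteigerCharges(mol)
#   charges = [get_partial_charge(atom) for atom in mol.GetAtoms()]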
def get_formal_charge(atom):
warn('get_formal_charge function is deprecated and will be removed'
' in version 1.4, use get_partial_charge instead', DeprecationWarning)
return get_partial_charge(atom)
def is_salt_bridge(atom_i, atom_j):
"""Check if two atoms have correct charges to form a salt bridge"""
if np.abs(2.0 - np.abs(
get_partial_charge(atom_i) - get_partial_charge(atom_j))) < 0.01:
return True
return False
def compute_salt_bridges(protein_xyz,
protein,
ligand_xyz,
ligand,
pairwise_distances,
cutoff=5.0):
"""Find salt bridge contacts between protein and lingand.
Parameters:
-----------
protein_xyz, ligand_xyz: np.ndarray
Arrays with atomic coordinates
protein, ligand: rdkit.rdchem.Mol
Interacting molecules
pairwise_distances: np.ndarray
Array of pairwise protein-ligand distances (Angstroms)
cutoff: float
Cutoff distance for contact consideration
Returns:
--------
salt_bridge_contacts: list of tuples
List of contacts. Tuple (i, j) indicates that atom i from protein
interacts with atom j from ligand.
"""
salt_bridge_contacts = []
contacts = np.nonzero(pairwise_distances < cutoff)
contacts = zip(contacts[0], contacts[1])
for contact in contacts:
protein_atom = protein.GetAtoms()[int(contact[0])]
ligand_atom = ligand.GetAtoms()[int(contact[1])]
if is_salt_bridge(protein_atom, ligand_atom):
salt_bridge_contacts.append(contact)
return salt_bridge_contacts
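# Hedged usage sketch for compute_salt_bridges (comment-only). Partial charges
# must already be assigned, which load_molecule is expected to do when
# calc_charges=True; the file names are hypothetical placeholders.
#
#   protein_xyz, protein_rdk = load_molecule("protein.pdb", calc_charges=True)
#   ligand_xyz, ligand_rdk = load_molecule("ligand.sdf", calc_charges=True)
#   dists = compute_pairwise_distances(protein_xyz, ligand_xyz)
#   bridges = compute_salt_bridges(protein_xyz, protein_rdk, ligand_xyz,
#                                  ligand_rdk, dists, cutoff=5.0)
#   # bridges is a list of (protein_atom_idx, ligand_atom_idx) tuples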
def is_angle_within_cutoff(vector_i, vector_j, hbond_angle_cutoff):
angle = angle_between(vector_i, vector_j) * 180. / np.pi
return (angle > (180 - hbond_angle_cutoff) and
angle < (180. + hbond_angle_cutoff))
def is_hydrogen_bond(protein_xyz, protein, ligand_xyz, ligand, contact,
hbond_angle_cutoff):
"""
Determine if a pair of atoms (contact = tuple of protein_atom_index, ligand_atom_index)
between protein and ligand represents a hydrogen bond. Returns a boolean result.
"""
# TODO(LESWING)
return False
def compute_hbonds_in_range(protein, protein_xyz, ligand, ligand_xyz,
pairwise_distances, hbond_dist_bin,
hbond_angle_cutoff):
"""
Find all pairs of (protein_index_i, ligand_index_j) that hydrogen bond given
a distance bin and an angle cutoff.
"""
contacts = np.nonzero((pairwise_distances > hbond_dist_bin[0]) &
(pairwise_distances < hbond_dist_bin[1]))
contacts = zip(contacts[0], contacts[1])
hydrogen_bond_contacts = []
for contact in contacts:
if is_hydrogen_bond(protein_xyz, protein, ligand_xyz, ligand, contact,
hbond_angle_cutoff):
hydrogen_bond_contacts.append(contact)
return hydrogen_bond_contacts
def compute_hydrogen_bonds(protein_xyz, protein, ligand_xyz, ligand,
pairwise_distances, hbond_dist_bins,
hbond_angle_cutoffs):
"""Computes hydrogen bonds between proteins and ligands.
Returns a list of sublists. Each sublist is a series of tuples of
(protein_index_i, ligand_index_j) that represent a hydrogen bond. Each sublist
represents a different type of hydrogen bond.
"""
hbond_contacts = []
for i, hbond_dist_bin in enumerate(hbond_dist_bins):
hbond_angle_cutoff = hbond_angle_cutoffs[i]
hbond_contacts.append(
compute_hbonds_in_range(protein, protein_xyz, ligand, ligand_xyz,
pairwise_distances, hbond_dist_bin,
hbond_angle_cutoff))
return (hbond_contacts)
def convert_atom_to_voxel(molecule_xyz,
atom_index,
box_width,
voxel_width,
verbose=False):
"""Converts atom coordinates to an i,j,k grid index.
Parameters:
-----------
molecule_xyz: np.ndarray
Array with coordinates of all atoms in the molecule, shape (N, 3)
atom_index: int
Index of an atom
box_width: float
Size of a box
voxel_width: float
Size of a voxel
verbose: bool
Print warnings when atom is outside of a box
"""
indices = np.floor(
(molecule_xyz[atom_index] + box_width / 2.0) / voxel_width).astype(int)
if ((indices < 0) | (indices >= box_width / voxel_width)).any():
if verbose:
warn('Coordinates are outside of the box (atom id = %s,'
           ' coords xyz = %s, coords in box = %s)' %
(atom_index, molecule_xyz[atom_index], indices))
return ([indices])
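# Worked example for convert_atom_to_voxel: with a 16 A box and 1 A voxels the
# origin maps to the central voxel, and coordinates near +/- box_width / 2 map
# to the box edges:
#
#   xyz = np.array([[0.0, 0.0, 0.0], [7.9, -8.0, 3.2]])
#   convert_atom_to_voxel(xyz, 0, box_width=16.0, voxel_width=1.0)
#   # -> [array([8, 8, 8])]
#   convert_atom_to_voxel(xyz, 1, box_width=16.0, voxel_width=1.0)
#   # -> [array([15,  0, 11])]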
def convert_atom_pair_to_voxel(molecule_xyz_tuple, atom_index_pair, box_width,
voxel_width):
"""Converts a pair of atoms to a list of i,j,k tuples."""
indices_list = []
indices_list.append(
convert_atom_to_voxel(molecule_xyz_tuple[0], atom_index_pair[0],
box_width, voxel_width)[0])
indices_list.append(
convert_atom_to_voxel(molecule_xyz_tuple[1], atom_index_pair[1],
box_width, voxel_width)[0])
return (indices_list)
def compute_charge_dictionary(molecule):
"""Create a dictionary with partial charges for each atom in the molecule.
This function assumes that the charges for the molecule are already
computed (it can be done with rdkit_util.compute_charges(molecule))
"""
charge_dictionary = {}
for i, atom in enumerate(molecule.GetAtoms()):
charge_dictionary[i] = get_partial_charge(atom)
return charge_dictionary
def subtract_centroid(xyz, centroid):
"""Subtracts centroid from each coordinate.
Subtracts the centroid, a numpy array of dim 3, from all coordinates of all
atoms in the molecule
"""
xyz -= np.transpose(centroid)
return (xyz)
class RdkitGridFeaturizer(ComplexFeaturizer):
"""Featurizes protein-ligand complex using flat features or a 3D grid (in which
each voxel is described with a vector of features).
"""
def __init__(self,
nb_rotations=0,
feature_types=None,
ecfp_degree=2,
ecfp_power=3,
splif_power=3,
box_width=16.0,
voxel_width=1.0,
flatten=False,
verbose=True,
sanitize=False,
**kwargs):
"""Parameters:
-----------
nb_rotations: int, optional (default 0)
Number of additional random rotations of a complex to generate.
feature_types: list, optional (default ['ecfp'])
Types of features to calculate. Available types are:
flat features: 'ecfp_ligand', 'ecfp_hashed', 'splif_hashed', 'hbond_count'
voxel features: 'ecfp', 'splif', 'sybyl', 'salt_bridge', 'charge', 'hbond',
          'pi_stack', 'cation_pi'
There are also 3 predefined sets of features: 'flat_combined',
'voxel_combined', and 'all_combined'. Calculated features are concatenated
and their order is preserved (features in predefined sets are in
alphabetical order).
ecfp_degree: int, optional (default 2)
ECFP radius.
ecfp_power: int, optional (default 3)
Number of bits to store ECFP features (resulting vector will be
2^ecfp_power long)
splif_power: int, optional (default 3)
Number of bits to store SPLIF features (resulting vector will be
2^splif_power long)
box_width: float, optional (default 16.0)
Size of a box in which voxel features are calculated. Box is centered on a
ligand centroid.
voxel_width: float, optional (default 1.0)
Size of a 3D voxel in a grid.
    flatten: bool, optional (default False)
      Indicate whether calculated features should be flattened. Output is always
      flattened if flat features are specified in feature_types.
    verbose: bool, optional (default True)
      Verbosity of logging.
    sanitize: bool, optional (default False)
      If set to True molecules will be sanitized. Note that calculating some
      features (e.g. aromatic interactions) requires sanitized molecules.
    **kwargs: dict, optional
      Keyword arguments can be used to specify custom cutoffs and bins (see
      default values below).
Default cutoffs and bins:
-------------------------
hbond_dist_bins: [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)]
hbond_angle_cutoffs: [5, 50, 90]
splif_contact_bins: [(0, 2.0), (2.0, 3.0), (3.0, 4.5)]
ecfp_cutoff: 4.5
sybyl_cutoff: 7.0
salt_bridges_cutoff: 5.0
pi_stack_dist_cutoff: 4.4
pi_stack_angle_cutoff: 30.0
cation_pi_dist_cutoff: 6.5
cation_pi_angle_cutoff: 30.0
"""
# check if user tries to set removed arguments
deprecated_args = [
'box_x', 'box_y', 'box_z', 'save_intermediates', 'voxelize_features',
'parallel', 'voxel_feature_types'
]
# list of features that require sanitized molecules
require_sanitized = ['pi_stack', 'cation_pi', 'ecfp_ligand']
# not implemented featurization types
not_implemented = ['sybyl']
for arg in deprecated_args:
if arg in kwargs and verbose:
warn('%s argument was removed and it is ignored,'
' using it will result in error in version 1.4' % arg,
DeprecationWarning)
self.verbose = verbose
self.sanitize = sanitize
self.flatten = flatten
self.ecfp_degree = ecfp_degree
self.ecfp_power = ecfp_power
self.splif_power = splif_power
self.nb_rotations = nb_rotations
# default values
self.cutoffs = {
'hbond_dist_bins': [(2.2, 2.5), (2.5, 3.2), (3.2, 4.0)],
'hbond_angle_cutoffs': [5, 50, 90],
'splif_contact_bins': [(0, 2.0), (2.0, 3.0), (3.0, 4.5)],
'ecfp_cutoff': 4.5,
'sybyl_cutoff': 7.0,
'salt_bridges_cutoff': 5.0,
'pi_stack_dist_cutoff': 4.4,
'pi_stack_angle_cutoff': 30.0,
'cation_pi_dist_cutoff': 6.5,
'cation_pi_angle_cutoff': 30.0,
}
# update with cutoffs specified by the user
for arg, value in kwargs.items():
if arg in self.cutoffs:
self.cutoffs[arg] = value
self.box_width = float(box_width)
self.voxel_width = float(voxel_width)
self.voxels_per_edge = int(self.box_width / self.voxel_width)
self.sybyl_types = [
"C3", "C2", "C1", "Cac", "Car", "N3", "N3+", "Npl", "N2", "N1", "Ng+",
"Nox", "Nar", "Ntr", "Nam", "Npl3", "N4", "O3", "O-", "O2", "O.co2",
"O.spc", "O.t3p", "S3", "S3+", "S2", "So2", "Sox"
"Sac"
"SO", "P3", "P", "P3+", "F", "Cl", "Br", "I"
]
# define methods to calculate available flat features
# all methods (flat and voxel) must have the same API:
# f(prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances) -> list of np.ndarrays
self.FLAT_FEATURES = {
'ecfp_ligand': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[compute_ecfp_features(
lig_rdk,
self.ecfp_degree,
self.ecfp_power)],
'ecfp_hashed': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._vectorize(
hash_ecfp,
feature_dict=ecfp_dict,
channel_power=self.ecfp_power
) for ecfp_dict in featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree)],
'splif_hashed': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._vectorize(
hash_ecfp_pair,
feature_dict=splif_dict,
channel_power=self.splif_power
) for splif_dict in featurize_splif(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
self.cutoffs['splif_contact_bins'],
distances,
self.ecfp_degree)],
'hbond_count': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._vectorize(
hash_ecfp_pair,
feature_list=hbond_list,
channel_power=0
) for hbond_list in compute_hydrogen_bonds(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
self.cutoffs['hbond_dist_bins'],
self.cutoffs['hbond_angle_cutoffs'])]
}
def voxelize_pi_stack(prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances):
protein_pi_t, protein_pi_parallel, ligand_pi_t, ligand_pi_parallel = (
compute_pi_stack(
prot_rdk,
lig_rdk,
distances,
dist_cutoff=self.cutoffs['pi_stack_dist_cutoff'],
angle_cutoff=self.cutoffs['pi_stack_angle_cutoff']))
pi_parallel_tensor = self._voxelize(
convert_atom_to_voxel,
None,
prot_xyz,
feature_dict=protein_pi_parallel,
nb_channel=1)
pi_parallel_tensor += self._voxelize(
convert_atom_to_voxel,
None,
lig_xyz,
feature_dict=ligand_pi_parallel,
nb_channel=1)
pi_t_tensor = self._voxelize(
convert_atom_to_voxel,
None,
prot_xyz,
feature_dict=protein_pi_t,
nb_channel=1)
pi_t_tensor += self._voxelize(
convert_atom_to_voxel,
None,
lig_xyz,
feature_dict=ligand_pi_t,
nb_channel=1)
return [pi_parallel_tensor, pi_t_tensor]
# define methods to calculate available voxel features
self.VOXEL_FEATURES = {
'ecfp': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[sum([self._voxelize(
convert_atom_to_voxel,
hash_ecfp,
xyz,
feature_dict=ecfp_dict,
channel_power=self.ecfp_power
) for xyz, ecfp_dict in zip(
(prot_xyz, lig_xyz), featurize_binding_pocket_ecfp(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['ecfp_cutoff'],
ecfp_degree=self.ecfp_degree
))])],
'splif': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._voxelize(
convert_atom_pair_to_voxel,
hash_ecfp_pair,
(prot_xyz, lig_xyz),
feature_dict=splif_dict,
channel_power=self.splif_power
) for splif_dict in featurize_splif(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
self.cutoffs['splif_contact_bins'],
distances,
self.ecfp_degree)],
'sybyl': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._voxelize(
convert_atom_to_voxel,
lambda x: hash_sybyl(x, sybyl_types=self.sybyl_types),
xyz,
feature_dict=sybyl_dict,
nb_channel=len(self.sybyl_types)
) for xyz, sybyl_dict in zip(
(prot_xyz, lig_xyz), featurize_binding_pocket_sybyl(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['sybyl_cutoff']
))],
'salt_bridge': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._voxelize(
convert_atom_pair_to_voxel,
None,
(prot_xyz, lig_xyz),
feature_list=compute_salt_bridges(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
cutoff=self.cutoffs['salt_bridges_cutoff']),
nb_channel=1
)],
'charge': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[sum([self._voxelize(
convert_atom_to_voxel,
None,
xyz,
feature_dict=compute_charge_dictionary(mol),
nb_channel=1,
dtype="np.float16"
) for xyz, mol in ((prot_xyz, prot_rdk), (lig_xyz, lig_rdk))])],
'hbond': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[self._voxelize(
convert_atom_pair_to_voxel,
None,
(prot_xyz, lig_xyz),
feature_list=hbond_list,
channel_power=0
) for hbond_list in compute_hydrogen_bonds(
prot_xyz,
prot_rdk,
lig_xyz,
lig_rdk,
distances,
self.cutoffs['hbond_dist_bins'],
self.cutoffs['hbond_angle_cutoffs'])
],
'pi_stack': voxelize_pi_stack,
'cation_pi': lambda prot_xyz, prot_rdk, lig_xyz, lig_rdk, distances:
[sum([self._voxelize(
convert_atom_to_voxel,
None,
xyz,
feature_dict=cation_pi_dict,
nb_channel=1
) for xyz, cation_pi_dict in zip(
(prot_xyz, lig_xyz), compute_binding_pocket_cation_pi(
prot_rdk,
lig_rdk,
dist_cutoff=self.cutoffs['cation_pi_dist_cutoff'],
angle_cutoff=self.cutoffs['cation_pi_angle_cutoff'],
))])],
}
if feature_types is None:
feature_types = ['ecfp']
# each entry is a tuple (is_flat, feature_name)
self.feature_types = []
# list of features that cannot be calculated with specified parameters
# this list is used to define <flat/voxel/all>_combined subset
ignored_features = []
if self.sanitize is False:
ignored_features += require_sanitized
ignored_features += not_implemented
# parse provided feature types
for feature_type in feature_types:
if self.sanitize is False and feature_type in require_sanitized:
if self.verbose:
warn('sanitize is set to False, %s feature will be ignored' %
feature_type)
continue
if feature_type in not_implemented:
if self.verbose:
warn('%s feature is not implemented yet and will be ignored' %
feature_type)
continue
if feature_type in self.FLAT_FEATURES:
self.feature_types.append((True, feature_type))
if self.flatten is False:
if self.verbose:
warn('%s feature is used, output will be flattened' % feature_type)
self.flatten = True
elif feature_type in self.VOXEL_FEATURES:
self.feature_types.append((False, feature_type))
elif feature_type == 'flat_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES.keys())
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
warn('Flat features are used, output will be flattened')
self.flatten = True
elif feature_type == 'voxel_combined':
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES.keys())
if ftype not in ignored_features]
elif feature_type == 'all_combined':
self.feature_types += [(True, ftype)
for ftype in sorted(self.FLAT_FEATURES.keys())
if ftype not in ignored_features]
self.feature_types += [(False, ftype)
for ftype in sorted(self.VOXEL_FEATURES.keys())
if ftype not in ignored_features]
if self.flatten is False:
if self.verbose:
            warn('Flat features are used, output will be flattened')
self.flatten = True
elif self.verbose:
warn('Ignoring unknown feature %s' % feature_type)
def _featurize_complex(self, ligand_ext, ligand_lines, protein_pdb_lines):
tempdir = tempfile.mkdtemp()
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
ligand_file = os.path.join(tempdir, "ligand.%s" % ligand_ext)
with open(ligand_file, "w") as mol_f:
mol_f.writelines(ligand_lines)
############################################################## TIMING
time2 = time.time()
log("TIMING: Writing ligand took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
protein_pdb_file = os.path.join(tempdir, "protein.pdb")
with open(protein_pdb_file, "w") as protein_f:
protein_f.writelines(protein_pdb_lines)
############################################################## TIMING
time2 = time.time()
log("TIMING: Writing protein took %0.3f s" % (time2 - time1), self.verbose)
############################################################## TIMING
features_dict = self._transform(protein_pdb_file, ligand_file)
shutil.rmtree(tempdir)
return features_dict.values()
def featurize_complexes(self, mol_files, protein_pdbs, log_every_n=1000):
"""
Calculate features for mol/protein complexes.
Parameters
----------
    mol_files: list
      List of filenames (PDB, SDF, etc.) for ligand molecules.
protein_pdbs: list
List of PDB filenames for proteins.
"""
features = []
for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_pdbs)):
if i % log_every_n == 0:
log("Featurizing %d / %d" % (i, len(mol_files)))
ligand_ext = get_ligand_filetype(mol_file)
with open(mol_file) as mol_f:
mol_lines = mol_f.readlines()
with open(protein_pdb) as protein_file:
protein_pdb_lines = protein_file.readlines()
features += self._featurize_complex(ligand_ext, mol_lines,
protein_pdb_lines)
features = np.asarray(features)
return features
def _transform(self, protein_pdb, ligand_file):
"""Computes featurization of protein/ligand complex.
    Takes as input file paths (strings) for the protein PDB and the ligand.
    This function computes the centroid of the ligand, subtracts it from the
    atomic coordinates of both protein and ligand atoms (centering the complex
    on the ligand centroid), and then computes the featurization specified by
    the user for the translated complex.
"""
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
protein_xyz, protein_rdk = load_molecule(
protein_pdb, calc_charges=True, sanitize=self.sanitize)
############################################################## TIMING
time2 = time.time()
log("TIMING: Loading protein coordinates took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
ligand_xyz, ligand_rdk = load_molecule(
ligand_file, calc_charges=True, sanitize=self.sanitize)
############################################################## TIMING
time2 = time.time()
log("TIMING: Loading ligand coordinates took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
############################################################## TIMING
time1 = time.time()
############################################################## TIMING
centroid = compute_centroid(ligand_xyz)
ligand_xyz = subtract_centroid(ligand_xyz, centroid)
protein_xyz = subtract_centroid(protein_xyz, centroid)
############################################################## TIMING
time2 = time.time()
log("TIMING: Centroid processing took %0.3f s" % (time2 - time1),
self.verbose)
############################################################## TIMING
pairwise_distances = compute_pairwise_distances(protein_xyz, ligand_xyz)
transformed_systems = {}
transformed_systems[(0, 0)] = [protein_xyz, ligand_xyz]
for i in range(self.nb_rotations):
rotated_system = rotate_molecules([protein_xyz, ligand_xyz])
transformed_systems[(i + 1, 0)] = rotated_system
features = {}
for system_id, (protein_xyz, ligand_xyz) in transformed_systems.items():
feature_arrays = []
for is_flat, function_name in self.feature_types:
if is_flat:
function = self.FLAT_FEATURES[function_name]
else:
function = self.VOXEL_FEATURES[function_name]
feature_arrays += function(
protein_xyz,
protein_rdk,
ligand_xyz,
ligand_rdk,
pairwise_distances,
)
if self.flatten:
features[system_id] = np.concatenate(
[feature_array.flatten() for feature_array in feature_arrays])
else:
features[system_id] = np.concatenate(feature_arrays, axis=-1)
return features
def _voxelize(self,
get_voxels,
hash_function,
coordinates,
feature_dict=None,
feature_list=None,
channel_power=None,
nb_channel=16,
dtype="np.int8"):
if channel_power is not None:
if channel_power == 0:
nb_channel = 1
else:
nb_channel = int(2**channel_power)
if dtype == "np.int8":
feature_tensor = np.zeros(
(self.voxels_per_edge, self.voxels_per_edge, self.voxels_per_edge,
nb_channel),
dtype=np.int8)
else:
feature_tensor = np.zeros(
(self.voxels_per_edge, self.voxels_per_edge, self.voxels_per_edge,
nb_channel),
dtype=np.float16)
if feature_dict is not None:
for key, features in feature_dict.items():
voxels = get_voxels(coordinates, key, self.box_width, self.voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < self.voxels_per_edge)).all():
if hash_function is not None:
feature_tensor[voxel[0], voxel[1], voxel[2],
hash_function(features, channel_power)] += 1.0
else:
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += features
elif feature_list is not None:
for key in feature_list:
voxels = get_voxels(coordinates, key, self.box_width, self.voxel_width)
for voxel in voxels:
if ((voxel >= 0) & (voxel < self.voxels_per_edge)).all():
feature_tensor[voxel[0], voxel[1], voxel[2], 0] += 1.0
return feature_tensor
def _vectorize(self,
hash_function,
feature_dict=None,
feature_list=None,
channel_power=10):
feature_vector = np.zeros(2**channel_power)
if feature_dict is not None:
on_channels = [
hash_function(feature, channel_power)
for key, feature in feature_dict.items()
]
feature_vector[on_channels] += 1
elif feature_list is not None:
feature_vector[0] += len(feature_list)
return feature_vector
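# Hedged end-to-end sketch for RdkitGridFeaturizer (comment-only). The file
# names are hypothetical placeholders; sanitize=True is required for the
# aromatic-interaction features ('pi_stack', 'cation_pi', 'ecfp_ligand').
#
#   featurizer = RdkitGridFeaturizer(
#       box_width=16.0, voxel_width=1.0, ecfp_power=5, splif_power=5,
#       feature_types=['ecfp', 'splif', 'charge'], flatten=True, sanitize=True)
#   features = featurizer.featurize_complexes(
#       mol_files=["ligand.sdf"], protein_pdbs=["protein.pdb"])
#   # features is a np.ndarray with one row per (optionally rotated) complex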
|
Agent007/deepchem
|
deepchem/feat/rdkit_grid_featurizer.py
|
Python
|
mit
| 51,675
|
[
"RDKit"
] |
3065817a3d40ba62ffe4979bd3117d1a17756e0170dbf4e9c915c83870648067