text
stringlengths 12
1.05M
| repo_name
stringlengths 5
86
| path
stringlengths 4
191
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 12
1.05M
| keyword
listlengths 1
23
| text_hash
stringlengths 64
64
|
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright 2009 James Hensman and Michael Dewar
# Licensed under the Gnu General Public license, see COPYING
import numpy as np
import sys
sys.path.append('../src')
from pyvb import nodes,Network
def PCA_missing_data(plot=True):
    """Variational-Bayes Principal Component Analysis demo with missing data.

    Generates data from a linear-Gaussian model X = Z W^T + mu + noise,
    erases Nmissing entries at random, then infers the loadings W, latents
    Z, mean Mu and noise precision Beta with pyvb.

    plot: if True, draw pylab/hinton figures comparing truth and inference.
    """
    #Principal Component Analysis, with randomly missing data
    q = 2 #latent dimension
    d = 5 #observation dimension
    N = 200
    niters = 200  # NOTE(review): unused -- net.learn(100) below hard-codes 100
    Nmissing = 100
    # ground-truth loadings, latents, mean and noise precision
    true_W = np.random.randn(d,q)
    true_Z = np.random.randn(N,q)
    true_mean = np.random.randn(d,1)
    true_prec = 20.
    Xdata_full = np.dot(true_Z,true_W.T) + true_mean.T
    Xdata_observed = Xdata_full + np.random.randn(N,d)*np.sqrt(1./true_prec)
    #erase some data
    missing_index_i = np.argsort(np.random.randn(N))[:Nmissing]
    missing_index_j = np.random.multinomial(1,np.ones(d)/d,Nmissing).nonzero()[1]
    Xdata = Xdata_observed.copy()
    Xdata[missing_index_i,missing_index_j] = np.nan
    #set up the problem...
    # one Gaussian node per column of W, stacked into a d-by-q matrix node
    Ws = [nodes.Gaussian(d,np.zeros((d,1)),np.eye(d)*1e-3) for i in range(q)]
    W = nodes.hstack(Ws)
    Mu = nodes.Gaussian(d,np.zeros((d,1)),np.eye(d)*1e-3)
    Beta = nodes.Gamma(d,1e-3,1e-3)
    Zs = [nodes.Gaussian(q,np.zeros((q,1)),np.eye(q)) for i in range(N)]
    Xs = [nodes.Gaussian(d,W*z+Mu,Beta) for z in Zs]
    # observe the (partially missing) data; NaNs mark the erased entries
    [xnode.observe(xval.reshape(d,1)) for xnode,xval in zip(Xs,Xdata)]
    #make a network object
    net = Network()
    net.addnode(W)
    net.fetch_network()# automagically fetches all of the other nodes...
    #infer!
    net.learn(100)
    #plot
    if plot:
        import pylab
        import hinton
        #compare true and learned W (up to rotation, via QR decomposition)
        Qtrue,Rtrue = np.linalg.qr(true_W)
        Qlearn,Rlearn = np.linalg.qr(W.pass_down_Ex())
        pylab.figure();pylab.title('True W')
        hinton.hinton(Qtrue)
        pylab.figure();pylab.title('E[W]')
        hinton.hinton(Qlearn)
        if q==2:#plot the latent variables
            pylab.figure();pylab.title('true Z')
            pylab.scatter(true_Z[:,0],true_Z[:,1],50,true_Z[:,0])
            pylab.figure();pylab.title('learned Z')
            learned_Z = np.hstack([z.pass_down_Ex() for z in Zs]).T
            pylab.scatter(learned_Z[:,0],learned_Z[:,1],50,true_Z[:,0])
        #recovered X mean
        X_rec = np.hstack([x.pass_down_Ex() for x in Xs]).T
        #Recovered X Variance
        #slight hack here - set q variance of observed nodes to zeros (it should be random...)
        for x in Xs:
            if x.observed:
                x.qcov *=0
        var_rec = np.vstack([np.diag(x.qcov) for x in Xs]) + 1./np.diag(Beta.pass_down_Ex())
        #plot each recovered signal in a separate figure
        for i in range(d):
            pylab.figure();pylab.title('recovered_signal '+str(i))
            pylab.plot(Xdata_full[:,i],'g',marker='.',label='True') # 'true' values of missing data (without noise)
            pylab.plot(X_rec[:,i],'b',label='Recovered') # recovered mising data values
            pylab.plot(Xdata[:,i],'k',marker='o',linewidth=2,label='Observed') # with noise, and holes where we took out values
            pylab.legend()
            # shaded +/- 2 standard-deviation band around the recovered mean
            volume_x = np.hstack((np.arange(len(Xs)),np.arange(len(Xs))[::-1]))
            volume_y = np.hstack((X_rec[:,i]+2*np.sqrt(var_rec[:,i]), X_rec[:,i][::-1]-2*np.sqrt(var_rec[:,i])[::-1]))
            pylab.fill(volume_x,volume_y,'b',alpha=0.3)
        # report inferred noise precision and mean against the ground truth
        print '\nBeta'
        print true_prec,Beta.pass_down_Ex()[0,0]
        print '\nMu'
        print np.hstack((true_mean,Mu.pass_down_Ex()))
        pylab.show()
if __name__=='__main__':
    PCA_missing_data(True)
|
jameshensman/pyvb
|
examples/PCA_missing_data.py
|
Python
|
gpl-3.0
| 3,264
|
[
"Gaussian"
] |
905b65c0c64a26bb697c65d26f7a0de86df1c75c0dcf8aa18a9de286ff01540e
|
import chardet
import codecs
import os
import re
class Clippings(object):
    """Kindle clippings read from a source file, ready for HTML export."""

    def __init__(self, source, dest=None):
        """Prepare the import and store it into the 'clippings' dict."""
        self.source = source
        if dest is None:
            self.dest = self._get_default_dest()
        else:
            self.dest = dest
        self.book_author_couples = ()
        self.clippings = []
        self._fetch()

    def _get_default_dest(self):
        """Default the output path to <InputFilename>.html beside the source."""
        source_path = os.path.realpath(self.source)
        folder, base = os.path.split(source_path)
        stem = os.path.splitext(base)[0]
        return os.path.join(folder, stem + '.html')

    def _fetch(self):
        """Import clippings and book_author_couples from the source file."""
        for clipping in ClippingsIterator(self.source):
            # Only highlights
            if clipping["type"] == "Highlight":
                self.clippings.append(clipping)
        # will be useful in the HTML to group by book/author
        self.book_author_couples = {(c['book'], c['author'])
                                    for c in self.clippings}

    def export_to_html(self):
        """Render the clippings to self.dest as HTML via a Jinja2 template."""
        from jinja2 import Environment, PackageLoader  # available from pip
        env = Environment(loader=PackageLoader('whoarder', 'templates'),
                          autoescape=True,
                          extensions=['jinja2.ext.autoescape'])
        html = env.get_template('template1.html').render(
            clippings=self.clippings,
            book_author_couples=self.book_author_couples)
        with open(self.dest, mode='w', encoding='utf-8') as output:
            output.write(html)
class ClippingsIterator(object):
    '''
    Iterator that abstracts the Kindle format and spits a dict per clipping.
    A 'clipping' can be either a Highlight or a Note, and is (as far as I
    know, on my Kindle) a succession of five lines (see ex. and regexes below):
    - Lines 1 & 2 contain metadata
    - Line 3 is empty
    - Line 4 is the clipping
    - Line 5 is the separator
    Example:
    <book> (<author_last_name>, <author_first_name>)
    - Your <type> on Page <page> | Location <locs>-<loce> | Added on <date>

    <contents>
    ==========
    '''
    # line terminating every clipping record
    _clipping_separator = '==========\n'
    # first metadata line: "<book> (<author>)"
    _clipping_line1 = re.compile(r'''
        ^(?P<book>.*)          # Le Petit Prince
        \ \((?P<author>.*)\)$  # (De Saint-Exupery, Antoine)
        ''', re.VERBOSE | re.IGNORECASE)
    # second metadata line: type, page, location, date
    _clipping_line2 = re.compile(r'''
        ^-\ Your\ (?P<type>\w*)                         # Your Highlight
        \ (?:on\ )?(?P<page>Unnumbered\ Page|Page\ .*)  # on Page 42
        \ \|\ (?:on\ )?Location\ (?P<location>.*)       # | Location 123-321
        \ \|\ Added\ on\ (?P<date>.*)$                  # | Added on...
        ''', re.VERBOSE | re.IGNORECASE)

    def __init__(self, source):
        # open the clippings file with whatever encoding chardet detects
        detected_encoding = _detect_encoding(source)
        self.source_file = open(source, mode='r', encoding=detected_encoding)

    def __iter__(self):
        return self

    def __next__(self):
        # Accumulate lines into a buffer until the separator is reached;
        # more than 5 lines without a separator means a damaged file.
        clipping_buffer = []
        count = 1
        while True:
            if count > 5:
                raise InvalidFormatException('''Input file doesn't seem to be
                a clippings file, separators are missing or damaged''')
            if self.source_file.closed:
                raise StopIteration
            line = self.source_file.readline()
            if not line:
                # end of file: close the handle and finish the iteration
                self.source_file.close()
                raise StopIteration
            elif line != self._clipping_separator:
                # Kindle writes a FEFF BOM at the start of each clipping
                # (i.e. every 6 lines), which is clearly wrong. We strip it.
                if line[0] == "\ufeff":
                    line = line.replace("\ufeff", "")
                clipping_buffer.append(line.strip())
                count += 1
            else:
                break
        # normalize clippings without a page number so that the line2
        # regex (which expects a Page field) still matches
        if 'Page' not in clipping_buffer[1] and 'page' not in clipping_buffer[1]:
            clipping_buffer[1] = re.sub(r'(- Your .*?) (.*)',
                                        r'\1 on Unnumbered Page | \2',
                                        clipping_buffer[1])
        try:
            line_dict = self._clipping_line1.search(clipping_buffer[0]).groupdict()
            line_dic2 = self._clipping_line2.search(clipping_buffer[1]).groupdict()
            line_dict.update(line_dic2)
            line_dict['contents'] = clipping_buffer[3]
            return line_dict
        except AttributeError:
            # search() returned None (regex mismatch): warn and skip to the
            # next clipping
            print("Failed to import the following note, please report to https://github.com/ronjouch/whoarder :\n {0}\n".format(clipping_buffer))
            return self.__next__()
def _detect_encoding(source):
'''
Returns the encoding of the source file, using chardet.
'''
rawdata = open(source, "rb").read()
# chardet detects UTF-8 with BOM as 'UTF-8' (I don't know why), i.e.
# fails to notify us about the BOM, resulting in a string prepended
# with \ufeff, so we manually detect and set the utf-8-sig encoding
if rawdata.startswith(codecs.BOM_UTF8):
detected_encoding = 'utf-8-sig'
else:
result = chardet.detect(rawdata)
detected_encoding = result['encoding']
return detected_encoding
class InvalidFormatException(Exception):
    """Raised when the input does not look like a Kindle clippings file."""
    pass
|
rejuvyesh/whoarder
|
whoarder/clippings.py
|
Python
|
mit
| 5,790
|
[
"FEFF"
] |
ff2507a623511705aa53c775aadef2c2c218583d620aeaa50b9425e9a85ca30e
|
#!/usr/bin/env vpython
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper for running gpu integration tests on Fuchsia devices."""
import argparse
import logging
import os
import shutil
import subprocess
import sys
import tempfile
import time
from gpu_tests import path_util
sys.path.insert(0,
os.path.join(path_util.GetChromiumSrcDir(), 'build', 'fuchsia'))
from common_args import (AddCommonArgs, ConfigureLogging,
GetDeploymentTargetForArgs)
from symbolizer import RunSymbolizer
def main():
    """Run the GPU integration tests against a Fuchsia device.

    Parses the common Fuchsia deployment arguments, boots/connects to the
    target, streams the device log through the symbolizer, installs the
    web_engine packages, then delegates to run_gpu_integration_test.py.
    Returns that subprocess's exit code.
    """
    parser = argparse.ArgumentParser()
    AddCommonArgs(parser)
    args, gpu_test_args = parser.parse_known_args()
    ConfigureLogging(args)

    # If output directory is not set, assume the script is being launched
    # from the output directory.
    if not args.output_directory:
        args.output_directory = os.getcwd()

    # Create a temporary log file that Telemetry will look to use to build
    # an artifact when tests fail.
    temp_log_file = False
    if not args.system_log_file:
        args.system_log_file = os.path.join(tempfile.mkdtemp(), 'system-log')
        temp_log_file = True

    package_names = ['web_engine', 'web_engine_shell']
    web_engine_dir = os.path.join(args.output_directory, 'gen', 'fuchsia',
                                  'engine')
    gpu_script = [
        os.path.join(path_util.GetChromiumSrcDir(), 'content', 'test', 'gpu',
                     'run_gpu_integration_test.py')
    ]
    # Pass all other arguments to the gpu integration tests.
    gpu_script.extend(gpu_test_args)
    try:
        with GetDeploymentTargetForArgs(args) as target:
            target.Start()
            # NOTE(review): reaches into a private member of target to get
            # the forwarded SSH port -- confirm no public accessor exists.
            _, fuchsia_ssh_port = target._GetEndpoint()
            gpu_script.extend(['--fuchsia-ssh-config-dir', args.output_directory])
            gpu_script.extend(['--fuchsia-ssh-port', str(fuchsia_ssh_port)])
            gpu_script.extend(['--fuchsia-system-log-file', args.system_log_file])

            # Set up logging of WebEngine
            listener = target.RunCommandPiped(['log_listener'],
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.STDOUT)
            build_ids_paths = map(
                lambda package_name: os.path.join(
                    web_engine_dir, package_name, 'ids.txt'),
                package_names)
            # NOTE(review): the log-file handle opened inline here is never
            # explicitly closed; presumably the symbolizer owns it for the
            # process lifetime -- confirm.
            symbolizer = RunSymbolizer(listener.stdout, open(args.system_log_file,
                                                             'w'), build_ids_paths)

            # Keep the Amber repository live while the test runs.
            with target.GetAmberRepo():
                # Install necessary packages on the device.
                far_files = map(
                    lambda package_name: os.path.join(
                        web_engine_dir, package_name, package_name + '.far'),
                    package_names)
                target.InstallPackage(far_files)
                return subprocess.call(gpu_script)
    finally:
        # Remove the temporary system-log directory if we created one above.
        if temp_log_file:
            shutil.rmtree(os.path.dirname(args.system_log_file))


if __name__ == '__main__':
    sys.exit(main())
|
endlessm/chromium-browser
|
content/test/gpu/run_gpu_integration_test_fuchsia.py
|
Python
|
bsd-3-clause
| 3,076
|
[
"Amber"
] |
e6399bc79217941d183d3fefc0d9e3576b19e11c1bcbde2d0c13def9bf5cfcba
|
"""
Synfirechain-like example
"""
#!/usr/bin/python
import os
import spynnaker.pyNN as p
import spynnaker_external_devices_plugin.pyNN as q
import numpy, pylab
p.setup(timestep=1.0, min_delay = 1.0, max_delay = 144.0)
nNeurons = 3 # number of neurons in each population
max_delay = 50
cell_params_lif = {'cm' : 0.25, # nF
'i_offset' : 0.0,
'tau_m' : 20.0,
'tau_refrac': 2.0,
'tau_syn_E' : 5.0,
'tau_syn_I' : 5.0,
'v_reset' : -70.0,
'v_rest' : -65.0,
'v_thresh' : -50.0
}
populations = list()
projections = list()
weight_to_spike = 2.0
delay = 3
delays = list()
loopConnections = list()
for i in range(0, nNeurons):
delays.append(float(delay))
singleConnection = (i, ((i + 1) % nNeurons), weight_to_spike, delay)
loopConnections.append(singleConnection)
injectionConnection = [(0, 0, weight_to_spike, 1)]
spikeArray = {'spike_times': [[0]]}
populations.append(p.Population(nNeurons, p.IF_curr_exp, cell_params_lif, label='spikes_out'))
populations.append(p.Population(1, p.SpikeSourceArray, spikeArray, label='inputSpikes_1'))
projections.append(p.Projection(populations[0], populations[0], p.FromListConnector(loopConnections)))
projections.append(p.Projection(populations[1], populations[0], p.FromListConnector(injectionConnection)))
populations[0].record()
q.activate_live_output_for(populations[0])
#populations[0].set_constraint(p.PlacerChipAndCoreConstraint(0,0,2))
#populations[1].set_constraint(p.PlacerChipAndCoreConstraint(0,0,3))
run_time = 10
print "Running for {} ms".format(run_time)
p.run(run_time)
v = None
gsyn = None
spikes = None
spikes = populations[0].getSpikes(compatible_output=True)
print "The number of spikes in pop 0 is", len(spikes)
if spikes is not None:
#print spikes
pylab.figure()
pylab.plot([i[1] for i in spikes], [i[0] for i in spikes], ".")
pylab.ylabel('neuron id')
pylab.xlabel('Time/ms')
pylab.xlim(0,run_time)
pylab.ylim(-1,2)
pylab.title('spikes')
pylab.show()
else:
print "No spikes received"
p.end()
|
svadams/SpinnIOTest
|
examples/live_packet_output_synfire_chain.py
|
Python
|
gpl-3.0
| 2,220
|
[
"NEURON"
] |
2b513f8091d5d7014bfa29656c846fe641dafa6056cc9a4324e6a63529c56809
|
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
from collections import OrderedDict
import sys
import imp
import os
import os.path
import zipfile
from io import BytesIO
import math
from random import randint
import numpy
from PIL import Image, ImageEnhance, ImageOps, ImageDraw
import logging
import functools
from . import util
BLOCKTEX = "assets/minecraft/textures/block/"

# global variables to collate information in @material decorators
blockmap_generators = {}  # maps (blockid, data) -> texture generator callable

known_blocks = set()
used_datas = set()

max_blockid = 0
max_data = 0

transparent_blocks = set()
solid_blocks = set()
fluid_blocks = set()
nospawn_blocks = set()
nodata_blocks = set()

# This is here for circular import reasons.
# Please don't ask, I choose to repress these memories.
# ... okay fine I'll tell you.
# Initialising the C extension requires access to the globals above.
# Due to the circular import, this wouldn't work, unless we reload the
# module in the C extension or just move the import below its dependencies.
from .c_overviewer import alpha_over


class TextureException(Exception):
    "To be thrown when a texture is not found."
    pass


# NOTE(review): ordering appears significant (presumably indexed by block
# data value) -- confirm at call sites before reordering.
color_map = ["white", "orange", "magenta", "light_blue", "yellow", "lime", "pink", "gray",
             "light_gray", "cyan", "purple", "blue", "brown", "green", "red", "black"]
##
## Textures object
##
class Textures(object):
"""An object that generates a set of block sprites to use while
rendering. It accepts a background color, north direction, and
local textures path.
"""
def __init__(self, texturepath=None, bgcolor=(26, 26, 26, 0), northdirection=0):
    """Create a texture generator.

    texturepath: optional path to a resource pack or directory searched
        first for texture files (stored as find_file_local_path).
    bgcolor: RGBA background color used behind rendered sprites.
    northdirection: stored as self.rotation.
    """
    self.bgcolor = bgcolor
    self.rotation = northdirection
    self.find_file_local_path = texturepath

    # not yet configurable
    self.texture_size = 24
    self.texture_dimensions = (self.texture_size, self.texture_size)

    # this is set in generate()
    self.generated = False

    # see load_image_texture()
    self.texture_cache = {}

    # once we find a jarfile that contains a texture, we cache the ZipFile object here
    self.jars = OrderedDict()
##
## pickle support
##
def __getstate__(self):
# we must get rid of the huge image lists, and other images
attributes = self.__dict__.copy()
for attr in ['blockmap', 'biome_grass_texture', 'watertexture', 'lavatexture', 'firetexture', 'portaltexture', 'lightcolor', 'grasscolor', 'foliagecolor', 'watercolor', 'texture_cache']:
try:
del attributes[attr]
except KeyError:
pass
attributes['jars'] = OrderedDict()
return attributes
def __setstate__(self, attrs):
# regenerate textures, if needed
for attr, val in list(attrs.items()):
setattr(self, attr, val)
self.texture_cache = {}
if self.generated:
self.generate()
##
## The big one: generate()
##
def generate(self):
    """Generate the full block sprite map.

    Requires the foliage/grass colormap textures (raises TextureException,
    after logging an explanatory error, when they are missing). Populates
    self.biome_grass_texture and self.blockmap, rescales everything when
    texture_size differs from the native 24px, and sets self.generated.
    """
    # Make sure we have the foliage/grasscolor images available
    try:
        self.load_foliage_color()
        self.load_grass_color()
    except TextureException as e:
        logging.error(
            "Your system is missing either assets/minecraft/textures/colormap/foliage.png "
            "or assets/minecraft/textures/colormap/grass.png. Either complement your "
            "resource pack with these texture files, or install the vanilla Minecraft "
            "client to use as a fallback.")
        raise e

    # generate biome grass mask
    self.biome_grass_texture = self.build_block(self.load_image_texture("assets/minecraft/textures/block/grass_block_top.png"), self.load_image_texture("assets/minecraft/textures/block/grass_block_side_overlay.png"))

    # generate the blocks
    global blockmap_generators
    global known_blocks, used_datas
    self.blockmap = [None] * max_blockid * max_data

    for (blockid, data), texgen in list(blockmap_generators.items()):
        tex = texgen(self, blockid, data)
        self.blockmap[blockid * max_data + data] = self.generate_texture_tuple(tex)

    if self.texture_size != 24:
        # rescale biome grass
        self.biome_grass_texture = self.biome_grass_texture.resize(self.texture_dimensions, Image.ANTIALIAS)

        # rescale the rest
        # BUG FIX: this loop previously read and wrote a bare `blockmap`
        # name, which is undefined in this scope and raised NameError
        # whenever texture_size != 24; it must operate on self.blockmap.
        for i, tex in enumerate(self.blockmap):
            if tex is None:
                continue
            block = tex[0]
            scaled_block = block.resize(self.texture_dimensions, Image.ANTIALIAS)
            self.blockmap[i] = self.generate_texture_tuple(scaled_block)

    self.generated = True
##
## Helpers for opening textures
##
def find_file(self, filename, mode="rb", verbose=False):
    """Searches for the given file and returns an open handle to it.

    This searches the following locations in this order:

    * In the directory textures_path given in the initializer if not already open
    * In an already open resource pack or client jar file
    * In the resource pack given by textures_path
    * The program dir (same dir as overviewer.py) for extracted textures
    * On Darwin, in /Applications/Minecraft for extracted textures
    * Inside a minecraft client jar. Client jars are searched for in the
      following location depending on platform:

        * On Windows, at %APPDATA%/.minecraft/versions/
        * On Darwin, at
          $HOME/Library/Application Support/minecraft/versions
        * at $HOME/.minecraft/versions/

      Only the latest non-snapshot version >1.6 is used
    * The overviewer_core/data/textures dir

    Raises TextureException when the file cannot be found anywhere.
    """
    if verbose: logging.info("Starting search for {0}".format(filename))

    # Look for the file is stored in with the overviewer
    # installation. We include a few files that aren't included with Minecraft
    # textures. This used to be for things such as water and lava, since
    # they were generated by the game and not stored as images. Nowdays I
    # believe that's not true, but we still have a few files distributed
    # with overviewer.
    # Do this first so we don't try all .jar files for stuff like "water.png"
    programdir = util.get_program_path()
    if verbose: logging.info("Looking for texture in overviewer_core/data/textures")
    path = os.path.join(programdir, "overviewer_core", "data", "textures", filename)
    if os.path.isfile(path):
        if verbose: logging.info("Found %s in '%s'", filename, path)
        return open(path, mode)
    elif hasattr(sys, "frozen") or imp.is_frozen("__main__"):
        # windows special case, when the package dir doesn't exist
        # NOTE(review): the 'imp' module is deprecated in modern Python;
        # left as-is pending a migration of the frozen-executable check.
        path = os.path.join(programdir, "textures", filename)
        if os.path.isfile(path):
            if verbose: logging.info("Found %s in '%s'", filename, path)
            return open(path, mode)

    # A texture path was given on the command line. Search this location
    # for the file first.
    if self.find_file_local_path:
        if (self.find_file_local_path not in self.jars
                and os.path.isfile(self.find_file_local_path)):
            # Must be a resource pack. Look for the requested file within
            # it.
            try:
                pack = zipfile.ZipFile(self.find_file_local_path)
                # pack.getinfo() will raise KeyError if the file is
                # not found.
                pack.getinfo(filename)
                if verbose: logging.info("Found %s in '%s'", filename,
                                         self.find_file_local_path)
                self.jars[self.find_file_local_path] = pack
                # ok cool now move this to the start so we pick it first
                self.jars.move_to_end(self.find_file_local_path, last=False)
                return pack.open(filename)
            except (zipfile.BadZipfile, KeyError, IOError):
                pass
        elif os.path.isdir(self.find_file_local_path):
            full_path = os.path.join(self.find_file_local_path, filename)
            if os.path.isfile(full_path):
                if verbose: logging.info("Found %s in '%s'", filename, full_path)
                return open(full_path, mode)

    # We already have some jars open, better use them.
    if len(self.jars) > 0:
        for jarpath in self.jars:
            try:
                jar = self.jars[jarpath]
                jar.getinfo(filename)
                if verbose: logging.info("Found (cached) %s in '%s'", filename,
                                         jarpath)
                return jar.open(filename)
            except (KeyError, IOError) as e:
                pass

    # If we haven't returned at this point, then the requested file was NOT
    # found in the user-specified texture path or resource pack.
    if verbose: logging.info("Did not find the file in specified texture path")

    # Look in the location of the overviewer executable for the given path
    path = os.path.join(programdir, filename)
    if os.path.isfile(path):
        if verbose: logging.info("Found %s in '%s'", filename, path)
        return open(path, mode)

    if sys.platform.startswith("darwin"):
        path = os.path.join("/Applications/Minecraft", filename)
        if os.path.isfile(path):
            if verbose: logging.info("Found %s in '%s'", filename, path)
            return open(path, mode)

    if verbose: logging.info("Did not find the file in overviewer executable directory")
    if verbose: logging.info("Looking for installed minecraft jar files...")

    # Find an installed minecraft client jar and look in it for the texture
    # file we need.
    versiondir = ""
    if "APPDATA" in os.environ and sys.platform.startswith("win"):
        versiondir = os.path.join(os.environ['APPDATA'], ".minecraft", "versions")
    elif "HOME" in os.environ:
        # For linux:
        versiondir = os.path.join(os.environ['HOME'], ".minecraft", "versions")
        if not os.path.exists(versiondir) and sys.platform.startswith("darwin"):
            # For Mac:
            versiondir = os.path.join(os.environ['HOME'], "Library",
                                      "Application Support", "minecraft", "versions")

    try:
        if verbose: logging.info("Looking in the following directory: \"%s\"" % versiondir)
        versions = os.listdir(versiondir)
        if verbose: logging.info("Found these versions: {0}".format(versions))
    except OSError:
        # Directory doesn't exist? Ignore it. It will find no versions and
        # fall through the checks below to the error at the bottom of the
        # method.
        versions = []

    available_versions = []
    for version in versions:
        # Look for the latest non-snapshot that is at least 1.8. This
        # version is only compatible with >=1.8, and we cannot in general
        # tell if a snapshot is more or less recent than a release.
        # Allow two component names such as "1.8" and three component names
        # such as "1.8.1"
        if version.count(".") not in (1,2):
            continue
        try:
            versionparts = [int(x) for x in version.split(".")]
        except ValueError:
            continue
        if versionparts < [1,8]:
            continue
        available_versions.append(versionparts)
    available_versions.sort(reverse=True)
    if not available_versions:
        if verbose: logging.info("Did not find any non-snapshot minecraft jars >=1.8.0")
    while(available_versions):
        most_recent_version = available_versions.pop(0)
        if verbose: logging.info("Trying {0}. Searching it for the file...".format(".".join(str(x) for x in most_recent_version)))

        jarname = ".".join(str(x) for x in most_recent_version)
        jarpath = os.path.join(versiondir, jarname, jarname + ".jar")

        if os.path.isfile(jarpath):
            try:
                jar = zipfile.ZipFile(jarpath)
                jar.getinfo(filename)
                if verbose: logging.info("Found %s in '%s'", filename, jarpath)
                self.jars[jarpath] = jar
                return jar.open(filename)
            except (KeyError, IOError) as e:
                pass
            except (zipfile.BadZipFile) as e:
                logging.warning("Your jar {0} is corrupted, I'll be skipping it, but you "
                                "should probably look into that.".format(jarpath))

        if verbose: logging.info("Did not find file {0} in jar {1}".format(filename, jarpath))

    raise TextureException("Could not find the textures while searching for '{0}'. Try specifying the 'texturepath' option in your config file.\nSet it to the path to a Minecraft Resource pack.\nAlternately, install the Minecraft client (which includes textures)\nAlso see <http://docs.overviewer.org/en/latest/running/#installing-the-textures>\n(Remember, this version of Overviewer requires a 1.17-compatible resource pack)\n(Also note that I won't automatically use snapshots; you'll have to use the texturepath option to use a snapshot jar)".format(filename))
def load_image_texture(self, filename):
    """Load *filename* and normalize it to a 16x16 texture.

    Textures may be animated or in a different resolution than 16x16;
    non-square images are cropped to their top square and anything not
    16px wide is rescaled. The result is stored in the texture cache.
    """
    texture = self.load_image(filename)

    width, height = texture.size
    # non-square (e.g. animated strip): keep only the leading square frame
    if width != height:
        texture = texture.crop((0, 0, width, width))
    # higher/lower-resolution pack: normalize to 16x16
    if width != 16:
        texture = texture.resize((16, 16), Image.ANTIALIAS)

    self.texture_cache[filename] = texture
    return texture
def load_image(self, filename):
    """Returns an image object.

    Results are memoized in self.texture_cache. Failures are cached too:
    the raised TextureException/IOError is stored under the filename and
    re-raised on subsequent calls, so a missing texture is only searched
    for once.
    """
    try:
        img = self.texture_cache[filename]
        if isinstance(img, Exception):  # Did we cache an exception?
            raise img                   # Okay then, raise it.
        return img
    except KeyError:
        pass

    try:
        fileobj = self.find_file(filename, verbose=logging.getLogger().isEnabledFor(logging.DEBUG))
    except (TextureException, IOError) as e:
        # We cache when our good friend find_file can't find
        # a texture, so that we do not repeatedly search for it.
        self.texture_cache[filename] = e
        raise e
    # read the whole file into memory before handing it to PIL
    buffer = BytesIO(fileobj.read())
    try:
        img = Image.open(buffer).convert("RGBA")
    except IOError:
        raise TextureException("The texture {} appears to be corrupted. Please fix it. Run "
                               "Overviewer in verbose mode (-v) to find out where I loaded "
                               "that file from.".format(filename))
    self.texture_cache[filename] = img
    return img
def load_water(self):
"""Special-case function for loading water."""
watertexture = getattr(self, "watertexture", None)
if watertexture:
return watertexture
watertexture = self.load_image_texture("assets/minecraft/textures/block/water_still.png")
self.watertexture = watertexture
return watertexture
def load_lava(self):
"""Special-case function for loading lava."""
lavatexture = getattr(self, "lavatexture", None)
if lavatexture:
return lavatexture
lavatexture = self.load_image_texture("assets/minecraft/textures/block/lava_still.png")
self.lavatexture = lavatexture
return lavatexture
def load_portal(self):
"""Special-case function for loading portal."""
portaltexture = getattr(self, "portaltexture", None)
if portaltexture:
return portaltexture
portaltexture = self.load_image_texture("assets/minecraft/textures/block/nether_portal.png")
self.portaltexture = portaltexture
return portaltexture
def load_light_color(self):
"""Helper function to load the light color texture."""
if hasattr(self, "lightcolor"):
return self.lightcolor
try:
lightcolor = list(self.load_image("light_normal.png").getdata())
except Exception:
logging.warning("Light color image could not be found.")
lightcolor = None
self.lightcolor = lightcolor
return lightcolor
def load_grass_color(self):
"""Helper function to load the grass color texture."""
if not hasattr(self, "grasscolor"):
self.grasscolor = list(self.load_image("assets/minecraft/textures/colormap/grass.png").getdata())
return self.grasscolor
def load_foliage_color(self):
"""Helper function to load the foliage color texture."""
if not hasattr(self, "foliagecolor"):
self.foliagecolor = list(self.load_image("assets/minecraft/textures/colormap/foliage.png").getdata())
return self.foliagecolor
#I guess "watercolor" is wrong. But I can't correct as my texture pack don't define water color.
def load_water_color(self):
"""Helper function to load the water color texture."""
if not hasattr(self, "watercolor"):
self.watercolor = list(self.load_image("watercolor.png").getdata())
return self.watercolor
def _split_terrain(self, terrain):
    """Builds and returns a length 256 array of each 16x16 chunk
    of texture.

    terrain: a square atlas image laid out as a 16x16 grid of equally
        sized tiles; each tile is resampled down/up to 16x16.
    """
    textures = []
    (terrain_width, terrain_height) = terrain.size
    # BUG FIX: use floor division -- under Python 3 the original true
    # division made texture_resolution (and hence every EXTENT
    # coordinate below) a float instead of an integer pixel offset.
    texture_resolution = terrain_width // 16
    for y in range(16):
        for x in range(16):
            left = x * texture_resolution
            upper = y * texture_resolution
            right = left + texture_resolution
            lower = upper + texture_resolution
            region = terrain.transform(
                (16, 16),
                Image.EXTENT,
                (left, upper, right, lower),
                Image.BICUBIC)
            textures.append(region)
    return textures
##
## Image Transformation Functions
##

@staticmethod
def transform_image_top(img):
    """Takes a PIL image and rotates it left 45 degrees and shrinks the y axis
    by a factor of 2. Returns the resulting image, which will be 24x12 pixels
    """
    # Resize to 17x17, since the diagonal is approximately 24 pixels, a nice
    # even number that can be split in half twice
    img = img.resize((17, 17), Image.ANTIALIAS)

    # Build the Affine transformation matrix for this perspective
    transform = numpy.matrix(numpy.identity(3))
    # Translate up and left, since rotations are about the origin
    transform *= numpy.matrix([[1,0,8.5],[0,1,8.5],[0,0,1]])
    # Rotate 45 degrees
    ratio = math.cos(math.pi/4)
    #transform *= numpy.matrix("[0.707,-0.707,0;0.707,0.707,0;0,0,1]")
    transform *= numpy.matrix([[ratio,-ratio,0],[ratio,ratio,0],[0,0,1]])
    # Translate back down and right
    transform *= numpy.matrix([[1,0,-12],[0,1,-12],[0,0,1]])
    # scale the image down by a factor of 2
    transform *= numpy.matrix("[1,0,0;0,2,0;0,0,1]")

    # PIL's AFFINE transform expects the top two rows of the matrix,
    # flattened to a 6-element sequence
    transform = numpy.array(transform)[:2,:].ravel().tolist()

    newimg = img.transform((24,12), Image.AFFINE, transform)
    return newimg

@staticmethod
def transform_image_side(img):
    """Takes an image and shears it for the left side of the cube (reflect for
    the right side)"""
    # Size of the cube side before shear
    img = img.resize((12,12), Image.ANTIALIAS)
    # Apply shear
    transform = numpy.matrix(numpy.identity(3))
    transform *= numpy.matrix("[1,0,0;-0.5,1,0;0,0,1]")
    transform = numpy.array(transform)[:2,:].ravel().tolist()
    newimg = img.transform((12,18), Image.AFFINE, transform)
    return newimg

@staticmethod
def transform_image_slope(img):
    """Takes an image and shears it in the shape of a slope going up
    in the -y direction (reflect for +x direction). Used for minetracks"""
    # Take the same size as trasform_image_side
    img = img.resize((12,12), Image.ANTIALIAS)
    # Apply shear
    transform = numpy.matrix(numpy.identity(3))
    transform *= numpy.matrix("[0.75,-0.5,3;0.25,0.5,-3;0,0,1]")
    transform = numpy.array(transform)[:2,:].ravel().tolist()
    newimg = img.transform((24,24), Image.AFFINE, transform)
    return newimg

@staticmethod
def transform_image_angle(img, angle):
    """Takes an image an shears it in arbitrary angle with the axis of
    rotation being vertical.

    WARNING! Don't use angle = pi/2 (or multiplies), it will return
    a blank image (or maybe garbage).

    NOTE: angle is in the image not in game, so for the left side of a
    block angle = 30 degree.
    """
    # Take the same size as trasform_image_side
    img = img.resize((12,12), Image.ANTIALIAS)

    # some values
    cos_angle = math.cos(angle)
    sin_angle = math.sin(angle)

    # function_x and function_y are used to keep the result image in the
    # same position, and constant_x and constant_y are the coordinates
    # for the center for angle = 0.
    constant_x = 6.
    constant_y = 6.
    function_x = 6.*(1-cos_angle)
    function_y = -6*sin_angle
    big_term = ( (sin_angle * (function_x + constant_x)) - cos_angle* (function_y + constant_y))/cos_angle

    # The numpy array is not really used, but is helpful to
    # see the matrix used for the transformation.
    transform = numpy.array([[1./cos_angle, 0, -(function_x + constant_x)/cos_angle],
                             [-sin_angle/(cos_angle), 1., big_term ],
                             [0, 0, 1.]])

    # flatten the top two rows for PIL's AFFINE transform
    transform = tuple(transform[0]) + tuple(transform[1])

    newimg = img.transform((24,24), Image.AFFINE, transform)
    return newimg
def build_block(self, top, side):
    """From a top texture and a side texture, build a block image.
    top and side should be 16x16 image objects. Returns a 24x24 image.

    If side is falsy only the top face is drawn.
    """
    img = Image.new("RGBA", (24,24), self.bgcolor)

    original_texture = top.copy()  # NOTE(review): unused; kept as-is
    top = self.transform_image_top(top)

    # No side texture: the result is just the sheared top face.
    if not side:
        alpha_over(img, top, (0,0), top)
        return img

    side = self.transform_image_side(side)
    otherside = side.transpose(Image.FLIP_LEFT_RIGHT)

    # Darken the sides slightly. These methods also affect the alpha layer,
    # so save them first (we don't want to "darken" the alpha layer making
    # the block transparent)
    sidealpha = side.split()[3]
    side = ImageEnhance.Brightness(side).enhance(0.9)
    side.putalpha(sidealpha)
    othersidealpha = otherside.split()[3]
    otherside = ImageEnhance.Brightness(otherside).enhance(0.8)
    otherside.putalpha(othersidealpha)

    # Compose: top face first, then the two sheared side faces.
    alpha_over(img, top, (0,0), top)
    alpha_over(img, side, (0,6), side)
    alpha_over(img, otherside, (12,6), otherside)

    # Manually touch up 6 pixels that leave a gap because of how the
    # shearing works out. This makes the blocks perfectly tessellate-able
    for x,y in [(13,23), (17,21), (21,19)]:
        # Copy a pixel to x,y from x-1,y
        img.putpixel((x,y), img.getpixel((x-1,y)))
    for x,y in [(3,4), (7,2), (11,0)]:
        # Copy a pixel to x,y from x+1,y
        img.putpixel((x,y), img.getpixel((x+1,y)))

    return img
def build_slab_block(self, top, side, upper):
    """From a top texture and a side texture, build a slab block image.
    top and side should be 16x16 image objects. upper selects the
    upside-down (top-half) slab. Returns a 24x24 image.
    """
    # cut the side texture in half, keeping only the bottom 8 rows
    mask = side.crop((0,8,16,16))
    side = Image.new(side.mode, side.size, self.bgcolor)
    alpha_over(side, mask,(0,0,16,8), mask)

    # plain slab
    top = self.transform_image_top(top)
    side = self.transform_image_side(side)
    otherside = side.transpose(Image.FLIP_LEFT_RIGHT)

    # darken the sides but preserve their alpha channels
    sidealpha = side.split()[3]
    side = ImageEnhance.Brightness(side).enhance(0.9)
    side.putalpha(sidealpha)
    othersidealpha = otherside.split()[3]
    otherside = ImageEnhance.Brightness(otherside).enhance(0.8)
    otherside.putalpha(othersidealpha)

    # upside down slab: shift every face up by 6 pixels
    delta = 0
    if upper:
        delta = 6

    img = Image.new("RGBA", (24,24), self.bgcolor)
    alpha_over(img, side, (0,12 - delta), side)
    alpha_over(img, otherside, (12,12 - delta), otherside)
    alpha_over(img, top, (0,6 - delta), top)

    # Manually touch up 6 pixels that leave a gap because of how the
    # shearing works out. This makes the blocks perfectly tessellate-able
    if upper:
        for x,y in [(3,4), (7,2), (11,0)]:
            # Copy a pixel to x,y from x+1,y
            img.putpixel((x,y), img.getpixel((x+1,y)))
        for x,y in [(13,17), (17,15), (21,13)]:
            # Copy a pixel to x,y from x-1,y
            img.putpixel((x,y), img.getpixel((x-1,y)))
    else:
        for x,y in [(3,10), (7,8), (11,6)]:
            # Copy a pixel to x,y from x+1,y
            img.putpixel((x,y), img.getpixel((x+1,y)))
        for x,y in [(13,23), (17,21), (21,19)]:
            # Copy a pixel to x,y from x-1,y
            img.putpixel((x,y), img.getpixel((x-1,y)))

    return img
def build_full_block(self, top, side1, side2, side3, side4, bottom=None):
    """From a top texture, a bottom texture and 4 different side textures,
    build a full block with four different faces. All images should be 16x16
    image objects. Returns a 24x24 image. Can be used to render any block.

    side1 is in the -y face of the cube     (top left, east)
    side2 is in the +x                      (top right, south)
    side3 is in the -x                      (bottom left, north)
    side4 is in the +y                      (bottom right, west)

    A non transparent block uses top, side 3 and side 4.

    If top is a tuple then the first item is the top image and the second
    item is an increment (integer) from 0 to 16 (pixels in the
    original minecraft texture). This increment will be used to crop the
    side images and to paste the top image increment pixels lower, so if
    you use an increment of 8, it will draw a half-block.

    NOTE: this method uses the bottom of the texture image (as done in
    minecraft with beds and cakes)
    """
    increment = 0
    if isinstance(top, tuple):
        # scale the 0-16 texture-space increment to the 0-12 rendered height
        increment = int(round((top[1] / 16.)*12.)) # range increment in the block height in pixels (half texture size)
        crop_height = increment
        top = top[0]
        # blank out the top crop_height rows of each provided side texture
        if side1 is not None:
            side1 = side1.copy()
            ImageDraw.Draw(side1).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0))
        if side2 is not None:
            side2 = side2.copy()
            ImageDraw.Draw(side2).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0))
        if side3 is not None:
            side3 = side3.copy()
            ImageDraw.Draw(side3).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0))
        if side4 is not None:
            side4 = side4.copy()
            ImageDraw.Draw(side4).rectangle((0, 0,16,crop_height),outline=(0,0,0,0),fill=(0,0,0,0))

    img = Image.new("RGBA", (24,24), self.bgcolor)

    # first back sides
    if side1 is not None :
        side1 = self.transform_image_side(side1)
        side1 = side1.transpose(Image.FLIP_LEFT_RIGHT)

        # Darken this side.
        sidealpha = side1.split()[3]
        side1 = ImageEnhance.Brightness(side1).enhance(0.9)
        side1.putalpha(sidealpha)

        alpha_over(img, side1, (0,0), side1)

    if side2 is not None :
        side2 = self.transform_image_side(side2)

        # Darken this side.
        sidealpha2 = side2.split()[3]
        side2 = ImageEnhance.Brightness(side2).enhance(0.8)
        side2.putalpha(sidealpha2)

        alpha_over(img, side2, (12,0), side2)

    if bottom is not None :
        bottom = self.transform_image_top(bottom)
        alpha_over(img, bottom, (0,12), bottom)

    # front sides
    if side3 is not None :
        side3 = self.transform_image_side(side3)

        # Darken this side
        sidealpha = side3.split()[3]
        side3 = ImageEnhance.Brightness(side3).enhance(0.9)
        side3.putalpha(sidealpha)

        alpha_over(img, side3, (0,6), side3)

    if side4 is not None :
        side4 = self.transform_image_side(side4)
        side4 = side4.transpose(Image.FLIP_LEFT_RIGHT)

        # Darken this side
        sidealpha = side4.split()[3]
        side4 = ImageEnhance.Brightness(side4).enhance(0.8)
        side4.putalpha(sidealpha)

        alpha_over(img, side4, (12,6), side4)

    if top is not None :
        top = self.transform_image_top(top)
        alpha_over(img, top, (0, increment), top)

    # Manually touch up 6 pixels that leave a gap because of how the
    # shearing works out. This makes the blocks perfectly tessellate-able
    for x,y in [(13,23), (17,21), (21,19)]:
        # Copy a pixel to x,y from x-1,y
        img.putpixel((x,y), img.getpixel((x-1,y)))
    for x,y in [(3,4), (7,2), (11,0)]:
        # Copy a pixel to x,y from x+1,y
        img.putpixel((x,y), img.getpixel((x+1,y)))

    return img
def build_axis_block(self, top, side, data):
    """
    Build a block with an Axis property.
    data = {'y': 0, 'x': 1, 'z': 2}[axis]
    """
    if data == 0:  # up: a plain block
        return self.build_block(top, side)

    def render_x():
        return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))

    def render_z():
        return self.build_full_block(side, None, None, side.rotate(270), top)

    # Map rotation shifts which horizontal axis faces the camera,
    # so the renderer is chosen modulo the world rotation.
    renderers = (render_x, render_z)
    if data == 1:  # x axis
        return renderers[self.rotation % 2]()
    if data == 2:  # z axis
        return renderers[(self.rotation + 1) % 2]()
    # any other data value: implicitly None (matches previous behavior)
def build_sprite(self, side):
    """From a side texture, create a sprite-like texture such as those used
    for spiderwebs or flowers."""
    sprite_img = Image.new("RGBA", (24,24), self.bgcolor)

    face = self.transform_image_side(side)
    mirrored = face.transpose(Image.FLIP_LEFT_RIGHT)

    # Both sheared faces are pasted at the same offset, forming a cross.
    for layer in (face, mirrored):
        alpha_over(sprite_img, layer, (6,3), layer)
    return sprite_img
def build_billboard(self, tex):
    """From a texture, create a billboard-like texture such as
    those used for tall grass or melon stems.
    """
    img = Image.new("RGBA", (24,24), self.bgcolor)

    # Scale the 16x16 texture down and paste it flat, facing the camera.
    face = tex.resize((14, 12), Image.ANTIALIAS)
    alpha_over(img, face, (5,9))
    return img
def generate_opaque_mask(self, img):
    """Build a lighting mask from the image's alpha channel: alpha values
    of 26 and above become fully opaque (255); lower values are scaled
    up by a factor of 10."""
    alpha = img.split()[3]
    return alpha.point(lambda v: v * 10 if v <= 25 else 255)
def tint_texture(self, im, c):
    """Colorize *im* with color *c*, preserving its alpha channel."""
    # Grayscale conversion drops the alpha channel, so colorize first
    # and then copy the original alpha band back in (assumes RGBA input).
    gray = ImageOps.grayscale(im)
    tinted = ImageOps.colorize(gray, (0,0,0), c)
    tinted.putalpha(im.split()[3])
    return tinted
def generate_texture_tuple(self, img):
    """Return the (image, opaque-mask) tuple stored in the blockmap
    array, or None when no image is given."""
    return None if img is None else (img, self.generate_opaque_mask(img))
##
## The other big one: @material and associated framework
##
# the material registration decorator
def material(blockid=[], data=[0], **kwargs):
    """Decorator factory that registers a rendering function for the given
    block id(s) and data value(s) in blockmap_generators.

    Keyword flags ("transparent", "solid", "fluid", "nospawn", "nodata")
    add the block ids to the corresponding module-level property set; each
    flag may be a plain truthy value (applies to all ids) or an iterable
    of specific ids. NOTE: the mutable default arguments are only read
    here, never mutated.
    """
    # mapping from property name to the set to store them in
    properties = {"transparent" : transparent_blocks, "solid" : solid_blocks, "fluid" : fluid_blocks, "nospawn" : nospawn_blocks, "nodata" : nodata_blocks}

    # make sure blockid and data are iterable
    try:
        iter(blockid)
    except Exception:
        blockid = [blockid,]
    try:
        iter(data)
    except Exception:
        data = [data,]

    def inner_material(func):
        global blockmap_generators
        global max_data, max_blockid

        # create a wrapper function with a known signature
        @functools.wraps(func)
        def func_wrapper(texobj, blockid, data):
            return func(texobj, blockid, data)

        used_datas.update(data)
        # max_data / max_blockid are exclusive upper bounds
        if max(data) >= max_data:
            max_data = max(data) + 1

        for block in blockid:
            # set the property sets appropriately
            known_blocks.update([block])
            if block >= max_blockid:
                max_blockid = block + 1
            for prop in properties:
                try:
                    if block in kwargs.get(prop, []):
                        properties[prop].update([block])
                except TypeError:
                    # the flag was a plain (non-iterable) truthy value
                    if kwargs.get(prop, False):
                        properties[prop].update([block])

            # populate blockmap_generators with our function
            for d in data:
                blockmap_generators[(block, d)] = func_wrapper

        return func_wrapper
    return inner_material
# shortcut function for pure blocks, default to solid, nodata
def block(blockid=[], top_image=None, side_image=None, **kwargs):
    """Register a plain cube block; side_image defaults to top_image."""
    if top_image is None:
        raise ValueError("top_image was not provided")
    if side_image is None:
        side_image = top_image
    # caller kwargs override the solid/nodata defaults
    opts = dict({'solid' : True, 'nodata' : True}, **kwargs)

    @material(blockid=blockid, **opts)
    def inner_block(self, unused_id, unused_data):
        return self.build_block(self.load_image_texture(top_image),
                                self.load_image_texture(side_image))
    return inner_block
# shortcut function for sprite blocks, defaults to transparent, nodata
def sprite(blockid=[], imagename=None, **kwargs):
    """Register a sprite-rendered block (flowers, webs, ...)."""
    if imagename is None:
        raise ValueError("imagename was not provided")
    # caller kwargs override the transparent/nodata defaults
    opts = dict({'transparent' : True, 'nodata' : True}, **kwargs)

    @material(blockid=blockid, **opts)
    def inner_sprite(self, unused_id, unused_data):
        return self.build_sprite(self.load_image_texture(imagename))
    return inner_sprite
# shortcut function for billboard blocks, defaults to transparent, nodata
def billboard(blockid=[], imagename=None, **kwargs):
    """Register a billboard-rendered block (tall grass, stems, ...)."""
    if imagename is None:
        raise ValueError("imagename was not provided")
    # caller kwargs override the transparent/nodata defaults
    opts = dict({'transparent' : True, 'nodata' : True}, **kwargs)

    @material(blockid=blockid, **opts)
    def inner_billboard(self, unused_id, unused_data):
        return self.build_billboard(self.load_image_texture(imagename))
    return inner_billboard
##
## and finally: actual texture definitions
##
# stone
@material(blockid=1, data=list(range(7)), solid=True)
def stone(self, blockid, data):
    """Stone and its igneous variants, plain and polished (data 0-6)."""
    textures = {
        0: "assets/minecraft/textures/block/stone.png",
        1: "assets/minecraft/textures/block/granite.png",
        2: "assets/minecraft/textures/block/polished_granite.png",
        3: "assets/minecraft/textures/block/diorite.png",
        4: "assets/minecraft/textures/block/polished_diorite.png",
        5: "assets/minecraft/textures/block/andesite.png",
        6: "assets/minecraft/textures/block/polished_andesite.png",
    }
    img = self.load_image_texture(textures[data])
    return self.build_block(img, img)
@material(blockid=2, data=list(range(11))+[0x10,], solid=True)
def grass(self, blockid, data):
    """Grass block; the 0x10 data bit selects the snow-covered variant."""
    # 0x10 bit means SNOW
    side_img = self.load_image_texture("assets/minecraft/textures/block/grass_block_side.png")
    if data & 0x10:
        side_img = self.load_image_texture("assets/minecraft/textures/block/grass_block_snow.png")
    img = self.build_block(self.load_image_texture("assets/minecraft/textures/block/grass_block_top.png"), side_img)
    if not data & 0x10:
        # non-snowy grass gets the biome-tinted grass overlay on top
        alpha_over(img, self.biome_grass_texture, (0, 0), self.biome_grass_texture)
    return img
# dirt
@material(blockid=3, data=list(range(3)), solid=True)
def dirt_blocks(self, blockid, data):
    """Dirt (0), coarse dirt (1) and podzol (2)."""
    top_name, side_name = [("dirt", "dirt"),                # Normal
                           ("coarse_dirt", "coarse_dirt"),  # Coarse
                           ("podzol_top", "podzol_side")][data]  # Podzol
    top_img = self.load_image_texture("assets/minecraft/textures/block/%s.png" % top_name)
    side_img = self.load_image_texture("assets/minecraft/textures/block/%s.png" % side_name)
    return self.build_block(top_img, side_img)
# cobblestone (blockid 4): plain solid block, same texture on all faces
block(blockid=4, top_image="assets/minecraft/textures/block/cobblestone.png")
# wooden planks
@material(blockid=5, data=list(range(8)), solid=True)
def wooden_planks(self, blockid, data):
    """Planks of every wood type (data 0-7)."""
    plank_tex = {
        0: "assets/minecraft/textures/block/oak_planks.png",       # normal
        1: "assets/minecraft/textures/block/spruce_planks.png",    # pine
        2: "assets/minecraft/textures/block/birch_planks.png",     # birch
        3: "assets/minecraft/textures/block/jungle_planks.png",    # jungle wood
        4: "assets/minecraft/textures/block/acacia_planks.png",    # acacia
        5: "assets/minecraft/textures/block/dark_oak_planks.png",  # dark oak
        6: "assets/minecraft/textures/block/crimson_planks.png",   # crimson
        7: "assets/minecraft/textures/block/warped_planks.png",    # warped
    }
    tex = self.load_image_texture(plank_tex[data])
    return self.build_block(tex, tex)
@material(blockid=6, data=list(range(16)), transparent=True)
def saplings(self, blockid, data):
    """Sapling sprites.

    The species is encoded in the low three bits of data; bit 0x8 is the
    growth counter and is ignored. The previous mask (data & 0x3) could
    never equal 4 or 5, making the acacia and dark oak branches
    unreachable — masking with 0x7 fixes that. Values 6 and 7 are unused
    and fall back to the oak sprite, as before.
    """
    sapling_tex = {
        1: "assets/minecraft/textures/block/spruce_sapling.png",
        2: "assets/minecraft/textures/block/birch_sapling.png",
        3: "assets/minecraft/textures/block/jungle_sapling.png",
        4: "assets/minecraft/textures/block/acacia_sapling.png",
        5: "assets/minecraft/textures/block/dark_oak_sapling.png",  # dark oak/roofed oak/big oak
    }
    tex = self.load_image_texture(sapling_tex.get(
        data & 0x7, "assets/minecraft/textures/block/oak_sapling.png"))
    return self.build_sprite(tex)
# Per-species sapling ids registered individually (presumably the 1.13+
# "flattening" ids mapped by the world loader — TODO confirm against
# the id-mapping code).
sprite(blockid=11385, imagename="assets/minecraft/textures/block/oak_sapling.png")
sprite(blockid=11386, imagename="assets/minecraft/textures/block/spruce_sapling.png")
sprite(blockid=11387, imagename="assets/minecraft/textures/block/birch_sapling.png")
sprite(blockid=11388, imagename="assets/minecraft/textures/block/jungle_sapling.png")
sprite(blockid=11389, imagename="assets/minecraft/textures/block/acacia_sapling.png")
sprite(blockid=11390, imagename="assets/minecraft/textures/block/dark_oak_sapling.png")
sprite(blockid=11413, imagename="assets/minecraft/textures/block/bamboo_stage0.png")
# bedrock
block(blockid=7, top_image="assets/minecraft/textures/block/bedrock.png")
# water, glass, and ice (no inner surfaces)
# uses pseudo-ancildata found in iterate.c
@material(blockid=[8, 9, 20, 79, 95], data=list(range(512)), fluid=(8, 9), transparent=True, nospawn=True, solid=(79, 20, 95))
def no_inner_surfaces(self, blockid, data):
    """Render water (8/9), glass (20), ice (79) and stained glass (95),
    hiding faces that touch a block of the same kind.

    For non-water blocks the low 4 bits carry the stained-glass colour and
    the next 5 bits are per-face visibility flags; water carries the face
    flags unshifted.
    """
    if blockid == 8 or blockid == 9:
        texture = self.load_water()
    elif blockid == 20:
        texture = self.load_image_texture("assets/minecraft/textures/block/glass.png")
    elif blockid == 95:
        texture = self.load_image_texture("assets/minecraft/textures/block/%s_stained_glass.png" % color_map[data & 0x0f])
    else:
        texture = self.load_image_texture("assets/minecraft/textures/block/ice.png")

    # now that we've used the lower 4 bits to get color, shift down to get the 5 bits that encode face hiding
    if not (blockid == 8 or blockid == 9): # water doesn't have a shifted pseudodata
        data = data >> 4

    if (data & 0b10000) == 16:
        top = texture
    else:
        top = None

    if (data & 0b0001) == 1:
        side1 = texture    # top left
    else:
        side1 = None

    if (data & 0b1000) == 8:
        side2 = texture    # top right
    else:
        side2 = None

    if (data & 0b0010) == 2:
        side3 = texture    # bottom left
    else:
        side3 = None

    if (data & 0b0100) == 4:
        side4 = texture    # bottom right
    else:
        side4 = None

    # if nothing shown do not draw at all
    if top is None and side3 is None and side4 is None:
        return None

    img = self.build_full_block(top,None,None,side3,side4)
    return img
@material(blockid=[10, 11], data=list(range(16)), fluid=True, transparent=False, nospawn=True)
def lava(self, blockid, data):
    """Flowing (10) and still (11) lava: a full block of the lava texture."""
    tex = self.load_lava()
    return self.build_block(tex, tex)
# sand
@material(blockid=12, data=list(range(2)), solid=True)
def sand_blocks(self, blockid, data):
    """Sand (data 0) and red sand (data 1)."""
    tex = self.load_image_texture(
        "assets/minecraft/textures/block/sand.png" if data == 0
        else "assets/minecraft/textures/block/red_sand.png")
    return self.build_block(tex, tex)
# Simple solid blocks that use a single texture on every face.
# gravel
block(blockid=13, top_image="assets/minecraft/textures/block/gravel.png")
# gold ore
block(blockid=14, top_image="assets/minecraft/textures/block/gold_ore.png")
# iron ore
block(blockid=15, top_image="assets/minecraft/textures/block/iron_ore.png")
# coal ore
block(blockid=16, top_image="assets/minecraft/textures/block/coal_ore.png")
@material(blockid=[17, 162, 11306, 11307, 11308, 11309, 11310, 11311, 1008, 1009],
          data=list(range(12)), solid=True)
def wood(self, blockid, data):
    """Render logs, stripped logs, all-bark "wood" blocks and
    warped/crimson stems in all three axis orientations.

    The low 2 data bits select the wood type, bits 4+8 the orientation.
    """
    # extract orientation and wood type from data bits
    wood_type = data & 3
    wood_orientation = data & 12
    # a 90 or 270 degree map rotation swaps the two horizontal axes
    if self.rotation == 1:
        if wood_orientation == 4: wood_orientation = 8
        elif wood_orientation == 8: wood_orientation = 4
    elif self.rotation == 3:
        if wood_orientation == 4: wood_orientation = 8
        elif wood_orientation == 8: wood_orientation = 4

    # dictionary of blockid : { wood_type : (top, side) }
    # a None side means "use the top texture on all faces" (bark blocks)
    wood_tex = {
        17: {
            0: ("oak_log_top.png", "oak_log.png"),
            1: ("spruce_log_top.png", "spruce_log.png"),
            2: ("birch_log_top.png", "birch_log.png"),
            3: ("jungle_log_top.png", "jungle_log.png"),
        },
        162: {
            0: ("acacia_log_top.png", "acacia_log.png"),
            1: ("dark_oak_log_top.png", "dark_oak_log.png"),
        },
        11306: {
            0: ("stripped_oak_log_top.png", "stripped_oak_log.png"),
            1: ("stripped_spruce_log_top.png", "stripped_spruce_log.png"),
            2: ("stripped_birch_log_top.png", "stripped_birch_log.png"),
            3: ("stripped_jungle_log_top.png", "stripped_jungle_log.png"),
        },
        11307: {
            0: ("stripped_acacia_log_top.png", "stripped_acacia_log.png"),
            1: ("stripped_dark_oak_log_top.png", "stripped_dark_oak_log.png"),
        },
        11308: {
            0: ("oak_log.png", None),
            1: ("spruce_log.png", None),
            2: ("birch_log.png", None),
            3: ("jungle_log.png", None),
        },
        11309: {
            0: ("acacia_log.png", None),
            1: ("dark_oak_log.png", None),
        },
        11310: {
            0: ("stripped_oak_log.png", None),
            1: ("stripped_spruce_log.png", None),
            2: ("stripped_birch_log.png", None),
            3: ("stripped_jungle_log.png", None),
        },
        11311: {
            0: ("stripped_acacia_log.png", None),
            1: ("stripped_dark_oak_log.png", None),
        },
        1008: {
            0: ("warped_stem_top.png", "warped_stem.png"),
            1: ("warped_stem_top.png", "stripped_warped_stem.png"),
            2: ("crimson_stem_top.png", "crimson_stem.png"),
            3: ("crimson_stem_top.png", "stripped_crimson_stem.png"),
        },
        1009: {
            0: ("warped_stem.png", None),
            1: ("stripped_warped_stem.png", None),
            2: ("crimson_stem.png", None),
            3: ("stripped_crimson_stem.png", None),
        }
    }

    # unknown wood types fall back to entry 0 for the block id
    top_f, side_f = wood_tex[blockid].get(wood_type, wood_tex[blockid][0])
    if not side_f:
        side_f = top_f
    top = self.load_image_texture(BLOCKTEX + top_f)
    side = self.load_image_texture(BLOCKTEX + side_f)

    # choose orientation and paste textures
    if wood_orientation == 0:
        return self.build_block(top, side)
    elif wood_orientation == 4: # east-west orientation
        return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
    elif wood_orientation == 8: # north-south orientation
        return self.build_full_block(side, None, None, side.rotate(270), top)
@material(blockid=[18, 161], data=list(range(16)), transparent=True, solid=True)
def leaves(self, blockid, data):
    """Leaf blocks for all tree species, keyed by (blockid, data)."""
    # mask out the bits 4 and 8
    # they are used for player placed and check-for-decay blocks
    data = data & 0x7
    leaf_tex = {
        (18, 1):  "assets/minecraft/textures/block/spruce_leaves.png",   # pine
        (18, 2):  "assets/minecraft/textures/block/birch_leaves.png",
        (18, 3):  "assets/minecraft/textures/block/jungle_leaves.png",
        (161, 4): "assets/minecraft/textures/block/acacia_leaves.png",
        (161, 5): "assets/minecraft/textures/block/dark_oak_leaves.png",
        (18, 6):  "assets/minecraft/textures/block/flowering_azalea_leaves.png",
        (18, 7):  "assets/minecraft/textures/block/azalea_leaves.png",
    }
    # unknown combinations default to oak, as before
    t = self.load_image_texture(leaf_tex.get(
        (blockid, data), "assets/minecraft/textures/block/oak_leaves.png"))
    return self.build_block(t, t)
# Simple solid blocks that use a single texture on every face.
# sponge
block(blockid=19, top_image="assets/minecraft/textures/block/sponge.png")
# lapis lazuli ore
block(blockid=21, top_image="assets/minecraft/textures/block/lapis_ore.png")
# lapis lazuli block
block(blockid=22, top_image="assets/minecraft/textures/block/lapis_block.png")
# dispenser, dropper, furnace, blast furnace, and smoker
@material(blockid=[23, 61, 158, 11362, 11364], data=list(range(14)), solid=True)
def furnaces(self, blockid, data):
    """Render dispenser (23), furnace (61), dropper (158),
    blast furnace (11362) and smoker (11364).

    The low 3 data bits encode facing; bit 4 encodes lit/triggered.
    """
    # first, do the rotation if needed
    # Masked as bit 4 indicates whether the block is lit/triggered or not
    if self.rotation in [1, 2, 3] and data & 0b111 in [2, 3, 4, 5]:
        rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                        2: {2: 3, 3: 2, 4: 5, 5: 4},
                        3: {2: 4, 3: 5, 4: 3, 5: 2}}
        data = data & 0b1000 | rotation_map[self.rotation][data & 0b111]

    # Rotation angles for top texture using data & 0b111 as an index
    top_rotation_map = [0, 0, 180, 0, 270, 90, 0, 0]

    # Dispenser
    texture_map = {23:    {'top': 'furnace_top', 'side': 'furnace_side',
                           'front': 'dispenser_front', 'top_vert': 'dispenser_front_vertical'},
                   # Furnace
                   61:    {'top': 'furnace_top', 'side': 'furnace_side',
                           'front': 'furnace_front', 'front_on': 'furnace_front_on'},
                   # Dropper
                   158:   {'top': 'furnace_top', 'side': 'furnace_side',
                           'front': 'dropper_front', 'top_vert': 'dropper_front_vertical'},
                   # Blast furnace
                   11362: {'top': 'blast_furnace_top', 'side': 'blast_furnace_side',
                           'front': 'blast_furnace_front', 'front_on': 'blast_furnace_front_on'},
                   # Smoker
                   11364: {'top': 'smoker_top', 'side': 'smoker_side',
                           'front': 'smoker_front', 'front_on': 'smoker_front_on'}}

    if data & 0b111 in [0, 1] and 'top_vert' in texture_map[blockid]:
        # Block has a special top texture when it faces up/down
        # This also affects which texture is used for the sides/front
        top_name = 'top_vert' if data & 0b111 == 1 else 'top'
        side_name = 'top'
        front_name = 'top'
    else:
        top_name = 'top'
        side_name = 'side'
        # Use block's lit/on front texture if it is defined & bit 4 is set
        # Note: Some front_on texture images have multiple frames,
        #       but load_image_texture() crops this appropriately
        #       as long as the image width is 16px
        if data & 0b1000 == 8 and 'front_on' in texture_map[blockid]:
            front_name = 'front_on'
        else:
            front_name = 'front'

    top = self.load_image_texture("assets/minecraft/textures/block/%s.png" %
                                  texture_map[blockid][top_name]).copy()
    top = top.rotate(top_rotation_map[data & 0b111])
    side = self.load_image_texture("assets/minecraft/textures/block/%s.png" %
                                   texture_map[blockid][side_name])
    front = self.load_image_texture("assets/minecraft/textures/block/%s.png" %
                                    texture_map[blockid][front_name])

    if data & 0b111 == 3: # pointing west
        return self.build_full_block(top, None, None, side, front)
    elif data & 0b111 == 4: # pointing north
        return self.build_full_block(top, None, None, front, side)
    else: # in any other direction the front can't be seen
        return self.build_full_block(top, None, None, side, side)
# sandstone
@material(blockid=24, data=list(range(3)), solid=True)
def sandstone(self, blockid, data):
    """Sandstone: normal (0), hieroglyphic/chiseled (1) and soft/cut (2)."""
    top = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png")
    side_path = {
        0: "assets/minecraft/textures/block/sandstone.png",
        1: "assets/minecraft/textures/block/chiseled_sandstone.png",
        2: "assets/minecraft/textures/block/cut_sandstone.png",
    }[data]
    return self.build_block(top, self.load_image_texture(side_path))
# red sandstone
@material(blockid=179, data=list(range(3)), solid=True)
def red_sandstone(self, blockid, data):
    """Red sandstone: normal (0), hieroglyphic/chiseled (1), soft/cut (2).

    Renamed from `sandstone`, which shadowed the blockid-24 handler of the
    same name at module level; registration happens inside @material, so
    rendering behavior is unchanged.
    """
    top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png")
    if data == 0: # normal; has a distinct bottom texture, so use build_full_block
        side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png")
        return self.build_full_block(top, None, None, side, side, self.load_image_texture("assets/minecraft/textures/block/red_sandstone_bottom.png") )
    if data == 1: # hieroglyphic
        return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/chiseled_red_sandstone.png"))
    if data == 2: # soft
        return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/cut_red_sandstone.png"))
# note block (blockid 25): plain solid block
block(blockid=25, top_image="assets/minecraft/textures/block/note_block.png")
# Bed
@material(blockid=26, data=list(range(256)), transparent=True, nospawn=True)
def bed(self, blockid, data):
    """Render a bed half, colored and oriented by the data bits."""
    # Bits 1-2   Rotation
    # Bit  3     Occupancy, no impact on appearance
    # Bit  4     Foot/Head of bed (0 = foot, 1 = head)
    # Bits 5-8   Color

    # first get rotation done
    # Masked to not clobber block head/foot & color info
    data = data & 0b11111100 | ((self.rotation + (data & 0b11)) % 4)

    bed_texture = self.load_image("assets/minecraft/textures/entity/bed/%s.png" % color_map[data >> 4])
    # beds are half-height: paste the top 8 texture pixels lower
    increment = 8
    left_face = None
    right_face = None
    top_face = None
    if data & 0x8 == 0x8:  # head of the bed
        top = bed_texture.copy().crop((6, 6, 22, 22))

        # Composing the side
        side = Image.new("RGBA", (16, 16), self.bgcolor)
        side_part1 = bed_texture.copy().crop((0, 6, 6, 22)).rotate(90, expand=True)
        # foot of the bed
        side_part2 = bed_texture.copy().crop((53, 3, 56, 6))
        side_part2_f = side_part2.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(side, side_part1, (0, 7), side_part1)
        alpha_over(side, side_part2, (0, 13), side_part2)

        end = Image.new("RGBA", (16, 16), self.bgcolor)
        end_part = bed_texture.copy().crop((6, 0, 22, 6)).rotate(180)
        alpha_over(end, end_part, (0, 7), end_part)
        alpha_over(end, side_part2, (0, 13), side_part2)
        alpha_over(end, side_part2_f, (13, 13), side_part2_f)
        # NOTE(review): some directions intentionally leave a face None
        # (that face is hidden behind the other bed half).
        if data & 0x03 == 0x00:    # South
            top_face = top.rotate(180)
            left_face = side.transpose(Image.FLIP_LEFT_RIGHT)
            right_face = end
        elif data & 0x03 == 0x01:  # West
            top_face = top.rotate(90)
            left_face = end
            right_face = side.transpose(Image.FLIP_LEFT_RIGHT)
        elif data & 0x03 == 0x02:  # North
            top_face = top
            left_face = side
        elif data & 0x03 == 0x03:  # East
            top_face = top.rotate(270)
            right_face = side
    else:  # foot of the bed
        top = bed_texture.copy().crop((6, 28, 22, 44))
        side = Image.new("RGBA", (16, 16), self.bgcolor)
        side_part1 = bed_texture.copy().crop((0, 28, 6, 44)).rotate(90, expand=True)
        side_part2 = bed_texture.copy().crop((53, 3, 56, 6))
        side_part2_f = side_part2.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(side, side_part1, (0, 7), side_part1)
        alpha_over(side, side_part2, (13, 13), side_part2)

        end = Image.new("RGBA", (16, 16), self.bgcolor)
        end_part = bed_texture.copy().crop((22, 22, 38, 28)).rotate(180)
        alpha_over(end, end_part, (0, 7), end_part)
        alpha_over(end, side_part2, (0, 13), side_part2)
        alpha_over(end, side_part2_f, (13, 13), side_part2_f)
        if data & 0x03 == 0x00:    # South
            top_face = top.rotate(180)
            left_face = side.transpose(Image.FLIP_LEFT_RIGHT)
        elif data & 0x03 == 0x01:  # West
            top_face = top.rotate(90)
            right_face = side.transpose(Image.FLIP_LEFT_RIGHT)
        elif data & 0x03 == 0x02:  # North
            top_face = top
            left_face = side
            right_face = end
        elif data & 0x03 == 0x03:  # East
            top_face = top.rotate(270)
            left_face = end
            right_face = side

    # lower the top face by `increment` to get the half-height look
    top_face = (top_face, increment)
    return self.build_full_block(top_face, None, None, left_face, right_face)
# powered, detector, activator and normal rails
@material(blockid=[27, 28, 66, 157], data=list(range(14)), transparent=True)
def rails(self, blockid, data):
    """Render rails.

    Data 0-5 are straight and ascending pieces; 6-9 are corners (normal
    rail only); bit 0x8 is the powered flag for powered/activator rails.
    """
    # first, do rotation
    # Masked to not clobber powered rail on/off info
    # Ascending and flat straight
    if self.rotation == 1:
        if (data & 0b0111) == 0: data = data & 0b1000 | 1
        elif (data & 0b0111) == 1: data = data & 0b1000 | 0
        elif (data & 0b0111) == 2: data = data & 0b1000 | 5
        elif (data & 0b0111) == 3: data = data & 0b1000 | 4
        elif (data & 0b0111) == 4: data = data & 0b1000 | 2
        elif (data & 0b0111) == 5: data = data & 0b1000 | 3
    elif self.rotation == 2:
        if (data & 0b0111) == 2: data = data & 0b1000 | 3
        elif (data & 0b0111) == 3: data = data & 0b1000 | 2
        elif (data & 0b0111) == 4: data = data & 0b1000 | 5
        elif (data & 0b0111) == 5: data = data & 0b1000 | 4
    elif self.rotation == 3:
        if (data & 0b0111) == 0: data = data & 0b1000 | 1
        elif (data & 0b0111) == 1: data = data & 0b1000 | 0
        elif (data & 0b0111) == 2: data = data & 0b1000 | 4
        elif (data & 0b0111) == 3: data = data & 0b1000 | 5
        elif (data & 0b0111) == 4: data = data & 0b1000 | 3
        elif (data & 0b0111) == 5: data = data & 0b1000 | 2
    if blockid == 66: # normal minetrack only
        # Corners
        # NOTE(review): some of these corner mappings look asymmetric
        # (e.g. 8->6 and 9->9 for rotation 1); kept as-is.
        if self.rotation == 1:
            if data == 6: data = 7
            elif data == 7: data = 8
            elif data == 8: data = 6
            elif data == 9: data = 9
        elif self.rotation == 2:
            if data == 6: data = 8
            elif data == 7: data = 9
            elif data == 8: data = 6
            elif data == 9: data = 7
        elif self.rotation == 3:
            if data == 6: data = 9
            elif data == 7: data = 6
            elif data == 8: data = 8
            elif data == 9: data = 7
    img = Image.new("RGBA", (24,24), self.bgcolor)

    if blockid == 27: # powered rail
        if data & 0x8 == 0: # unpowered
            raw_straight = self.load_image_texture("assets/minecraft/textures/block/powered_rail.png")
            raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")    # they don't exist but make the code
                                                                                                       # much simplier
        elif data & 0x8 == 0x8: # powered
            raw_straight = self.load_image_texture("assets/minecraft/textures/block/powered_rail_on.png")
            raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")    # leave corners for code simplicity
        # filter the 'powered' bit
        data = data & 0x7

    elif blockid == 28: # detector rail
        raw_straight = self.load_image_texture("assets/minecraft/textures/block/detector_rail.png")
        raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")    # leave corners for code simplicity

    elif blockid == 66: # normal rail
        raw_straight = self.load_image_texture("assets/minecraft/textures/block/rail.png")
        raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")

    elif blockid == 157: # activator rail
        if data & 0x8 == 0: # unpowered
            raw_straight = self.load_image_texture("assets/minecraft/textures/block/activator_rail.png")
            raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")    # they don't exist but make the code
                                                                                                       # much simplier
        elif data & 0x8 == 0x8: # powered
            raw_straight = self.load_image_texture("assets/minecraft/textures/block/activator_rail_on.png")
            raw_corner = self.load_image_texture("assets/minecraft/textures/block/rail_corner.png")    # leave corners for code simplicity
        # filter the 'powered' bit
        data = data & 0x7

    ## use transform_image to scale and shear
    if data == 0:
        track = self.transform_image_top(raw_straight)
        alpha_over(img, track, (0,12), track)
    elif data == 6:
        track = self.transform_image_top(raw_corner)
        alpha_over(img, track, (0,12), track)
    elif data == 7:
        track = self.transform_image_top(raw_corner.rotate(270))
        alpha_over(img, track, (0,12), track)
    elif data == 8:
        # flip
        track = self.transform_image_top(raw_corner.transpose(Image.FLIP_TOP_BOTTOM).rotate(90))
        alpha_over(img, track, (0,12), track)
    elif data == 9:
        track = self.transform_image_top(raw_corner.transpose(Image.FLIP_TOP_BOTTOM))
        alpha_over(img, track, (0,12), track)
    elif data == 1:
        track = self.transform_image_top(raw_straight.rotate(90))
        alpha_over(img, track, (0,12), track)

    #slopes
    elif data == 2: # slope going up in +x direction
        track = self.transform_image_slope(raw_straight)
        track = track.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, track, (2,0), track)
        # the 2 pixels move is needed to fit with the adjacent tracks

    elif data == 3: # slope going up in -x direction
        # tracks are sprites, in this case we are seeing the "side" of
        # the sprite, so draw a line to make it beautiful.
        ImageDraw.Draw(img).line([(11,11),(23,17)],fill=(164,164,164))
        # grey from track texture (exterior grey).
        # the track doesn't start from image corners, be carefull drawing the line!
    elif data == 4: # slope going up in -y direction
        track = self.transform_image_slope(raw_straight)
        alpha_over(img, track, (0,0), track)

    elif data == 5: # slope going up in +y direction
        # same as "data == 3"
        ImageDraw.Draw(img).line([(1,17),(12,11)],fill=(164,164,164))

    return img
# sticky and normal piston body
@material(blockid=[29, 33], data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13],
          transparent=True, solid=True, nospawn=True)
def piston(self, blockid, data):
    """Render a piston body: blockid 29 is sticky, 33 is normal.

    data bits 0-2 encode the facing (0 down, 1 up, 2 north, 3 south,
    4 west, 5 east); bit 0x8 is set while the piston is extended, in
    which case the head is a separate block (id 34) and the body is
    drawn with its interior exposed.
    """
    # first, rotation
    # Masked to not clobber block head/foot info
    if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]:
        rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                        2: {2: 3, 3: 2, 4: 5, 5: 4},
                        3: {2: 4, 3: 5, 4: 3, 5: 2}}
        data = (data & 0b1000) | rotation_map[self.rotation][data & 0b111]
    # face texture is the only difference between the two variants
    if blockid == 29: # sticky
        piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top_sticky.png").copy()
    else: # normal
        piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy()
    # other textures
    side_t = self.load_image_texture("assets/minecraft/textures/block/piston_side.png").copy()
    back_t = self.load_image_texture("assets/minecraft/textures/block/piston_bottom.png").copy()
    interior_t = self.load_image_texture("assets/minecraft/textures/block/piston_inner.png").copy()
    if data & 0x08 == 0x08: # pushed out, non full blocks, tricky stuff
        # remove piston texture from piston body
        # (clears the first 4 pixel rows of the side texture)
        ImageDraw.Draw(side_t).rectangle((0, 0, 16, 3), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        if data & 0x07 == 0x0: # down
            side_t = side_t.rotate(180)
            img = self.build_full_block(back_t, None, None, side_t, side_t)
        elif data & 0x07 == 0x1: # up
            # (interior_t, 4): top texture passed with an offset
            # (see build_full_block for the tuple form)
            img = self.build_full_block((interior_t, 4), None, None, side_t, side_t)
        elif data & 0x07 == 0x2: # north
            img = self.build_full_block(side_t, None, None, side_t.rotate(90), back_t)
        elif data & 0x07 == 0x3: # south
            img = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), None)
            # paste the exposed interior over the viewer-facing side
            temp = self.transform_image_side(interior_t)
            temp = temp.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, temp, (9, 4), temp)
        elif data & 0x07 == 0x4: # west
            img = self.build_full_block(side_t.rotate(90), None, None, None, side_t.rotate(270))
            temp = self.transform_image_side(interior_t)
            alpha_over(img, temp, (3, 4), temp)
        elif data & 0x07 == 0x5: # east
            img = self.build_full_block(side_t.rotate(270), None, None, back_t, side_t.rotate(90))
    else: # pushed in, normal full blocks, easy stuff
        if data & 0x07 == 0x0: # down
            side_t = side_t.rotate(180)
            img = self.build_full_block(back_t, None, None, side_t, side_t)
        elif data & 0x07 == 0x1: # up
            img = self.build_full_block(piston_t, None, None, side_t, side_t)
        elif data & 0x07 == 0x2: # north
            img = self.build_full_block(side_t, None, None, side_t.rotate(90), back_t)
        elif data & 0x07 == 0x3: # south
            img = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), piston_t)
        elif data & 0x07 == 0x4: # west
            img = self.build_full_block(side_t.rotate(90), None, None, piston_t, side_t.rotate(270))
        elif data & 0x07 == 0x5: # east
            img = self.build_full_block(side_t.rotate(270), None, None, back_t, side_t.rotate(90))
    return img
# sticky and normal piston shaft
@material(blockid=34, data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13], transparent=True, nospawn=True)
def piston_extension(self, blockid, data):
    """Render the detached piston head plus its shaft (block 34).

    data bits 0-2 encode the facing (0 down, 1 up, 2 north, 3 south,
    4 west, 5 east); bit 0x8 marks the head of a sticky piston, which
    only changes the face texture.
    """
    # first, rotation
    # Masked to not clobber block head/foot info
    if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]:
        rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                        2: {2: 3, 3: 2, 4: 5, 5: 4},
                        3: {2: 4, 3: 5, 4: 3, 5: 2}}
        data = (data & 0b1000) | rotation_map[self.rotation][data & 0b111]
    if data & 0x8 == 0x8: # sticky
        piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top_sticky.png").copy()
    else: # normal
        piston_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy()
    # other textures
    side_t = self.load_image_texture("assets/minecraft/textures/block/piston_side.png").copy()
    back_t = self.load_image_texture("assets/minecraft/textures/block/piston_top.png").copy()
    # crop piston body
    # (clear rows 4..16 of the side texture, keeping only the 4-row head strip)
    ImageDraw.Draw(side_t).rectangle((0, 4, 16, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
    # generate the horizontal piston extension stick
    h_stick = Image.new("RGBA", (24, 24), self.bgcolor)
    temp = self.transform_image_side(side_t)
    alpha_over(h_stick, temp, (1, 7), temp)
    temp = self.transform_image_top(side_t.rotate(90))
    alpha_over(h_stick, temp, (1, 1), temp)
    # Darken it
    sidealpha = h_stick.split()[3]
    h_stick = ImageEnhance.Brightness(h_stick).enhance(0.85)
    h_stick.putalpha(sidealpha)  # restore the alpha channel Brightness discards
    # generate the vertical piston extension stick
    v_stick = Image.new("RGBA", (24, 24), self.bgcolor)
    temp = self.transform_image_side(side_t.rotate(90))
    alpha_over(v_stick, temp, (12, 6), temp)
    temp = temp.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(v_stick, temp, (1, 6), temp)
    # Darken it
    sidealpha = v_stick.split()[3]
    v_stick = ImageEnhance.Brightness(v_stick).enhance(0.85)
    v_stick.putalpha(sidealpha)
    # Piston orientation is stored in the 3 first bits
    if data & 0x07 == 0x0: # down
        side_t = side_t.rotate(180)
        img = self.build_full_block((back_t, 12), None, None, side_t, side_t)
        alpha_over(img, v_stick, (0, -3), v_stick)
    elif data & 0x07 == 0x1: # up
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        img2 = self.build_full_block(piston_t, None, None, side_t, side_t)
        # draw the stick first so the head plate is composited over it
        alpha_over(img, v_stick, (0, 4), v_stick)
        alpha_over(img, img2, (0, 0), img2)
    elif data & 0x07 == 0x2: # north
        img = self.build_full_block(side_t, None, None, side_t.rotate(90), None)
        temp = self.transform_image_side(back_t).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, temp, (2, 2), temp)
        alpha_over(img, h_stick, (6, 3), h_stick)
    elif data & 0x07 == 0x3: # south
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        img2 = self.build_full_block(side_t.rotate(180), None, None, side_t.rotate(270), piston_t)
        alpha_over(img, h_stick, (0, 0), h_stick)
        alpha_over(img, img2, (0, 0), img2)
    elif data & 0x07 == 0x4: # west
        img = self.build_full_block(side_t.rotate(90), None, None, piston_t, side_t.rotate(270))
        h_stick = h_stick.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, h_stick, (0, 0), h_stick)
    elif data & 0x07 == 0x5: # east
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        img2 = self.build_full_block(side_t.rotate(270), None, None, None, side_t.rotate(90))
        h_stick = h_stick.transpose(Image.FLIP_LEFT_RIGHT)
        temp = self.transform_image_side(back_t)
        alpha_over(img2, temp, (10, 2), temp)
        alpha_over(img, img2, (0, 0), img2)
        alpha_over(img, h_stick, (-3, 2), h_stick)
    return img
# cobweb -- registered as a flat sprite
sprite(blockid=30, imagename="assets/minecraft/textures/block/cobweb.png", nospawn=True)
@material(blockid=31, data=list(range(3)), transparent=True)
def tall_grass(self, blockid, data):
    """Render grass-like plants as billboards.

    data selects the variant: 0 = dead shrub, 1 = tall grass, 2 = fern.
    """
    variant_texture = {
        0: "assets/minecraft/textures/block/dead_bush.png",  # dead shrub
        1: "assets/minecraft/textures/block/grass.png",      # tall grass
        2: "assets/minecraft/textures/block/fern.png",       # fern
    }
    return self.build_billboard(self.load_image_texture(variant_texture[data]))
# dead bush -- registered as a billboard (crossed sprite planes)
billboard(blockid=32, imagename="assets/minecraft/textures/block/dead_bush.png")
@material(blockid=35, data=list(range(16)), solid=True)
def wool(self, blockid, data):
    """Render one of the 16 colored wool blocks; data indexes color_map."""
    tex_path = "assets/minecraft/textures/block/%s_wool.png" % color_map[data]
    tex = self.load_image_texture(tex_path)
    return self.build_block(tex, tex)
# dandelion -- simple flat sprite
sprite(blockid=37, imagename="assets/minecraft/textures/block/dandelion.png")
# flowers
@material(blockid=38, data=list(range(13)), transparent=True)
def flower(self, blockid, data):
    """Render small flowers as billboards; data picks the species."""
    species = ("poppy", "blue_orchid", "allium", "azure_bluet",
               "red_tulip", "orange_tulip", "white_tulip", "pink_tulip",
               "oxeye_daisy", "dandelion", "wither_rose", "cornflower",
               "lily_of_the_valley")
    tex = self.load_image_texture("assets/minecraft/textures/block/%s.png" % species[data])
    return self.build_billboard(tex)
# Simple one-texture registrations: mushrooms and nether plants as flat
# sprites, metal blocks as full cubes.
# brown mushroom
sprite(blockid=39, imagename="assets/minecraft/textures/block/brown_mushroom.png")
# red mushroom
sprite(blockid=40, imagename="assets/minecraft/textures/block/red_mushroom.png")
# warped fungus
sprite(blockid=1016, imagename="assets/minecraft/textures/block/warped_fungus.png")
# crimson fungus
sprite(blockid=1017, imagename="assets/minecraft/textures/block/crimson_fungus.png")
# warped roots
sprite(blockid=1018, imagename="assets/minecraft/textures/block/warped_roots.png")
# crimson roots
sprite(blockid=1019, imagename="assets/minecraft/textures/block/crimson_roots.png")
# block of gold
block(blockid=41, top_image="assets/minecraft/textures/block/gold_block.png")
# block of iron
block(blockid=42, top_image="assets/minecraft/textures/block/iron_block.png")
# double slabs and slabs
# these wooden slabs are unobtainable without cheating, they are still
# here because lots of pre-1.3 worlds use this blocks, add prismarine slabs
@material(blockid=[43, 44, 181, 182, 204, 205] + list(range(11340, 11359)) +
          list(range(1027, 1030)) + list(range(1072, 1080)) + list(range(1103, 1107)),
          data=list(range(16)),
          transparent=[44, 182, 205] + list(range(11340, 11359)) + list(range(1027, 1030)) +
          list(range(1072, 1080)) + list(range(1103, 1107)), solid=True)
def slabs(self, blockid, data):
    """Render single and double slab blocks.

    For the legacy ids (43/44 stone, 181/182 red sandstone) the low data
    bits select the material and bit 0x8 on a single slab puts it in the
    top half of the block space. The modern ids (11340+, 1027+, 1072+,
    1103+) each map one id to one material and only use bit 0x8.
    """
    # legacy single slabs: material lives in the low 3 bits
    if blockid == 44 or blockid == 182:
        texture = data & 7
    else: # data > 8 are special double slabs
        texture = data
    if blockid == 44 or blockid == 43:
        if texture== 0: # stone slab
            top = self.load_image_texture("assets/minecraft/textures/block/stone.png")
            side = self.load_image_texture("assets/minecraft/textures/block/stone.png")
        elif texture== 1: # sandstone slab
            top = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png")
            side = self.load_image_texture("assets/minecraft/textures/block/sandstone.png")
        elif texture== 2: # wooden slab
            top = side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png")
        elif texture== 3: # cobblestone slab
            top = side = self.load_image_texture("assets/minecraft/textures/block/cobblestone.png")
        elif texture== 4: # brick
            top = side = self.load_image_texture("assets/minecraft/textures/block/bricks.png")
        elif texture== 5: # stone brick
            top = side = self.load_image_texture("assets/minecraft/textures/block/stone_bricks.png")
        elif texture== 6: # nether brick slab
            top = side = self.load_image_texture("assets/minecraft/textures/block/nether_bricks.png")
        elif texture== 7: #quartz
            top = side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_side.png")
        elif texture== 8: # special stone double slab with top texture only
            top = side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone.png")
        elif texture== 9: # special sandstone double slab with top texture only
            top = side = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png")
        else:
            return None
    elif blockid == 182: # single red sandstone slab
        if texture == 0:
            top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png")
            side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png")
        else:
            return None
    elif blockid == 181: # double red sandstone slab
        if texture == 0: # red sandstone
            top = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png")
            side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone.png")
        elif texture == 8: # 'full' red sandstone (smooth)
            top = side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png");
        else:
            return None
    elif blockid == 204 or blockid == 205: # purpur slab (single=205 double=204)
        top = side = self.load_image_texture("assets/minecraft/textures/block/purpur_block.png");
    elif blockid == 11340: # prismarine slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/prismarine.png").copy()
    elif blockid == 11341: # dark prismarine slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/dark_prismarine.png").copy()
    elif blockid == 11342: # prismarine brick slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/prismarine_bricks.png").copy()
    elif blockid == 11343: # andesite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/andesite.png").copy()
    elif blockid == 11344: # diorite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/diorite.png").copy()
    elif blockid == 11345: # granite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/granite.png").copy()
    elif blockid == 11346: # polished andesite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/polished_andesite.png").copy()
    elif blockid == 11347: # polished diorite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/polished_diorite.png").copy()
    elif blockid == 11348: # polished granite slabs
        top = side = self.load_image_texture("assets/minecraft/textures/block/polished_granite.png").copy()
    elif blockid == 11349: # red nether brick slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/red_nether_bricks.png").copy()
    elif blockid == 11350: # smooth sandstone slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/sandstone_top.png").copy()
    elif blockid == 11351: # cut sandstone slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/cut_sandstone.png").copy()
    elif blockid == 11352: # smooth red sandstone slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/red_sandstone_top.png").copy()
    elif blockid == 11353: # cut red sandstone slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/cut_red_sandstone.png").copy()
    elif blockid == 11354: # end_stone_brick_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/end_stone_bricks.png").copy()
    elif blockid == 11355: # mossy_cobblestone_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/mossy_cobblestone.png").copy()
    elif blockid == 11356: # mossy_stone_brick_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/mossy_stone_bricks.png").copy()
    elif blockid == 11357: # smooth_quartz_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_bottom.png").copy()
    elif blockid == 11358: # smooth_stone_slab
        top = self.load_image_texture("assets/minecraft/textures/block/smooth_stone.png").copy()
        side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png").copy()
    elif blockid == 1027: # blackstone_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/blackstone.png").copy()
    elif blockid == 1028: # polished_blackstone_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/polished_blackstone.png").copy()
    elif blockid == 1029: # polished_blackstone_brick_slab
        top = side = self.load_image_texture("assets/minecraft/textures/block/polished_blackstone_bricks.png").copy()
    elif blockid in range(1072, 1080):
        # copper slabs: plain (1072-1075) and waxed (1076-1079) share textures
        copper_tex = {
            1072: "assets/minecraft/textures/block/cut_copper.png",
            1076: "assets/minecraft/textures/block/cut_copper.png",
            1073: "assets/minecraft/textures/block/exposed_cut_copper.png",
            1077: "assets/minecraft/textures/block/exposed_cut_copper.png",
            1074: "assets/minecraft/textures/block/weathered_cut_copper.png",
            1078: "assets/minecraft/textures/block/weathered_cut_copper.png",
            1075: "assets/minecraft/textures/block/oxidized_cut_copper.png",
            1079: "assets/minecraft/textures/block/oxidized_cut_copper.png",
        }
        top = side = self.load_image_texture(copper_tex[blockid]).copy()
    elif blockid in range(1103, 1107):
        deepslate_tex = {
            1103: "assets/minecraft/textures/block/cobbled_deepslate.png",
            1104: "assets/minecraft/textures/block/polished_deepslate.png",
            1105: "assets/minecraft/textures/block/deepslate_bricks.png",
            1106: "assets/minecraft/textures/block/deepslate_tiles.png",
        }
        top = side = self.load_image_texture(deepslate_tex[blockid]).copy()
    if blockid == 43 or blockid == 181 or blockid == 204: # double slab
        return self.build_block(top, side)
    # single slab: bit 0x8 selects the top half of the block space
    return self.build_slab_block(top, side, data & 8 == 8);
# Full-cube registrations for simple opaque blocks.
# brick block
block(blockid=45, top_image="assets/minecraft/textures/block/bricks.png")
# TNT
block(blockid=46, top_image="assets/minecraft/textures/block/tnt_top.png", side_image="assets/minecraft/textures/block/tnt_side.png", nospawn=True)
# bookshelf
block(blockid=47, top_image="assets/minecraft/textures/block/oak_planks.png", side_image="assets/minecraft/textures/block/bookshelf.png")
# moss stone
block(blockid=48, top_image="assets/minecraft/textures/block/mossy_cobblestone.png")
# obsidian
block(blockid=49, top_image="assets/minecraft/textures/block/obsidian.png")
# torch, redstone torch (off), redstone torch(on), soul_torch
@material(blockid=[50, 75, 76, 1039], data=[1, 2, 3, 4, 5], transparent=True)
def torches(self, blockid, data):
    """Render torches and torch-like blocks.

    data 1-4 = attached to a wall (pointing south/north/west/east as
    noted below); data 5 = standing on the floor.
    """
    # first, rotations
    # remap the wall-attachment direction for the map's north rotation
    if self.rotation == 1:
        if data == 1: data = 3
        elif data == 2: data = 4
        elif data == 3: data = 2
        elif data == 4: data = 1
    elif self.rotation == 2:
        if data == 1: data = 2
        elif data == 2: data = 1
        elif data == 3: data = 4
        elif data == 4: data = 3
    elif self.rotation == 3:
        if data == 1: data = 4
        elif data == 2: data = 3
        elif data == 3: data = 1
        elif data == 4: data = 2
    # choose the proper texture
    if blockid == 50: # torch
        small = self.load_image_texture("assets/minecraft/textures/block/torch.png")
    elif blockid == 75: # off redstone torch
        small = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png")
    elif blockid == 76: # on redstone torch
        small = self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png")
    elif blockid == 1039: # soul torch
        small= self.load_image_texture("assets/minecraft/textures/block/soul_torch.png")
    # compose a torch bigger than the normal
    # (better for doing transformations)
    torch = Image.new("RGBA", (16,16), self.bgcolor)
    alpha_over(torch,small,(-4,-3))
    alpha_over(torch,small,(-5,-2))
    alpha_over(torch,small,(-3,-2))
    # angle of inclination of the texture
    rotation = 15
    if data == 1: # pointing south
        torch = torch.rotate(-rotation, Image.NEAREST) # nearest filter keeps the texture crisp
        img = self.build_full_block(None, None, None, torch, None, None)
    elif data == 2: # pointing north
        torch = torch.rotate(rotation, Image.NEAREST)
        img = self.build_full_block(None, None, torch, None, None, None)
    elif data == 3: # pointing west
        torch = torch.rotate(rotation, Image.NEAREST)
        img = self.build_full_block(None, torch, None, None, None, None)
    elif data == 4: # pointing east
        torch = torch.rotate(-rotation, Image.NEAREST)
        img = self.build_full_block(None, None, None, None, torch, None)
    elif data == 5: # standing on the floor
        # compose a "3d torch".
        img = Image.new("RGBA", (24,24), self.bgcolor)
        small_crop = small.crop((2,2,14,14))
        slice = small_crop.copy()
        # carve two vertical notches out of the copy so only a thin
        # column of the torch texture remains (used as the "sides")
        ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0))
        # layer the slices and full crops to fake depth
        alpha_over(img, slice, (7,5))
        alpha_over(img, small_crop, (6,6))
        alpha_over(img, small_crop, (7,6))
        alpha_over(img, slice, (7,7))
    return img
# lantern
@material(blockid=[11373, 1038], data=[0, 1], transparent=True)
def lantern(self, blockid, data):
    """Render a lantern (11373) or soul lantern (1038).

    data: 0 = sitting on the ground, 1 = hanging from the ceiling
    (drawn 8 pixels higher).
    """
    # get the multipart texture of the lantern
    if blockid == 11373:
        inputtexture = self.load_image_texture("assets/minecraft/textures/block/lantern.png")
    if blockid == 1038:
        inputtexture = self.load_image_texture("assets/minecraft/textures/block/soul_lantern.png")
    # # now create a textures, using the parts defined in lantern.json
    # JSON data for sides
    # from": [ 5, 1, 5 ],
    # "to": [11, 8, 11 ],
    # { "uv": [ 0, 2, 6, 9 ], "texture": "#all" }
    side_crop = inputtexture.crop((0, 2, 6, 9))
    side_slice = side_crop.copy()
    side_texture = Image.new("RGBA", (16, 16), self.bgcolor)
    side_texture.paste(side_slice,(5, 8))
    # JSON data for top
    # { "uv": [ 0, 9, 6, 15 ], "texture": "#all" }
    top_crop = inputtexture.crop((0, 9, 6, 15))
    top_slice = top_crop.copy()
    top_texture = Image.new("RGBA", (16, 16), self.bgcolor)
    top_texture.paste(top_slice,(5, 5))
    # mimic parts of build_full_block, to get an object smaller than a block
    # build_full_block(self, top, side1, side2, side3, side4, bottom=None):
    # a non transparent block uses top, side 3 and side 4.
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    # prepare the side textures
    # side3
    side3 = self.transform_image_side(side_texture)
    # Darken this side
    sidealpha = side3.split()[3]
    side3 = ImageEnhance.Brightness(side3).enhance(0.9)
    side3.putalpha(sidealpha)
    # place the transformed texture
    hangoff = 0
    if data == 1:
        hangoff = 8
    xoff = 4
    # note: "=-" here is assignment of a negated value, i.e. yoff = -hangoff
    yoff =- hangoff
    alpha_over(img, side3, (xoff+0, yoff+6), side3)
    # side4
    side4 = self.transform_image_side(side_texture)
    side4 = side4.transpose(Image.FLIP_LEFT_RIGHT)
    # Darken this side
    sidealpha = side4.split()[3]
    side4 = ImageEnhance.Brightness(side4).enhance(0.8)
    side4.putalpha(sidealpha)
    alpha_over(img, side4, (12-xoff, yoff+6), side4)
    # top
    top = self.transform_image_top(top_texture)
    alpha_over(img, top, (0, 8-hangoff), top)
    return img
# bamboo
@material(blockid=11416, transparent=True)
def bamboo(self, blockid, data):
    """Render a bamboo stalk as a thin column (2x16x2 in model units)."""
    # get the multipart texture of the lantern
    inputtexture = self.load_image_texture("assets/minecraft/textures/block/bamboo_stalk.png")
    # # now create a textures, using the parts defined in bamboo1_age0.json
    # { "from": [ 7, 0, 7 ],
    # "to": [ 9, 16, 9 ],
    # "faces": {
    # "down": { "uv": [ 13, 4, 15, 6 ], "texture": "#all", "cullface": "down" },
    # "up": { "uv": [ 13, 0, 15, 2], "texture": "#all", "cullface": "up" },
    # "north": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" },
    # "south": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" },
    # "west": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" },
    # "east": { "uv": [ 0, 0, 2, 16 ], "texture": "#all" }
    # }
    # }
    side_crop = inputtexture.crop((0, 0, 3, 16))
    side_slice = side_crop.copy()
    side_texture = Image.new("RGBA", (16, 16), self.bgcolor)
    side_texture.paste(side_slice,(0, 0))
    # JSON data for top
    # "up": { "uv": [ 13, 0, 15, 2], "texture": "#all", "cullface": "up" },
    top_crop = inputtexture.crop((13, 0, 16, 3))
    top_slice = top_crop.copy()
    top_texture = Image.new("RGBA", (16, 16), self.bgcolor)
    top_texture.paste(top_slice,(5, 5))
    # mimic parts of build_full_block, to get an object smaller than a block
    # build_full_block(self, top, side1, side2, side3, side4, bottom=None):
    # a non transparent block uses top, side 3 and side 4.
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    # prepare the side textures
    # side3
    side3 = self.transform_image_side(side_texture)
    # Darken this side
    sidealpha = side3.split()[3]
    side3 = ImageEnhance.Brightness(side3).enhance(0.9)
    side3.putalpha(sidealpha)
    # place the transformed texture
    xoff = 3
    yoff = 0
    alpha_over(img, side3, (4+xoff, yoff), side3)
    # side4
    side4 = self.transform_image_side(side_texture)
    side4 = side4.transpose(Image.FLIP_LEFT_RIGHT)
    # Darken this side
    sidealpha = side4.split()[3]
    side4 = ImageEnhance.Brightness(side4).enhance(0.8)
    side4.putalpha(sidealpha)
    alpha_over(img, side4, (-4+xoff, yoff), side4)
    # top
    top = self.transform_image_top(top_texture)
    alpha_over(img, top, (-4+xoff, -5), top)
    return img
# composter
@material(blockid=11417, data=list(range(9)), transparent=True)
def composter(self, blockid, data):
    """Render a composter; data 0-8 is the fill level (8 = ready)."""
    side = self.load_image_texture("assets/minecraft/textures/block/composter_side.png")
    top = self.load_image_texture("assets/minecraft/textures/block/composter_top.png")
    # An empty composter is just the hollow box.
    if data == 0:
        return self.build_full_block(top, side, side, side, side)
    # Pick the fill texture: "ready" at level 8, plain compost below.
    if data == 8:
        fill_path = "assets/minecraft/textures/block/composter_ready.png"
    else:
        fill_path = "assets/minecraft/textures/block/composter_compost.png"
    compost = self.transform_image_top(self.load_image_texture(fill_path))
    # Vertical offset of the compost layer for each fill level.
    nudge = {1: (0, 9), 2: (0, 8), 3: (0, 7), 4: (0, 6), 5: (0, 4),
             6: (0, 2), 7: (0, 0), 8: (0, 0)}
    # Far walls first, then the compost layer, then the rim and near
    # walls composited over it.
    img = self.build_full_block(None, side, side, None, None)
    alpha_over(img, compost, nudge[data], compost)
    front = self.build_full_block(top, None, None, side, side)
    alpha_over(img, front, (0, 0), front)
    return img
# fire and soul_fire
@material(blockid=[51, 1040], transparent=True)
def fire(self, blockid, data):
    """Render fire (51) or soul fire (1040) as four crossed flame planes."""
    if blockid == 51:
        tex_ns = self.load_image_texture("assets/minecraft/textures/block/fire_0.png")
        tex_ew = self.load_image_texture("assets/minecraft/textures/block/fire_1.png")
    else:  # blockid == 1040, soul fire
        tex_ns = self.load_image_texture("assets/minecraft/textures/block/soul_fire_0.png")
        tex_ew = self.load_image_texture("assets/minecraft/textures/block/soul_fire_1.png")
    flame_a = self.transform_image_side(tex_ns)
    flame_b = self.transform_image_side(tex_ew).transpose(Image.FLIP_LEFT_RIGHT)
    img = Image.new("RGBA", (24,24), self.bgcolor)
    # Two flames along the back edges, two along the front edges.
    for flame, pos in ((flame_a, (12, 0)), (flame_b, (0, 0)),
                       (flame_a, (0, 6)), (flame_b, (12, 6))):
        alpha_over(img, flame, pos, flame)
    return img
# monster spawner -- full cube with a see-through cage texture
block(blockid=52, top_image="assets/minecraft/textures/block/spawner.png", transparent=True)
# wooden, cobblestone, red brick, stone brick, netherbrick, sandstone, spruce, birch,
# jungle, quartz, red sandstone, purper_stairs, crimson_stairs, warped_stairs, (dark) prismarine,
# mossy brick and mossy cobblestone, stone smooth_quartz
# polished_granite polished_andesite polished_diorite granite diorite andesite end_stone_bricks red_nether_brick stairs
# smooth_red_sandstone blackstone polished_blackstone polished_blackstone_brick
# also all the copper variants
# also all deepslate variants
@material(blockid=[53, 67, 108, 109, 114, 128, 134, 135, 136, 156, 163, 164, 180, 203, 509, 510,
                   11337, 11338, 11339, 11370, 11371, 11374, 11375, 11376, 11377, 11378, 11379,
                   11380, 11381, 11382, 11383, 11384, 11415, 1030, 1031, 1032, 1064, 1065, 1066,
                   1067, 1068, 1069, 1070, 1071, 1099, 1100, 1101, 1102],
          data=list(range(128)), transparent=True, solid=True, nospawn=True)
def stairs(self, blockid, data):
    """Render stair blocks of every material.

    data bit 0x4 is the upside-down flag; bits 0x8/0x10/0x20/0x40 are
    pseudo-data marking which quarters (NW/NE/SE/SW) of the relevant
    half are solid.
    """
    # preserve the upside-down bit
    upside_down = data & 0x4
    # find solid quarters within the top or bottom half of the block
    #                   NW           NE           SE           SW
    quarters = [data & 0x8, data & 0x10, data & 0x20, data & 0x40]
    # rotate the quarters so we can pretend northdirection is always upper-left.
    # numpy.roll returns a rotated copy and does NOT modify its argument in
    # place, so the result must be assigned back -- the previous bare call
    # was a no-op and the quarters ignored the map's north direction.
    # NOTE(review): verify stair rendering on rotated maps after this change.
    quarters = numpy.roll(quarters, [0,1,3,2][self.rotation])
    nw,ne,se,sw = quarters
    stair_id_to_tex = {
        53: "assets/minecraft/textures/block/oak_planks.png",
        67: "assets/minecraft/textures/block/cobblestone.png",
        108: "assets/minecraft/textures/block/bricks.png",
        109: "assets/minecraft/textures/block/stone_bricks.png",
        114: "assets/minecraft/textures/block/nether_bricks.png",
        128: "assets/minecraft/textures/block/sandstone.png",
        134: "assets/minecraft/textures/block/spruce_planks.png",
        135: "assets/minecraft/textures/block/birch_planks.png",
        136: "assets/minecraft/textures/block/jungle_planks.png",
        156: "assets/minecraft/textures/block/quartz_block_side.png",
        163: "assets/minecraft/textures/block/acacia_planks.png",
        164: "assets/minecraft/textures/block/dark_oak_planks.png",
        180: "assets/minecraft/textures/block/red_sandstone.png",
        203: "assets/minecraft/textures/block/purpur_block.png",
        509: "assets/minecraft/textures/block/crimson_planks.png",
        510: "assets/minecraft/textures/block/warped_planks.png",
        11337: "assets/minecraft/textures/block/prismarine.png",
        11338: "assets/minecraft/textures/block/dark_prismarine.png",
        11339: "assets/minecraft/textures/block/prismarine_bricks.png",
        11370: "assets/minecraft/textures/block/mossy_stone_bricks.png",
        11371: "assets/minecraft/textures/block/mossy_cobblestone.png",
        11374: "assets/minecraft/textures/block/sandstone_top.png",
        11375: "assets/minecraft/textures/block/quartz_block_side.png",
        11376: "assets/minecraft/textures/block/polished_granite.png",
        11377: "assets/minecraft/textures/block/polished_diorite.png",
        11378: "assets/minecraft/textures/block/polished_andesite.png",
        11379: "assets/minecraft/textures/block/stone.png",
        11380: "assets/minecraft/textures/block/granite.png",
        11381: "assets/minecraft/textures/block/diorite.png",
        11382: "assets/minecraft/textures/block/andesite.png",
        11383: "assets/minecraft/textures/block/end_stone_bricks.png",
        11384: "assets/minecraft/textures/block/red_nether_bricks.png",
        11415: "assets/minecraft/textures/block/red_sandstone_top.png",
        1030: "assets/minecraft/textures/block/blackstone.png",
        1031: "assets/minecraft/textures/block/polished_blackstone.png",
        1032: "assets/minecraft/textures/block/polished_blackstone_bricks.png",
        # Cut copper stairs
        1064: "assets/minecraft/textures/block/cut_copper.png",
        1065: "assets/minecraft/textures/block/exposed_cut_copper.png",
        1066: "assets/minecraft/textures/block/weathered_cut_copper.png",
        1067: "assets/minecraft/textures/block/oxidized_cut_copper.png",
        # Waxed cut copper stairs
        1068: "assets/minecraft/textures/block/cut_copper.png",
        1069: "assets/minecraft/textures/block/exposed_cut_copper.png",
        1070: "assets/minecraft/textures/block/weathered_cut_copper.png",
        1071: "assets/minecraft/textures/block/oxidized_cut_copper.png",
        # Deepslate
        1099: "assets/minecraft/textures/block/cobbled_deepslate.png",
        1100: "assets/minecraft/textures/block/polished_deepslate.png",
        1101: "assets/minecraft/textures/block/deepslate_bricks.png",
        1102: "assets/minecraft/textures/block/deepslate_tiles.png",
    }
    texture = self.load_image_texture(stair_id_to_tex[blockid]).copy()
    outside_l = texture.copy()
    outside_r = texture.copy()
    inside_l = texture.copy()
    inside_r = texture.copy()
    # sandstone, red sandstone, and quartz stairs have special top texture
    special_tops = {
        128: "assets/minecraft/textures/block/sandstone_top.png",
        156: "assets/minecraft/textures/block/quartz_block_top.png",
        180: "assets/minecraft/textures/block/red_sandstone_top.png",
        11375: "assets/minecraft/textures/block/quartz_block_top.png",
    }
    if blockid in special_tops:
        texture = self.load_image_texture(special_tops[blockid]).copy()
    slab_top = texture.copy()
    push = 8 if upside_down else 0
    def rect(tex,coords):
        # blank out a rectangular region of a texture in place
        ImageDraw.Draw(tex).rectangle(coords,outline=(0,0,0,0),fill=(0,0,0,0))
    # cut out top or bottom half from inner surfaces
    rect(inside_l, (0,8-push,15,15-push))
    rect(inside_r, (0,8-push,15,15-push))
    # cut out missing or obstructed quarters from each surface
    if not nw:
        rect(outside_l, (0,push,7,7+push))
        rect(texture, (0,0,7,7))
    if not nw or sw:
        rect(inside_r, (8,push,15,7+push)) # will be flipped
    if not ne:
        rect(texture, (8,0,15,7))
    if not ne or nw:
        rect(inside_l, (0,push,7,7+push))
    if not ne or se:
        rect(inside_r, (0,push,7,7+push)) # will be flipped
    if not se:
        rect(outside_r, (0,push,7,7+push)) # will be flipped
        rect(texture, (8,8,15,15))
    if not se or sw:
        rect(inside_l, (8,push,15,7+push))
    if not sw:
        rect(outside_l, (8,push,15,7+push))
        rect(outside_r, (8,push,15,7+push)) # will be flipped
        rect(texture, (0,8,7,15))
    img = Image.new("RGBA", (24,24), self.bgcolor)
    if upside_down:
        # top should have no cut-outs after all
        texture = slab_top
    else:
        # render the slab-level surface
        slab_top = self.transform_image_top(slab_top)
        alpha_over(img, slab_top, (0,6))
    # render inner left surface
    inside_l = self.transform_image_side(inside_l)
    # Darken the vertical part of the second step
    sidealpha = inside_l.split()[3]
    # darken it a bit more than usual, looks better
    inside_l = ImageEnhance.Brightness(inside_l).enhance(0.8)
    inside_l.putalpha(sidealpha)
    alpha_over(img, inside_l, (6,3))
    # render inner right surface
    inside_r = self.transform_image_side(inside_r).transpose(Image.FLIP_LEFT_RIGHT)
    # Darken the vertical part of the second step
    sidealpha = inside_r.split()[3]
    # darken it a bit more than usual, looks better
    inside_r = ImageEnhance.Brightness(inside_r).enhance(0.7)
    inside_r.putalpha(sidealpha)
    alpha_over(img, inside_r, (6,3))
    # render outer surfaces
    alpha_over(img, self.build_full_block(texture, None, None, outside_l, outside_r))
    return img
# normal, locked (used in april's fool day), ender and trapped chest
# NOTE: locked chest used to be id95 (which is now stained glass)
@material(blockid=[54, 130, 146], data=list(range(30)), transparent = True)
def chests(self, blockid, data):
    """Render normal (54), ender (130) and trapped (146) chests.

    The low 3 bits of ``data`` encode the facing (2=north, 3=south,
    4=west, 5=east, per the composition branches at the end); bits
    0x8/0x10 select which half of a double chest this block is.
    Returns a 24x24 RGBA image, or None for invalid combinations.
    """
    # the first 3 bits are the orientation as stored in minecraft,
    # bits 0x8 and 0x10 indicate which half of the double chest is it.

    # first, do the rotation if needed
    orientation_data = data & 7
    if self.rotation == 1:
        if orientation_data == 2: data = 5 | (data & 24)
        elif orientation_data == 3: data = 4 | (data & 24)
        elif orientation_data == 4: data = 2 | (data & 24)
        elif orientation_data == 5: data = 3 | (data & 24)
    elif self.rotation == 2:
        if orientation_data == 2: data = 3 | (data & 24)
        elif orientation_data == 3: data = 2 | (data & 24)
        elif orientation_data == 4: data = 5 | (data & 24)
        elif orientation_data == 5: data = 4 | (data & 24)
    elif self.rotation == 3:
        if orientation_data == 2: data = 4 | (data & 24)
        elif orientation_data == 3: data = 5 | (data & 24)
        elif orientation_data == 4: data = 3 | (data & 24)
        elif orientation_data == 5: data = 2 | (data & 24)

    if blockid == 130 and not data in [2, 3, 4, 5]: return None
        # iterate.c will only return the ancil data (without pseudo
        # ancil data) for locked and ender chests, so only
        # ancilData = 2,3,4,5 are used for this blockids

    if data & 24 == 0:
        # single chest (no double-chest half bits set)
        if blockid == 130: t = self.load_image("assets/minecraft/textures/entity/chest/ender.png")
        else:
            try:
                t = self.load_image("assets/minecraft/textures/entity/chest/normal.png")
            except (TextureException, IOError):
                # fall back to the pre-1.15 texture name
                t = self.load_image("assets/minecraft/textures/entity/chest/chest.png")
        t = ImageOps.flip(t) # for some reason the 1.15 images are upside down

        # the textures is no longer in terrain.png, get it from
        # item/chest.png and get by cropping all the needed stuff
        if t.size != (64, 64): t = t.resize((64, 64), Image.ANTIALIAS)
        # top
        top = t.crop((28, 50, 42, 64))
        top.load() # every crop need a load, crop is a lazy operation
                   # see PIL manual
        img = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(img, top, (1, 1))
        top = img
        # front
        front_top = t.crop((42, 45, 56, 50))
        front_top.load()
        front_bottom = t.crop((42, 21, 56, 31))
        front_bottom.load()
        front_lock = t.crop((1, 59, 3, 63))
        front_lock.load()
        front = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(front, front_top, (1, 1))
        alpha_over(front, front_bottom, (1, 5))
        alpha_over(front, front_lock, (7, 3))
        # left side
        # left side, right side, and back are essentially the same for
        # the default texture, we take it anyway just in case other
        # textures make use of it.
        side_l_top = t.crop((14, 45, 28, 50))
        side_l_top.load()
        side_l_bottom = t.crop((14, 21, 28, 31))
        side_l_bottom.load()
        side_l = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(side_l, side_l_top, (1, 1))
        alpha_over(side_l, side_l_bottom, (1, 5))
        # right side
        side_r_top = t.crop((28, 45, 42, 50))
        side_r_top.load()
        side_r_bottom = t.crop((28, 21, 42, 31))
        side_r_bottom.load()
        side_r = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(side_r, side_r_top, (1, 1))
        alpha_over(side_r, side_r_bottom, (1, 5))
        # back
        back_top = t.crop((0, 45, 14, 50))
        back_top.load()
        back_bottom = t.crop((0, 21, 14, 31))
        back_bottom.load()
        back = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(back, back_top, (1, 1))
        alpha_over(back, back_bottom, (1, 5))

    else:
        # large chest
        # the textures is no longer in terrain.png, get it from
        # item/chest.png and get all the needed stuff
        t_left = self.load_image("assets/minecraft/textures/entity/chest/normal_left.png")
        t_right = self.load_image("assets/minecraft/textures/entity/chest/normal_right.png")
        # for some reason the 1.15 images are upside down
        t_left = ImageOps.flip(t_left)
        t_right = ImageOps.flip(t_right)

        # Top
        top_left = t_right.crop((29, 50, 44, 64))
        top_left.load()
        top_right = t_left.crop((29, 50, 44, 64))
        top_right.load()
        top = Image.new("RGBA", (32, 16), self.bgcolor)
        alpha_over(top,top_left, (1, 1))
        alpha_over(top,top_right, (16, 1))

        # Front
        front_top_left = t_left.crop((43, 45, 58, 50))
        front_top_left.load()
        front_top_right = t_right.crop((43, 45, 58, 50))
        front_top_right.load()
        front_bottom_left = t_left.crop((43, 21, 58, 31))
        front_bottom_left.load()
        front_bottom_right = t_right.crop((43, 21, 58, 31))
        front_bottom_right.load()
        front_lock = t_left.crop((1, 59, 3, 63))
        front_lock.load()
        front = Image.new("RGBA", (32, 16), self.bgcolor)
        alpha_over(front, front_top_left, (1, 1))
        alpha_over(front, front_top_right, (16, 1))
        alpha_over(front, front_bottom_left, (1, 5))
        alpha_over(front, front_bottom_right, (16, 5))
        alpha_over(front, front_lock, (15, 3))

        # Back
        back_top_left = t_right.crop((14, 45, 29, 50))
        back_top_left.load()
        back_top_right = t_left.crop((14, 45, 29, 50))
        back_top_right.load()
        back_bottom_left = t_right.crop((14, 21, 29, 31))
        back_bottom_left.load()
        back_bottom_right = t_left.crop((14, 21, 29, 31))
        back_bottom_right.load()
        back = Image.new("RGBA", (32, 16), self.bgcolor)
        alpha_over(back, back_top_left, (1, 1))
        alpha_over(back, back_top_right, (16, 1))
        alpha_over(back, back_bottom_left, (1, 5))
        alpha_over(back, back_bottom_right, (16, 5))

        # left side
        side_l_top = t_left.crop((29, 45, 43, 50))
        side_l_top.load()
        side_l_bottom = t_left.crop((29, 21, 43, 31))
        side_l_bottom.load()
        side_l = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(side_l, side_l_top, (1, 1))
        alpha_over(side_l, side_l_bottom, (1, 5))
        # right side
        side_r_top = t_right.crop((0, 45, 14, 50))
        side_r_top.load()
        side_r_bottom = t_right.crop((0, 21, 14, 31))
        side_r_bottom.load()
        side_r = Image.new("RGBA", (16, 16), self.bgcolor)
        alpha_over(side_r, side_r_top, (1, 1))
        alpha_over(side_r, side_r_bottom, (1, 5))

        # double chest, left half
        if ((data & 24 == 8 and data & 7 in [3, 5]) or (data & 24 == 16 and data & 7 in [2, 4])):
            top = top.crop((0, 0, 16, 16))
            top.load()
            front = front.crop((0, 0, 16, 16))
            front.load()
            back = back.crop((0, 0, 16, 16))
            back.load()
            #~ side = side_l

        # double chest, right half
        elif ((data & 24 == 16 and data & 7 in [3, 5]) or (data & 24 == 8 and data & 7 in [2, 4])):
            top = top.crop((16, 0, 32, 16))
            top.load()
            front = front.crop((16, 0, 32, 16))
            front.load()
            back = back.crop((16, 0, 32, 16))
            back.load()
            #~ side = side_r

        else: # just in case
            return None

    # compose the final block
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    if data & 7 == 2: # north
        side = self.transform_image_side(side_r)
        alpha_over(img, side, (1, 7))
        back = self.transform_image_side(back)
        alpha_over(img, back.transpose(Image.FLIP_LEFT_RIGHT), (11, 7))
        # NOTE(review): front is transformed but never composited here
        # (it faces away from the camera) — kept to match upstream.
        front = self.transform_image_side(front)
        top = self.transform_image_top(top.rotate(180))
        alpha_over(img, top, (0, 2))

    elif data & 7 == 3: # south
        side = self.transform_image_side(side_l)
        alpha_over(img, side, (1, 7))
        front = self.transform_image_side(front).transpose(Image.FLIP_LEFT_RIGHT)
        top = self.transform_image_top(top.rotate(180))
        alpha_over(img, top, (0, 2))
        alpha_over(img, front, (11, 7))

    elif data & 7 == 4: # west
        side = self.transform_image_side(side_r)
        alpha_over(img, side.transpose(Image.FLIP_LEFT_RIGHT), (11, 7))
        front = self.transform_image_side(front)
        alpha_over(img, front, (1, 7))
        top = self.transform_image_top(top.rotate(270))
        alpha_over(img, top, (0, 2))

    elif data & 7 == 5: # east
        back = self.transform_image_side(back)
        side = self.transform_image_side(side_l).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, side, (11, 7))
        alpha_over(img, back, (1, 7))
        top = self.transform_image_top(top.rotate(270))
        alpha_over(img, top, (0, 2))

    else: # just in case
        img = None

    return img
# redstone wire
# uses pseudo-ancildata found in iterate.c
@material(blockid=55, data=list(range(128)), transparent=True)
def wire(self, blockid, data):
    """Render redstone wire.

    ``data`` is pseudo-ancildata from iterate.c: bit 0x40 marks a
    powered wire, bits 0x20/0x10 mark wire climbing the two visible
    sides, and the low bits encode horizontal connections.
    Returns a 24x24 RGBA image.
    """
    # Bit 0x40 flags a powered wire: tint bright red when powered,
    # dark red otherwise.  (Previously the two cases duplicated the
    # texture-loading code and differed only in the tint colour.)
    tint = (255, 0, 0) if data & 0b1000000 == 64 else (48, 0, 0)
    redstone_wire_t = self.load_image_texture(
        "assets/minecraft/textures/block/redstone_dust_line0.png").rotate(90)
    redstone_wire_t = self.tint_texture(redstone_wire_t, tint)
    redstone_cross_t = self.load_image_texture(
        "assets/minecraft/textures/block/redstone_dust_dot.png")
    redstone_cross_t = self.tint_texture(redstone_cross_t, tint)

    # (Removed: four per-direction "branch" images that were generated
    # here but never used anywhere in this function.)

    # generate the bottom texture
    if data & 0b111111 == 0:
        # isolated dot of redstone dust
        bottom = redstone_cross_t.copy()

    # see iterate.c for where these masks come from
    has_x = (data & 0b1010) > 0
    has_z = (data & 0b0101) > 0
    if has_x and has_z:
        # connections along both axes: cross plus both wire lines
        bottom = redstone_cross_t.copy()
        alpha_over(bottom, redstone_wire_t.copy())
        alpha_over(bottom, redstone_wire_t.copy().rotate(90))
    else:
        if has_x:
            bottom = redstone_wire_t.copy()
        elif has_z:
            bottom = redstone_wire_t.copy().rotate(90)
        elif data & 0b1111 == 0:
            bottom = redstone_cross_t.copy()

    # check for going up redstone wire on the two visible sides
    side1 = redstone_wire_t.rotate(90) if data & 0b100000 == 32 else None
    side2 = redstone_wire_t.rotate(90) if data & 0b010000 == 16 else None

    img = self.build_full_block(None, side1, side2, None, None, bottom)
    return img
# diamond ore
# NOTE(review): block() appears to register a simple solid block using a
# single texture for every face — confirm against the helper's definition.
block(blockid=56, top_image="assets/minecraft/textures/block/diamond_ore.png")
# diamond block
block(blockid=57, top_image="assets/minecraft/textures/block/diamond_block.png")
# Table blocks with no facing or other properties where sides are not all the same
# Includes: Crafting table, fletching table, cartography table, smithing table
@material(blockid=[58, 11359, 11360, 11361], solid=True, nodata=True)
def block_table(self, blockid, data):
    """Render a full "table" block whose four sides differ.

    The table to draw is selected by blockid; the world rotation picks
    which two side textures are visible and how the top is rotated.
    """
    names = {58: "crafting_table",
             11359: "fletching_table",
             11360: "cartography_table",
             11361: "smithing_table"}
    # Texture suffix for sides 1..4 of each table type.
    faces = {58: ["front", "side", "front", "side"],
             11359: ["front", "side", "side", "front"],
             11360: ["side3", "side3", "side2", "side1"],
             11361: ["front", "side", "side", "front"]}
    base = names[blockid]
    face_map = faces[blockid]
    rot = self.rotation

    # Top texture does not vary with rotation, but its orientation does.
    folder = "assets/minecraft/textures/block"
    tex_top = self.load_image_texture("{}/{}.png".format(folder, base + "_top")).copy()
    tex_top = tex_top.rotate([0, 270, 180, 90][rot])

    # Choose which of the four side textures end up on the two
    # camera-facing block sides for this rotation.
    suffix3 = face_map[[2, 3, 1, 0][rot]]
    suffix4 = face_map[[3, 1, 0, 2][rot]]
    tex_s3 = self.load_image_texture("{}/{}.png".format(folder, base + "_" + suffix3))
    tex_s4 = self.load_image_texture("{}/{}.png".format(folder, base + "_" + suffix4)).copy()
    tex_s4 = tex_s4.transpose(Image.FLIP_LEFT_RIGHT)

    return self.build_full_block(tex_top, None, None, tex_s3, tex_s4, None)
@material(blockid=11366, data=list(range(8)), transparent=True, solid=True, nospawn=True)
def lectern(self, blockid, data):
    """Render a lectern.

    The low 2 bits of ``data`` encode the facing; bit 0b100 means a
    book is placed on the stand.  The block is composed of a base, a
    central pillar, and an angled stand (optionally with a book),
    assembled as a 24x24 image.
    """
    # Do rotation, mask to not clobber book data
    data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4)

    # Load textures
    base_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_base.png")
    front_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_front.png")
    side_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_sides.png")
    top_raw_t = self.load_image_texture("assets/minecraft/textures/block/lectern_top.png")

    def create_tile(img_src, coord_crop, coord_paste, rot):
        # Takes an image, crops a region, optionally rotates the
        # texture, then finally pastes it onto a 16x16 image
        img_out = Image.new("RGBA", (16, 16), self.bgcolor)
        img_in = img_src.crop(coord_crop)
        if rot != 0:
            img_in = img_in.rotate(rot, expand=True)
        img_out.paste(img_in, coord_paste)
        return img_out

    def darken_image(img_src, darken_value):
        # Takes an image & alters the brightness, leaving alpha intact
        alpha = img_src.split()[3]
        img_out = ImageEnhance.Brightness(img_src).enhance(darken_value)
        img_out.putalpha(alpha)
        return img_out

    # Generate base
    base_top_t = base_raw_t.rotate([0, 270, 180, 90][data & 0b11])
    # Front & side textures are one pixel taller than they should be
    # pre-transformation as otherwise the topmost row of pixels
    # post-transformation are rather transparent, which results in
    # a visible gap between the base's sides & top
    base_front_t = create_tile(base_raw_t, (0, 13, 16, 16), (0, 13), 0)
    base_side_t = create_tile(base_raw_t, (0, 5, 16, 8), (0, 13), 0)
    base_side3_t = base_front_t if data & 0b11 == 1 else base_side_t
    base_side4_t = base_front_t if data & 0b11 == 0 else base_side_t
    img = self.build_full_block((base_top_t, 14), None, None, base_side3_t, base_side4_t, None)

    # Generate central pillar
    side_flip_t = side_raw_t.transpose(Image.FLIP_LEFT_RIGHT)
    # Define parameters used to obtain the texture for each side
    pillar_param = [{'img': front_raw_t, 'crop': (8, 4, 16, 16), 'paste': (4, 2), 'rot': 0},   # South
                    {'img': side_raw_t, 'crop': (2, 8, 15, 16), 'paste': (4, 1), 'rot': 270},  # West
                    {'img': front_raw_t, 'crop': (0, 4, 8, 13), 'paste': (4, 5), 'rot': 0},    # North
                    {'img': side_flip_t, 'crop': (2, 8, 15, 16), 'paste': (4, 1), 'rot': 90}]  # East
    # Determine which sides are rendered
    pillar_side = [pillar_param[(3 - (data & 0b11)) % 4], pillar_param[(2 - (data & 0b11)) % 4]]

    pillar_side3_t = create_tile(pillar_side[0]['img'], pillar_side[0]['crop'],
                                 pillar_side[0]['paste'], pillar_side[0]['rot'])
    pillar_side4_t = create_tile(pillar_side[1]['img'], pillar_side[1]['crop'],
                                 pillar_side[1]['paste'], pillar_side[1]['rot'])
    pillar_side4_t = pillar_side4_t.transpose(Image.FLIP_LEFT_RIGHT)
    pillar_side3_t = self.transform_image_side(pillar_side3_t)
    pillar_side3_t = darken_image(pillar_side3_t, 0.9)
    pillar_side4_t = self.transform_image_side(pillar_side4_t).transpose(Image.FLIP_LEFT_RIGHT)
    pillar_side4_t = darken_image(pillar_side4_t, 0.8)
    alpha_over(img, pillar_side3_t, (3, 4), pillar_side3_t)
    alpha_over(img, pillar_side4_t, (9, 4), pillar_side4_t)

    # Generate stand
    if (data & 0b11) in [0, 1]:  # South, West
        stand_side3_t = create_tile(side_raw_t, (0, 0, 16, 4), (0, 4), 0)
        stand_side4_t = create_tile(side_raw_t, (0, 4, 13, 8), (0, 0), -22.5)
    else:  # North, East
        stand_side3_t = create_tile(side_raw_t, (0, 4, 16, 8), (0, 0), 0)
        stand_side4_t = create_tile(side_raw_t, (0, 4, 13, 8), (0, 0), 22.5)
    stand_side3_t = self.transform_image_angle(stand_side3_t, math.radians(22.5))
    stand_side3_t = darken_image(stand_side3_t, 0.9)
    stand_side4_t = self.transform_image_side(stand_side4_t).transpose(Image.FLIP_LEFT_RIGHT)
    stand_side4_t = darken_image(stand_side4_t, 0.8)
    stand_top_t = create_tile(top_raw_t, (0, 1, 16, 14), (0, 1), 0)
    if data & 0b100:
        # Lectern has a book, modify the stand top texture
        book_raw_t = self.load_image("assets/minecraft/textures/entity/enchanting_table_book.png")
        # NOTE(review): book_t appears unused below — kept to match upstream.
        book_t = Image.new("RGBA", (14, 10), self.bgcolor)
        book_part_t = book_raw_t.crop((0, 0, 7, 10))  # Left cover
        alpha_over(stand_top_t, book_part_t, (1, 3), book_part_t)
        book_part_t = book_raw_t.crop((15, 0, 22, 10))  # Right cover
        alpha_over(stand_top_t, book_part_t, (8, 3))
        book_part_t = book_raw_t.crop((24, 10, 29, 18)).rotate(180)  # Left page
        alpha_over(stand_top_t, book_part_t, (3, 4), book_part_t)
        book_part_t = book_raw_t.crop((29, 10, 34, 18)).rotate(180)  # Right page
        alpha_over(stand_top_t, book_part_t, (8, 4), book_part_t)
    # Perform affine transformation
    transform_matrix = numpy.matrix(numpy.identity(3))
    if (data & 0b11) in [0, 1]:  # South, West
        # Translate: 8 -X, 8 -Y
        transform_matrix *= numpy.matrix([[1, 0, 8], [0, 1, 8], [0, 0, 1]])
        # Rotate 40 degrees clockwise
        tc = math.cos(math.radians(40))
        ts = math.sin(math.radians(40))
        transform_matrix *= numpy.matrix([[tc, ts, 0], [-ts, tc, 0], [0, 0, 1]])
        # Shear in the Y direction
        tt = math.tan(math.radians(10))
        transform_matrix *= numpy.matrix([[1, 0, 0], [tt, 1, 0], [0, 0, 1]])
        # Scale to 70% height & 110% width
        transform_matrix *= numpy.matrix([[1 / 1.1, 0, 0], [0, 1 / 0.7, 0], [0, 0, 1]])
        # Translate: 12 +X, 8 +Y
        transform_matrix *= numpy.matrix([[1, 0, -12], [0, 1, -8], [0, 0, 1]])
    else:  # North, East
        # Translate: 8 -X, 8 -Y
        transform_matrix *= numpy.matrix([[1, 0, 8], [0, 1, 8], [0, 0, 1]])
        # Shear in the X direction
        tt = math.tan(math.radians(25))
        transform_matrix *= numpy.matrix([[1, tt, 0], [0, 1, 0], [0, 0, 1]])
        # Scale to 80% height
        transform_matrix *= numpy.matrix([[1, 0, 0], [0, 1 / 0.8, 0], [0, 0, 1]])
        # Rotate 220 degrees clockwise
        tc = math.cos(math.radians(40 + 180))
        ts = math.sin(math.radians(40 + 180))
        transform_matrix *= numpy.matrix([[tc, ts, 0], [-ts, tc, 0], [0, 0, 1]])
        # Scale to 60% height
        transform_matrix *= numpy.matrix([[1, 0, 0], [0, 1 / 0.6, 0], [0, 0, 1]])
        # Translate: +13 X, +7 Y
        transform_matrix *= numpy.matrix([[1, 0, -13], [0, 1, -7], [0, 0, 1]])
    transform_matrix = numpy.array(transform_matrix)[:2, :].ravel().tolist()
    stand_top_t = stand_top_t.transform((24, 24), Image.AFFINE, transform_matrix)

    img_stand = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(img_stand, stand_side3_t, (-4, 2), stand_side3_t)  # Fix some holes
    alpha_over(img_stand, stand_side3_t, (-3, 3), stand_side3_t)
    alpha_over(img_stand, stand_side4_t, (12, 5), stand_side4_t)
    alpha_over(img_stand, stand_top_t, (0, 0), stand_top_t)
    # Flip the stand if North or South facing
    if (data & 0b11) in [0, 2]:
        img_stand = img_stand.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(img, img_stand, (0, -2), img_stand)

    return img
@material(blockid=11367, data=list(range(4)), solid=True)
def loom(self, blockid, data):
    """Render a loom: rotated top plus two distinct visible sides."""
    # fold the world rotation into the facing value
    facing = (self.rotation + data) % 4

    folder = "assets/minecraft/textures/block"
    top_img = self.load_image_texture("{}/loom_top.png".format(folder)).copy()
    top_img = top_img.rotate([180, 90, 0, 270][facing])

    # the "front" texture is only visible on one of the two rendered
    # sides, depending on facing
    face3 = self.load_image_texture(
        "{}/loom_{}.png".format(folder, "front" if facing == 1 else "side"))
    face4 = self.load_image_texture(
        "{}/loom_{}.png".format(folder, "front" if facing == 0 else "side")).copy()
    face4 = face4.transpose(Image.FLIP_LEFT_RIGHT)

    return self.build_full_block(top_img, None, None, face3, face4, None)
@material(blockid=11368, data=list(range(4)), transparent=True, solid=True, nospawn=True)
def stonecutter(self, blockid, data):
    """Render a stonecutter: a 7/16-high block with a saw blade on top."""
    # fold the world rotation into the facing value
    facing = (self.rotation + data) % 4

    top_face = self.load_image_texture("assets/minecraft/textures/block/stonecutter_top.png").copy()
    side_face = self.load_image_texture("assets/minecraft/textures/block/stonecutter_side.png")
    # Stonecutter saw texture contains multiple tiles, since it's
    # 16px wide rely on load_image_texture() to crop appropriately
    saw = self.load_image_texture("assets/minecraft/textures/block/stonecutter_saw.png").copy()

    top_face = top_face.rotate([180, 90, 0, 270][facing])
    img = self.build_full_block((top_face, 7), None, None, side_face, side_face, None)

    # Add the saw blade: mirror before and after the side projection so
    # it lands on the correct diagonal for north/south facings
    mirror = facing in [0, 2]
    if mirror:
        saw = saw.transpose(Image.FLIP_LEFT_RIGHT)
    saw = self.transform_image_side(saw)
    if mirror:
        saw = saw.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(img, saw, (6, -4), saw)

    return img
@material(blockid=11369, data=list(range(12)), transparent=True, solid=True, nospawn=True)
def grindstone(self, blockid, data):
    """Render a grindstone.

    The low 2 bits of ``data`` encode facing; bit 0b0100 marks a
    wall-mounted grindstone (floor/ceiling otherwise, selected via
    ``data >> 3``).  The block is assembled from a stone wheel (side +
    round faces), a pivot, and a wooden leg.
    """
    # Do rotation, mask to not clobber mounting info
    data = data & 0b1100 | ((self.rotation + (data & 0b11)) % 4)

    # Load textures
    side_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_side.png").copy()
    round_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_round.png").copy()
    pivot_raw_t = self.load_image_texture("assets/minecraft/textures/block/grindstone_pivot.png").copy()
    leg_raw_t = self.load_image_texture("assets/minecraft/textures/block/dark_oak_log.png").copy()

    def create_tile(img_src, coord_crop, coord_paste, scale):
        # Takes an image, crops a region, optionally scales the
        # texture, then finally pastes it onto a 16x16 image
        img_out = Image.new("RGBA", (16, 16), self.bgcolor)
        img_in = img_src.crop(coord_crop)
        if scale >= 0 and scale != 1:
            w, h = img_in.size
            img_in = img_in.resize((int(w * scale), int(h * scale)), Image.NEAREST)
        img_out.paste(img_in, coord_paste)
        return img_out

    # Set variables defining positions of various parts
    wall_mounted = bool(data & 0b0100)
    rot_leg = [0, 270, 0][data >> 2]
    if wall_mounted:
        pos_leg = (32, 28) if data & 0b11 in [2, 3] else (10, 18)
        coord_leg = [(0, 0), (-10, -1), (2, 3)]
        offset_final = [(2, 1), (-2, 1), (-2, -1), (2, -1)][data & 0b11]
    else:
        # floor (data >> 3 == 0) vs ceiling (data >> 3 == 1) mounting
        pos_leg = [(22, 31), (22, 9)][data >> 3]
        coord_leg = [(0, 0), (-1, 2), (-2, -3)]
        offset_final = (0, 2 * (data >> 2) - 1)

    # Create parts
    # Scale up small parts like pivot & leg to avoid ugly results
    # when shearing & combining parts, then scale down to original
    # size just before final image composition
    scale_factor = 2
    side_t = create_tile(side_raw_t, (0, 0, 12, 12), (2, 0), 1)
    round_ud_t = create_tile(round_raw_t, (0, 0, 8, 12), (4, 2), 1)
    round_lr_t = create_tile(round_raw_t, (0, 0, 8, 12), (4, 0), 1)
    pivot_outer_t = create_tile(pivot_raw_t, (0, 0, 6, 6), (2, 2), scale_factor)
    pivot_lr_t = create_tile(pivot_raw_t, (6, 0, 8, 6), (2, 2), scale_factor)
    pivot_ud_t = create_tile(pivot_raw_t, (8, 0, 10, 6), (2, 2), scale_factor)
    leg_outer_t = create_tile(leg_raw_t, (6, 9, 10, 16), (2, 2), scale_factor).rotate(rot_leg)
    leg_lr_t = create_tile(leg_raw_t, (12, 9, 14, 16), (2, 2), scale_factor).rotate(rot_leg)
    leg_ud_t = create_tile(leg_raw_t, (2, 6, 4, 10), (2, 2), scale_factor)

    # Transform to block sides & tops
    side_t = self.transform_image_side(side_t)
    round_ud_t = self.transform_image_top(round_ud_t)
    round_lr_t = self.transform_image_side(round_lr_t).transpose(Image.FLIP_LEFT_RIGHT)
    pivot_outer_t = self.transform_image_side(pivot_outer_t)
    pivot_lr_t = self.transform_image_side(pivot_lr_t).transpose(Image.FLIP_LEFT_RIGHT)
    pivot_ud_t = self.transform_image_top(pivot_ud_t)
    leg_outer_t = self.transform_image_side(leg_outer_t)
    if wall_mounted:
        leg_lr_t = self.transform_image_top(leg_lr_t).transpose(Image.FLIP_LEFT_RIGHT)
        leg_ud_t = self.transform_image_side(leg_ud_t).transpose(Image.FLIP_LEFT_RIGHT)
    else:
        leg_lr_t = self.transform_image_side(leg_lr_t).transpose(Image.FLIP_LEFT_RIGHT)
        leg_ud_t = self.transform_image_top(leg_ud_t)

    # Compose leg texture
    img_leg = Image.new("RGBA", (24 * scale_factor, 24 * scale_factor), self.bgcolor)
    alpha_over(img_leg, leg_outer_t, coord_leg[0], leg_outer_t)
    alpha_over(img_leg, leg_lr_t, coord_leg[1], leg_lr_t)
    alpha_over(img_leg, leg_ud_t, coord_leg[2], leg_ud_t)

    # Compose pivot texture (& combine with leg)
    img_pivot = Image.new("RGBA", (24 * scale_factor, 24 * scale_factor), self.bgcolor)
    alpha_over(img_pivot, pivot_ud_t, (20, 18), pivot_ud_t)
    alpha_over(img_pivot, pivot_lr_t, (23, 24), pivot_lr_t)  # Fix gaps between face edges
    alpha_over(img_pivot, pivot_lr_t, (24, 24), pivot_lr_t)
    alpha_over(img_pivot, img_leg, pos_leg, img_leg)
    alpha_over(img_pivot, pivot_outer_t, (21, 21), pivot_outer_t)
    if hasattr(Image, "LANCZOS"):  # workaround for older Pillow
        img_pivot = img_pivot.resize((24, 24), Image.LANCZOS)
    else:
        img_pivot = img_pivot.resize((24, 24))

    # Combine leg, side, round & pivot
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    img_final = img.copy()
    alpha_over(img, img_pivot, (1, -5), img_pivot)
    alpha_over(img, round_ud_t, (0, 2), round_ud_t)  # Fix gaps between face edges
    alpha_over(img, side_t, (3, 6), side_t)
    alpha_over(img, round_ud_t, (0, 1), round_ud_t)
    alpha_over(img, round_lr_t, (10, 6), round_lr_t)
    alpha_over(img, img_pivot, (-5, -1), img_pivot)
    if (data & 0b11) in [1, 3]:
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(img_final, img, offset_final, img)

    return img_final
# crops with 8 data values (like wheat)
@material(blockid=59, data=list(range(8)), transparent=True, nospawn=True)
def crops8(self, blockid, data):
    """Render an 8-stage crop (wheat) as one flat and two crossed planes."""
    tex = self.load_image_texture("assets/minecraft/textures/block/wheat_stage%d.png" % data)
    lying = self.transform_image_top(tex)
    upright = self.transform_image_side(tex)
    mirrored = upright.transpose(Image.FLIP_LEFT_RIGHT)

    img = Image.new("RGBA", (24,24), self.bgcolor)
    for layer, offset in ((lying, (0,12)), (upright, (6,3)), (mirrored, (6,3))):
        alpha_over(img, layer, offset, layer)
    return img
# farmland and grass path (15/16 blocks)
@material(blockid=[60, 208], data=list(range(2)), solid=True, transparent=True, nospawn=True)
def farmland(self, blockid, data):
    """Render farmland (60, dry/moist via data) or a grass/dirt path (208)."""
    tex_dir = "assets/minecraft/textures/block/"
    if blockid == 60:
        top_name = "farmland.png" if data == 0 else "farmland_moist.png"
        top = self.load_image_texture(tex_dir + top_name)
        # dirt.png is 16 pixels tall, so we need to crop it before building full block
        side = self.load_image_texture(tex_dir + "dirt.png").copy()
        side = side.crop((0, 1, 16, 16))
    else:
        top = self.load_image_texture(tex_dir + "dirt_path_top.png")
        # side already has 1 transparent pixel at the top, so it doesn't need to be modified
        side = self.load_image_texture(tex_dir + "dirt_path_side.png")
    # shift the top image down 1 pixel so the block is 15/16 high
    return self.build_full_block((top, 1), side, side, side, side)
# signposts
@material(blockid=[63,11401,11402,11403,11404,11405,11406,12505,12506], data=list(range(16)), transparent=True)
def signpost(self, blockid, data):
    """Render a free-standing sign post for each wood variant.

    ``data`` is one of 16 orientations; each world-rotation step shifts
    it by 4.
    """
    # fold the world rotation into the orientation
    data = (data + 4 * self.rotation) % 16

    # (texture on sign, texture on stick)
    wood_lookup = {
        63: ("oak_planks.png", "oak_log.png"),
        11401: ("oak_planks.png", "oak_log.png"),
        11402: ("spruce_planks.png", "spruce_log.png"),
        11403: ("birch_planks.png", "birch_log.png"),
        11404: ("jungle_planks.png", "jungle_log.png"),
        11405: ("acacia_planks.png", "acacia_log.png"),
        11406: ("dark_oak_planks.png", "dark_oak_log.png"),
        12505: ("crimson_planks.png", "crimson_stem.png"),
        12506: ("warped_planks.png", "warped_stem.png"),
    }
    board_name, stick_name = wood_lookup[blockid]
    board = self.load_image_texture("assets/minecraft/textures/block/" + board_name).copy()

    # cut the planks to the size of a signpost
    ImageDraw.Draw(board).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    # If the signpost is looking directly to the image, draw some
    # random dots, they will look as text.
    if data in (0,1,2,3,4,5,15):
        for _ in range(15):
            board.putpixel((randint(4,11), randint(3,7)), (0,0,0,255))

    # Minecraft uses wood texture for the signpost stick
    stick = self.load_image_texture("assets/minecraft/textures/block/" + stick_name)
    stick = stick.resize((12,12), Image.ANTIALIAS)
    ImageDraw.Draw(stick).rectangle((2,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))

    img = Image.new("RGBA", (24,24), self.bgcolor)

    #         W            N          ~90       E           S            ~270
    angles = (330.,345.,0.,15.,30.,55.,95.,120.,150.,165.,180.,195.,210.,230.,265.,310.)
    post = self.transform_image_angle(board, math.radians(angles[data]))

    # choose the position of the "3D effect"
    shift = 0
    if data in (1,6,7,8,9,14):
        shift = -1
    elif data in (3,4,5,11,12,13):
        shift = +1

    alpha_over(img, stick,(11, 8),stick)
    # a slightly brighter copy pasted with a small offset gives the
    # signpost some 3D effect
    highlight = ImageEnhance.Brightness(post).enhance(1.2)
    alpha_over(img, highlight,(shift, -3),highlight)
    alpha_over(img, post, (0,-2), post)
    return img
# wooden and iron door
# uses pseudo-ancildata found in iterate.c
@material(blockid=[64, 71, 193, 194, 195, 196, 197, 499, 500], data=list(range(32)), transparent=True)
def door(self, blockid, data):
    """Render one half of a door (classic wood, iron, and the newer wood variants).

    Pseudo data layout (assembled in iterate.c):
      bits 0-1: facing when closed (0=W, 1=N, 2=E, 3=S)
      bit  2:   swung open
      bit  3:   top half (else bottom half)
      bit  4:   hinge on the left (else right)
    """
    # Rotate only the 2-bit facing value, masked so the top/bottom and
    # swung bits are not clobbered. Facing is a compass value, so world
    # rotation is just modular addition.
    if self.rotation in (1, 2, 3):
        data = (data & 0b11100) | (((data & 0b00011) + self.rotation) % 4)

    # Every variant uses "<name>_door_top.png" / "<name>_door_bottom.png",
    # so pick the variant from a table instead of two long if/elif chains.
    door_name = {
        64: "oak_door",       # classic wood door
        71: "iron_door",
        193: "spruce_door",
        194: "birch_door",
        195: "jungle_door",
        196: "acacia_door",
        197: "dark_oak_door",
        499: "crimson_door",
        500: "warped_door",
    }[blockid]
    half = "top" if data & 0x8 == 0x8 else "bottom"
    raw_door = self.load_image_texture(
        "assets/minecraft/textures/block/%s_%s.png" % (door_name, half))

    # if you want to render all doors as closed, then force
    # closed to be True
    if data & 0x4 == 0x4:
        closed = False
    else:
        closed = True

    if data & 0x10 == 0x10:
        # hinge on the left (facing same door direction)
        hinge_on_left = True
    else:
        # hinge on the right (default single door)
        hinge_on_left = False

    # mask out the high bits to figure out the orientation
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    if (data & 0x03) == 0:  # facing west when closed
        if hinge_on_left:
            if closed:
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                alpha_over(img, tex, (0, 6), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (12, 6), tex)
        else:
            if closed:
                tex = self.transform_image_side(raw_door)
                alpha_over(img, tex, (0, 6), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (0, 0), tex)

    if (data & 0x03) == 1:  # facing north when closed
        if hinge_on_left:
            if closed:
                tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (0, 0), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door)
                alpha_over(img, tex, (0, 6), tex)
        else:
            if closed:
                tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (0, 0), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door)
                alpha_over(img, tex, (12, 0), tex)

    if (data & 0x03) == 2:  # facing east when closed
        if hinge_on_left:
            if closed:
                tex = self.transform_image_side(raw_door)
                alpha_over(img, tex, (12, 0), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door)
                tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (0, 0), tex)
        else:
            if closed:
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                alpha_over(img, tex, (12, 0), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (12, 6), tex)

    if (data & 0x03) == 3:  # facing south when closed
        if hinge_on_left:
            if closed:
                tex = self.transform_image_side(raw_door).transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (12, 6), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                alpha_over(img, tex, (12, 0), tex)
        else:
            if closed:
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                tex = tex.transpose(Image.FLIP_LEFT_RIGHT)
                alpha_over(img, tex, (12, 6), tex)
            else:
                # flip first to set the doornob on the correct side
                tex = self.transform_image_side(raw_door.transpose(Image.FLIP_LEFT_RIGHT))
                alpha_over(img, tex, (0, 6), tex)

    return img
# ladder
@material(blockid=65, data=[2, 3, 4, 5], transparent=True)
def ladder(self, blockid, data):
    """Render a ladder attached to one of the four cardinal block faces."""
    # Re-map the facing value for the current world rotation.
    facing_remap = {
        1: {2: 5, 3: 4, 4: 2, 5: 3},
        2: {2: 3, 3: 2, 4: 5, 5: 4},
        3: {2: 4, 3: 5, 4: 3, 5: 2},
    }
    if self.rotation in facing_remap:
        data = facing_remap[self.rotation][data]

    canvas = Image.new("RGBA", (24, 24), self.bgcolor)
    raw_texture = self.load_image_texture("assets/minecraft/textures/block/ladder.png")

    if data == 5:
        # normally this ladder would be obscured by the block it's attached
        # to, but since ladders can apparently be placed on transparent
        # blocks, we have to render it anyway. same goes for data == 2.
        face = self.transform_image_side(raw_texture)
        alpha_over(canvas, face, (0, 6), face)
    elif data == 2:
        face = self.transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(canvas, face, (12, 6), face)
    elif data == 3:
        face = self.transform_image_side(raw_texture).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(canvas, face, (0, 0), face)
    elif data == 4:
        face = self.transform_image_side(raw_texture)
        alpha_over(canvas, face, (12, 0), face)
    return canvas
# wall signs
@material(blockid=[68, 11407, 11408, 11409, 11410, 11411, 11412, 12507, 12508],
          data=[2, 3, 4, 5], transparent=True)
def wall_sign(self, blockid, data):
    """Render a sign mounted on a wall; one plank texture per wood variant."""
    # Re-map the facing value for the current world rotation.
    facing_remap = {
        1: {2: 5, 3: 4, 4: 2, 5: 3},
        2: {2: 3, 3: 2, 4: 5, 5: 4},
        3: {2: 4, 3: 5, 4: 3, 5: 2},
    }
    if self.rotation in facing_remap:
        data = facing_remap[self.rotation][data]

    plank_for_blockid = {
        68: "oak_planks.png",
        11407: "oak_planks.png",
        11408: "spruce_planks.png",
        11409: "birch_planks.png",
        11410: "jungle_planks.png",
        11411: "acacia_planks.png",
        11412: "dark_oak_planks.png",
        12507: "crimson_planks.png",
        12508: "warped_planks.png",
    }
    texture = self.load_image_texture(
        "assets/minecraft/textures/block/" + plank_for_blockid[blockid]).copy()
    # cut the planks down to signpost size
    ImageDraw.Draw(texture).rectangle((0, 12, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
    # NOTE: the random black "text" dots drawn on signposts are deliberately
    # not drawn here; wall signs are left blank for decoration.

    img = Image.new("RGBA", (24, 24), self.bgcolor)
    # data -> (x shift of the bright copy, which build_full_block face slot
    # the texture goes in): slots are (top, N, E, W, S).
    placement = {
        2: (+1, 4),  # east
        3: (-1, 1),  # west
        4: (+1, 2),  # north
        5: (-1, 3),  # south
    }
    incrementx, face_slot = placement[data]
    faces = [None] * 5
    faces[face_slot] = texture
    sign = self.build_full_block(*faces)

    # a brighter copy pasted with a 1px shift fakes a small 3D effect
    sign2 = ImageEnhance.Brightness(sign).enhance(1.2)
    alpha_over(img, sign2, (incrementx, 2), sign2)
    alpha_over(img, sign, (0, 3), sign)
    return img
# levers
@material(blockid=69, data=list(range(16)), transparent=True)
def levers(self, blockid, data):
    """Render a lever (wall- or floor-mounted).

    data bit 0x8 is the powered flag; the low 3 bits give the attachment:
    1-4 are on-wall facings (S/N/W/E), 5-6 are floor placements.
    """
    if data & 8 == 8: powered = True
    else: powered = False

    data = data & 7  # strip the powered bit, keep the attachment value

    # first rotations
    if self.rotation == 1:
        # on wall levers
        if data == 1: data = 3
        elif data == 2: data = 4
        elif data == 3: data = 2
        elif data == 4: data = 1

        # on floor levers
        elif data == 5: data = 6
        elif data == 6: data = 5

    elif self.rotation == 2:
        if data == 1: data = 2
        elif data == 2: data = 1
        elif data == 3: data = 4
        elif data == 4: data = 3
        elif data == 5: data = 5  # no-op: floor levers are unchanged by a 180° turn
        elif data == 6: data = 6

    elif self.rotation == 3:
        if data == 1: data = 4
        elif data == 2: data = 3
        elif data == 3: data = 1
        elif data == 4: data = 2
        elif data == 5: data = 6
        elif data == 6: data = 5

    # generate the texture for the base of the lever
    # (a 16x16 stone tile cropped down to the small cobble base plate)
    t_base = self.load_image_texture("assets/minecraft/textures/block/stone.png").copy()

    ImageDraw.Draw(t_base).rectangle((0,0,15,3),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t_base).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t_base).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t_base).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    # generate the texture for the stick: paste a darkened copy with a 1px
    # offset under the original to give the stick some thickness
    stick = self.load_image_texture("assets/minecraft/textures/block/lever.png").copy()
    c_stick = Image.new("RGBA", (16,16), self.bgcolor)

    tmp = ImageEnhance.Brightness(stick).enhance(0.8)
    alpha_over(c_stick, tmp, (1,0), tmp)
    alpha_over(c_stick, stick, (0,0), stick)
    t_stick = self.transform_image_side(c_stick.rotate(45, Image.NEAREST))

    # where the lever will be composed
    img = Image.new("RGBA", (24,24), self.bgcolor)

    # wall levers
    if data == 1: # facing SOUTH
        # levers can't be placed in transparent blocks, so this
        # direction is almost invisible
        return None

    elif data == 2: # facing NORTH
        base = self.transform_image_side(t_base)

        # paste it twice with different brightness to make a fake 3D effect
        alpha_over(img, base, (12,-1), base)

        alpha = base.split()[3]
        base = ImageEnhance.Brightness(base).enhance(0.9)
        base.putalpha(alpha)

        alpha_over(img, base, (11,0), base)

        # paste the lever stick (flipped/moved when powered)
        pos = (7,-7)
        if powered:
            t_stick = t_stick.transpose(Image.FLIP_TOP_BOTTOM)
            pos = (7,6)
        alpha_over(img, t_stick, pos, t_stick)

    elif data == 3: # facing WEST
        base = self.transform_image_side(t_base)

        # paste it twice with different brightness to make a fake 3D effect
        base = base.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, base, (0,-1), base)

        alpha = base.split()[3]
        base = ImageEnhance.Brightness(base).enhance(0.9)
        base.putalpha(alpha)

        alpha_over(img, base, (1,0), base)

        # paste the lever stick
        t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT)
        pos = (5,-7)
        if powered:
            t_stick = t_stick.transpose(Image.FLIP_TOP_BOTTOM)
            pos = (6,6)
        alpha_over(img, t_stick, pos, t_stick)

    elif data == 4: # facing EAST
        # levers can't be placed in transparent blocks, so this
        # direction is almost invisible
        return None

    # floor levers
    elif data == 5: # pointing south when off
        # lever base, fake 3d again
        base = self.transform_image_top(t_base)

        alpha = base.split()[3]
        tmp = ImageEnhance.Brightness(base).enhance(0.8)
        tmp.putalpha(alpha)

        alpha_over(img, tmp, (0,12), tmp)
        alpha_over(img, base, (0,11), base)

        # lever stick points the other way when unpowered
        pos = (3,2)
        if not powered:
            t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT)
            pos = (11,2)
        alpha_over(img, t_stick, pos, t_stick)

    elif data == 6: # pointing east when off
        # lever base, fake 3d again (base rotated 90° for this axis)
        base = self.transform_image_top(t_base.rotate(90))

        alpha = base.split()[3]
        tmp = ImageEnhance.Brightness(base).enhance(0.8)
        tmp.putalpha(alpha)

        alpha_over(img, tmp, (0,12), tmp)
        alpha_over(img, base, (0,11), base)

        # lever stick
        pos = (2,3)
        if not powered:
            t_stick = t_stick.transpose(Image.FLIP_LEFT_RIGHT)
            pos = (10,2)
        alpha_over(img, t_stick, pos, t_stick)

    return img
# wooden and stone pressure plates, and weighted pressure plates
@material(blockid=[70, 72, 147, 148, 11301, 11302, 11303, 11304, 11305, 1033, 11517, 11518],
          data=[0, 1], transparent=True)
def pressure_plate(self, blockid, data):
    """Render a pressure plate; data 0 = unpressed, 1 = pressed flat."""
    texture_name = {70: "assets/minecraft/textures/block/stone.png",              # stone
                    72: "assets/minecraft/textures/block/oak_planks.png",         # oak
                    11301: "assets/minecraft/textures/block/spruce_planks.png",   # spruce
                    11302: "assets/minecraft/textures/block/birch_planks.png",    # birch
                    11303: "assets/minecraft/textures/block/jungle_planks.png",   # jungle
                    11304: "assets/minecraft/textures/block/acacia_planks.png",   # acacia
                    11305: "assets/minecraft/textures/block/dark_oak_planks.png", # dark oak
                    11517: "assets/minecraft/textures/block/crimson_planks.png",  # crimson
                    11518: "assets/minecraft/textures/block/warped_planks.png",   # warped
                    147: "assets/minecraft/textures/block/gold_block.png",        # light golden
                    148: "assets/minecraft/textures/block/iron_block.png",        # heavy iron
                    1033: "assets/minecraft/textures/block/polished_blackstone.png"
                    }[blockid]
    plate = self.load_image_texture(texture_name).copy()

    # pressure plates are smaller than a full block: clear the outer border
    ImageDraw.Draw(plate).rectangle((0, 0, 15, 15), outline=(0, 0, 0, 0))

    img = Image.new("RGBA", (24, 24), self.bgcolor)
    top = self.transform_image_top(plate)

    # darker copy (alpha preserved) used for a 1px fake-3D shadow
    mask = top.split()[3]
    shadow = ImageEnhance.Brightness(top).enhance(0.8)
    shadow.putalpha(mask)

    if data == 0:
        # unpressed: raised look via the offset shadow
        alpha_over(img, shadow, (0, 12), shadow)
        alpha_over(img, top, (0, 11), top)
    else:
        # pressed: drawn flat against the ground
        alpha_over(img, top, (0, 12), top)
    return img
# normal and glowing redstone ore (both states share one texture)
block(blockid=[73, 74], top_image="assets/minecraft/textures/block/redstone_ore.png")
# stone and wood buttons
@material(blockid=(77, 143, 11326, 11327, 11328, 11329, 11330, 1034, 11515, 11516),
          data=list(range(16)), transparent=True)
def buttons(self, blockid, data):
    """Render a button (stone, blackstone, or any wood variant).

    The low 3 bits give the attachment: 1-4 on a wall (S/N/W/E),
    5-6 lying on the floor with different long-axis orientations.
    """
    # 0x8 is set if the button is pressed mask this info and render
    # it as unpressed
    data = data & 0x7

    # re-map the facing value for the current world rotation
    if self.rotation == 1:
        if data == 1: data = 3
        elif data == 2: data = 4
        elif data == 3: data = 2
        elif data == 4: data = 1
        elif data == 5: data = 6
        elif data == 6: data = 5
    elif self.rotation == 2:
        if data == 1: data = 2
        elif data == 2: data = 1
        elif data == 3: data = 4
        elif data == 4: data = 3
    elif self.rotation == 3:
        if data == 1: data = 4
        elif data == 2: data = 3
        elif data == 3: data = 1
        elif data == 4: data = 2
        elif data == 5: data = 6
        elif data == 6: data = 5

    texturepath = {77: "assets/minecraft/textures/block/stone.png",
                   143: "assets/minecraft/textures/block/oak_planks.png",
                   11326: "assets/minecraft/textures/block/spruce_planks.png",
                   11327: "assets/minecraft/textures/block/birch_planks.png",
                   11328: "assets/minecraft/textures/block/jungle_planks.png",
                   11329: "assets/minecraft/textures/block/acacia_planks.png",
                   11330: "assets/minecraft/textures/block/dark_oak_planks.png",
                   1034: "assets/minecraft/textures/block/polished_blackstone.png",
                   11515: "assets/minecraft/textures/block/crimson_planks.png",
                   11516: "assets/minecraft/textures/block/warped_planks.png"
                   }[blockid]

    t = self.load_image_texture(texturepath).copy()

    # generate the texture for the button: crop the full tile down to the
    # small 6x4 button face by blanking everything around it
    ImageDraw.Draw(t).rectangle((0,0,15,5),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t).rectangle((0,10,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(t).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    img = Image.new("RGBA", (24,24), self.bgcolor)

    if data < 5:
        # wall-mounted buttons
        button = self.transform_image_side(t)

        if data == 1: # facing SOUTH
            # buttons can't be placed in transparent blocks, so this
            # direction can't be seen
            return None

        elif data == 2: # facing NORTH
            # paste it twice with different brightness to make a 3D effect
            alpha_over(img, button, (12,-1), button)

            alpha = button.split()[3]
            button = ImageEnhance.Brightness(button).enhance(0.9)
            button.putalpha(alpha)

            alpha_over(img, button, (11,0), button)

        elif data == 3: # facing WEST
            # paste it twice with different brightness to make a 3D effect
            button = button.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, button, (0,-1), button)

            alpha = button.split()[3]
            button = ImageEnhance.Brightness(button).enhance(0.9)
            button.putalpha(alpha)

            alpha_over(img, button, (1,0), button)

        elif data == 4: # facing EAST
            # buttons can't be placed in transparent blocks, so this
            # direction can't be seen
            return None

    else:
        # floor-mounted buttons
        if data == 5: # long axis east-west
            button = self.transform_image_top(t)
        else: # long axis north-south
            button = self.transform_image_top(t.rotate(90))

        # paste it twice with different brightness to make a 3D effect
        alpha_over(img, button, (0,12), button)

        alpha = button.split()[3]
        button = ImageEnhance.Brightness(button).enhance(0.9)
        button.putalpha(alpha)

        alpha_over(img, button, (0,11), button)

    return img
# end rod
@material(blockid=198, data=list(range(6)), transparent=True, solid=True)
def end_rod(self, blockid, data):
    """Render an end rod; data gives the axis/direction it points
    (0=down, 1=up, 2=north, 3=south, 4=west, 5=east).
    """
    tex = self.load_image_texture("assets/minecraft/textures/block/end_rod.png")
    img = Image.new("RGBA", (24, 24), self.bgcolor)

    # the thin rod shaft, cut from the left edge of the texture sheet
    mask = tex.crop((0, 0, 2, 15))
    sidetex = Image.new(tex.mode, tex.size, self.bgcolor)
    alpha_over(sidetex, mask, (14, 0), mask)

    # the 4x4 base plate
    mask = tex.crop((2, 3, 6, 7))
    bottom = Image.new(tex.mode, tex.size, self.bgcolor)
    alpha_over(bottom, mask, (5, 6), mask)

    if data == 1 or data == 0:
        # vertical rods: no world-rotation handling needed
        side = self.transform_image_side(sidetex)
        otherside = side.transpose(Image.FLIP_LEFT_RIGHT)
        bottom = self.transform_image_top(bottom)

        if data == 1: # up
            # small cap at the tip of the rod
            mask = tex.crop((2, 0, 4, 2))
            top = Image.new(tex.mode, tex.size, self.bgcolor)
            alpha_over(top, mask, (7, 2), mask)
            top = self.transform_image_top(top)
            alpha_over(img, bottom, (0, 11), bottom)
            alpha_over(img, side, (0, 0), side)
            alpha_over(img, otherside, (11, 0), otherside)
            alpha_over(img, top, (3, 1), top)
        elif data == 0: # down
            alpha_over(img, side, (0, 0), side)
            alpha_over(img, otherside, (11, 0), otherside)
            alpha_over(img, bottom, (0, 0), bottom)
    else:
        # horizontal rods: build lying-down faces, then dispatch on
        # the world rotation via the draw_funcs table below
        otherside = self.transform_image_top(sidetex)
        sidetex = sidetex.rotate(90)
        side = self.transform_image_side(sidetex)
        bottom = self.transform_image_side(bottom)
        bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT)

        def draw_south():
            # paste order matters: base plate behind the rod
            alpha_over(img, bottom, (0, 0), bottom)
            alpha_over(img, side, (7, 8), side)
            alpha_over(img, otherside, (-3, 9), otherside)

        def draw_north():
            # base plate in front of the rod
            alpha_over(img, side, (7, 8), side)
            alpha_over(img, otherside, (-3, 9), otherside)
            alpha_over(img, bottom, (12, 6), bottom)

        def draw_west():
            _bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _bottom, (13, 0), _bottom)
            _side = side.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _side, (7, 8), _side)
            _otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _otherside, (4, 9), _otherside)

        def draw_east():
            _side = side.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _side, (7, 8), _side)
            _otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _otherside, (4, 9), _otherside)
            _bottom = bottom.transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, _bottom, (0, 6), _bottom)

        draw_funcs = [ draw_south, draw_west, draw_north, draw_east ]

        if data == 3: # south
            draw_funcs[self.rotation]()
        elif data == 2: # north
            draw_funcs[(self.rotation + 2) % len(draw_funcs)]()
        elif data == 4: # west
            draw_funcs[(self.rotation + 1) % len(draw_funcs)]()
        elif data == 5: # east
            draw_funcs[(self.rotation + 3) % len(draw_funcs)]()

    return img
# snow
@material(blockid=78, data=list(range(1, 9)), transparent=True, solid=True)
def snow(self, blockid, data):
    """Render a pile of 1-8 snow layers (data = layer count)."""
    snow_tex = self.load_image_texture("assets/minecraft/textures/block/snow.png")

    # keep only the bottom data*2 pixel rows for the side faces
    top_row = 16 - data * 2
    strip = snow_tex.crop((0, top_row, 16, 16))
    side_src = Image.new(snow_tex.mode, snow_tex.size, self.bgcolor)
    alpha_over(side_src, strip, (0, top_row, 16, 16), strip)

    img = Image.new("RGBA", (24, 24), self.bgcolor)
    top_face = self.transform_image_top(snow_tex)
    left_face = self.transform_image_side(side_src)
    right_face = left_face.transpose(Image.FLIP_LEFT_RIGHT)

    # darken the two sides, preserving their alpha channels
    left_alpha = left_face.split()[3]
    left_face = ImageEnhance.Brightness(left_face).enhance(0.9)
    left_face.putalpha(left_alpha)
    right_alpha = right_face.split()[3]
    right_face = ImageEnhance.Brightness(right_face).enhance(0.8)
    right_face.putalpha(right_alpha)

    alpha_over(img, left_face, (0, 6), left_face)
    alpha_over(img, right_face, (12, 6), right_face)
    # top face sits lower the fewer layers there are
    alpha_over(img, top_face, (0, 12 - int(12 / 8 * data)), top_face)
    return img
# snow block (full cube, unlike the layered blockid 78)
block(blockid=80, top_image="assets/minecraft/textures/block/snow.png")
# cactus
@material(blockid=81, data=list(range(15)), transparent=True, solid=True, nospawn=True)
def cactus(self, blockid, data):
    """Render a cactus; slightly narrower than a full block (data ignored)."""
    top_face = self.transform_image_top(
        self.load_image_texture("assets/minecraft/textures/block/cactus_top.png"))
    left_face = self.transform_image_side(
        self.load_image_texture("assets/minecraft/textures/block/cactus_side.png"))
    right_face = left_face.transpose(Image.FLIP_LEFT_RIGHT)

    # darken both sides, preserving their alpha channels
    left_alpha = left_face.split()[3]
    left_face = ImageEnhance.Brightness(left_face).enhance(0.9)
    left_face.putalpha(left_alpha)
    right_alpha = right_face.split()[3]
    right_face = ImageEnhance.Brightness(right_face).enhance(0.8)
    right_face.putalpha(right_alpha)

    # sides are inset by 1px to show the cactus being thinner than a block
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(img, left_face, (1, 6), left_face)
    alpha_over(img, right_face, (11, 6), right_face)
    alpha_over(img, top_face, (0, 0), top_face)
    return img
# clay block
block(blockid=82, top_image="assets/minecraft/textures/block/clay.png")
# sugar cane
@material(blockid=83, data=list(range(16)), transparent=True)
def sugar_cane(self, blockid, data):
    """Render sugar cane as a simple cross sprite (data ignored)."""
    return self.build_sprite(
        self.load_image_texture("assets/minecraft/textures/block/sugar_cane.png"))
# jukebox
@material(blockid=84, data=list(range(16)), solid=True)
def jukebox(self, blockid, data):
    """Render a jukebox: note-block sides with its own top texture (data ignored)."""
    top = self.load_image_texture("assets/minecraft/textures/block/jukebox_top.png")
    side = self.load_image_texture("assets/minecraft/textures/block/note_block.png")
    return self.build_block(top, side)
# nether and normal fences
@material(blockid=[85, 188, 189, 190, 191, 192, 113, 511, 512], data=list(range(16)),
          transparent=True, nospawn=True)
def fence(self, blockid, data):
    """Render a fence: a central post plus small connecting sticks.

    Pseudo data bits (set up in iterate.c) mark connections:
    0b0001 north, 0b0010 east, 0b0100 south, 0b1000 west.
    """
    # All three pieces (post top, post side, small stick) are cut from the
    # same plank/brick tile; pick it from a table instead of a 9-way chain.
    tile_name = {
        85: "oak_planks",        # normal fence
        188: "spruce_planks",
        189: "birch_planks",
        190: "jungle_planks",
        191: "dark_oak_planks",  # big/dark oak fence
        192: "acacia_planks",
        511: "crimson_planks",
        512: "warped_planks",
    }.get(blockid, "nether_bricks")  # 113 = nether brick fence
    tile_path = "assets/minecraft/textures/block/%s.png" % tile_name
    fence_top = self.load_image_texture(tile_path).copy()
    fence_side = self.load_image_texture(tile_path).copy()
    fence_small_side = self.load_image_texture(tile_path).copy()

    # generate the textures of the fence (crop tile to the post cross-section
    # for the top, and to the post width for the side)
    ImageDraw.Draw(fence_top).rectangle((0,0,5,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_top).rectangle((10,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_top).rectangle((0,0,15,5),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_top).rectangle((0,10,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    ImageDraw.Draw(fence_side).rectangle((0,0,5,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_side).rectangle((10,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    # Create the sides and the top of the big stick
    fence_side = self.transform_image_side(fence_side)
    fence_other_side = fence_side.transpose(Image.FLIP_LEFT_RIGHT)
    fence_top = self.transform_image_top(fence_top)

    # Darken the sides slightly. These methods also affect the alpha layer,
    # so save them first (we don't want to "darken" the alpha layer making
    # the block transparent)
    sidealpha = fence_side.split()[3]
    fence_side = ImageEnhance.Brightness(fence_side).enhance(0.9)
    fence_side.putalpha(sidealpha)
    othersidealpha = fence_other_side.split()[3]
    fence_other_side = ImageEnhance.Brightness(fence_other_side).enhance(0.8)
    fence_other_side.putalpha(othersidealpha)

    # Compose the fence big stick
    fence_big = Image.new("RGBA", (24,24), self.bgcolor)
    alpha_over(fence_big,fence_side, (5,4),fence_side)
    alpha_over(fence_big,fence_other_side, (7,4),fence_other_side)
    alpha_over(fence_big,fence_top, (0,0),fence_top)

    # Now render the small sticks.
    # Create needed images
    ImageDraw.Draw(fence_small_side).rectangle((0,0,15,0),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_small_side).rectangle((0,4,15,6),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_small_side).rectangle((0,10,15,16),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_small_side).rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(fence_small_side).rectangle((11,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))

    # Create the sides and the top of the small sticks
    fence_small_side = self.transform_image_side(fence_small_side)
    fence_small_other_side = fence_small_side.transpose(Image.FLIP_LEFT_RIGHT)

    # Darken the sides slightly. These methods also affect the alpha layer,
    # so save them first (we don't want to "darken" the alpha layer making
    # the block transparent)
    sidealpha = fence_small_other_side.split()[3]
    fence_small_other_side = ImageEnhance.Brightness(fence_small_other_side).enhance(0.9)
    fence_small_other_side.putalpha(sidealpha)
    sidealpha = fence_small_side.split()[3]
    fence_small_side = ImageEnhance.Brightness(fence_small_side).enhance(0.9)
    fence_small_side.putalpha(sidealpha)

    # Create img to compose the fence
    img = Image.new("RGBA", (24,24), self.bgcolor)

    # Position of fence small sticks in img.
    # These postitions are strange because the small sticks of the
    # fence are at the very left and at the very right of the 16x16 images
    pos_top_left = (2,3)
    pos_top_right = (10,3)
    pos_bottom_right = (10,7)
    pos_bottom_left = (2,7)

    # +x axis points top right direction
    # +y axis points bottom right direction
    # First compose small sticks in the back of the image,
    # then big stick and then small sticks in the front.
    def draw_north():
        alpha_over(img, fence_small_side, pos_top_left, fence_small_side)

    def draw_east():
        alpha_over(img, fence_small_other_side, pos_top_right, fence_small_other_side)

    def draw_south():
        alpha_over(img, fence_small_side, pos_bottom_right, fence_small_side)

    def draw_west():
        alpha_over(img, fence_small_other_side, pos_bottom_left, fence_small_other_side)

    draw_funcs = [draw_north, draw_east, draw_south, draw_west]

    if (data & 0b0001):
        draw_funcs[(self.rotation + 0) % len(draw_funcs)]()
    if (data & 0b0010):
        draw_funcs[(self.rotation + 1) % len(draw_funcs)]()

    alpha_over(img, fence_big, (0, 0), fence_big)

    if (data & 0b0100):
        draw_funcs[(self.rotation + 2) % len(draw_funcs)]()
    if (data & 0b1000):
        draw_funcs[(self.rotation + 3) % len(draw_funcs)]()

    return img
# pumpkin
@material(blockid=[86, 91, 11300], data=list(range(4)), solid=True)
def pumpkin(self, blockid, data):
    """Render pumpkins, jack-o-lanterns and carved pumpkins (data = facing)."""
    # facing is a 0-3 compass value, so world rotation is modular addition
    data = (data + self.rotation) % 4

    # texture generation
    top = self.load_image_texture("assets/minecraft/textures/block/pumpkin_top.png")
    front = self.load_image_texture({
        86: "assets/minecraft/textures/block/pumpkin_side.png",
        91: "assets/minecraft/textures/block/jack_o_lantern.png",
        11300: "assets/minecraft/textures/block/carved_pumpkin.png",
    }[blockid])
    side = self.load_image_texture("assets/minecraft/textures/block/pumpkin_side.png")

    if data == 0:  # pointing west
        return self.build_full_block(top, None, None, side, front)
    if data == 1:  # pointing north
        return self.build_full_block(top, None, None, front, side)
    # in any other direction the front can't be seen
    return self.build_full_block(top, None, None, side, side)
# simple full-cube blocks with a single texture
# netherrack
block(blockid=87, top_image="assets/minecraft/textures/block/netherrack.png")
# soul sand
block(blockid=88, top_image="assets/minecraft/textures/block/soul_sand.png")
# glowstone
block(blockid=89, top_image="assets/minecraft/textures/block/glowstone.png")
# shroomlight
block(blockid=1011, top_image="assets/minecraft/textures/block/shroomlight.png")
# portal
@material(blockid=90, data=[1, 2, 4, 5, 8, 10], transparent=True)
def portal(self, blockid, data):
    """Render a nether portal pane; pseudo data picks which diagonal shows."""
    # no rotations, uses pseudo data
    pane = self.transform_image_side(self.load_portal())
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    if data in (1, 4, 5):
        alpha_over(img, pane, (5, 4), pane)
    if data in (2, 8, 10):
        flipped = pane.transpose(Image.FLIP_TOP_BOTTOM)
        alpha_over(img, flipped, (5, 4), flipped)
    return img
# cake!
@material(blockid=92, data=list(range(7)), transparent=True, nospawn=True)
def cake(self, blockid, data):
    """Render a cake block.

    data 0 is a whole cake; 1-6 is the number of slices eaten.  The bitten
    part always points west in world space, so each of the four north
    orientations needs its own compositing branch.
    """
    # cake textures
    top = self.load_image_texture("assets/minecraft/textures/block/cake_top.png").copy()
    side = self.load_image_texture("assets/minecraft/textures/block/cake_side.png").copy()
    fullside = side.copy()
    inside = self.load_image_texture("assets/minecraft/textures/block/cake_inner.png")
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    if data == 0: # unbitten cake
        top = self.transform_image_top(top)
        side = self.transform_image_side(side)
        otherside = side.transpose(Image.FLIP_LEFT_RIGHT)
        # darken sides slightly
        sidealpha = side.split()[3]
        side = ImageEnhance.Brightness(side).enhance(0.9)
        side.putalpha(sidealpha)
        othersidealpha = otherside.split()[3]
        otherside = ImageEnhance.Brightness(otherside).enhance(0.8)
        otherside.putalpha(othersidealpha)
        # composite the cake
        alpha_over(img, side, (1, 6), side)
        alpha_over(img, otherside, (11, 5), otherside) # workaround, fixes a hole
        alpha_over(img, otherside, (12, 6), otherside)
        alpha_over(img, top, (0, 6), top)
    else:
        # cut the textures for a bitten cake
        bite_width = int(14 / 7) # Cake is 14px wide with 7 slices
        coord = 1 + bite_width * data
        # erase the eaten portion from the side and top textures
        ImageDraw.Draw(side).rectangle((16 - coord, 0, 16, 16), outline=(0, 0, 0, 0),
                                       fill=(0, 0, 0, 0))
        ImageDraw.Draw(top).rectangle((0, 0, coord - 1, 16), outline=(0, 0, 0, 0),
                                      fill=(0, 0, 0, 0))
        # the bitten part of the cake always points to the west
        # composite the cake for every north orientation
        if self.rotation == 0: # north top-left
            # create right side
            rs = self.transform_image_side(side).transpose(Image.FLIP_LEFT_RIGHT)
            # create bitten side and its coords
            deltax = bite_width * data
            deltay = -1 * data
            if data in [3, 4, 5, 6]:
                deltax -= 1
            ls = self.transform_image_side(inside)
            # create top side
            t = self.transform_image_top(top)
            # darken sides slightly
            sidealpha = ls.split()[3]
            ls = ImageEnhance.Brightness(ls).enhance(0.9)
            ls.putalpha(sidealpha)
            othersidealpha = rs.split()[3]
            rs = ImageEnhance.Brightness(rs).enhance(0.8)
            rs.putalpha(othersidealpha)
            # compose the cake
            alpha_over(img, rs, (12, 6), rs)
            alpha_over(img, ls, (1 + deltax, 6 + deltay), ls)
            alpha_over(img, t, (1, 6), t)
        elif self.rotation == 1: # north top-right
            # bitten side not shown
            # create left side
            ls = self.transform_image_side(side.transpose(Image.FLIP_LEFT_RIGHT))
            # create top
            t = self.transform_image_top(top.rotate(-90))
            # create right side
            rs = self.transform_image_side(fullside).transpose(Image.FLIP_LEFT_RIGHT)
            # darken sides slightly
            sidealpha = ls.split()[3]
            ls = ImageEnhance.Brightness(ls).enhance(0.9)
            ls.putalpha(sidealpha)
            othersidealpha = rs.split()[3]
            rs = ImageEnhance.Brightness(rs).enhance(0.8)
            rs.putalpha(othersidealpha)
            # compose the cake
            alpha_over(img, ls, (2, 6), ls)
            alpha_over(img, t, (1, 6), t)
            alpha_over(img, rs, (12, 6), rs)
        elif self.rotation == 2: # north bottom-right
            # bitten side not shown
            # left side
            ls = self.transform_image_side(fullside)
            # top
            t = self.transform_image_top(top.rotate(180))
            # right side
            rs = self.transform_image_side(side.transpose(Image.FLIP_LEFT_RIGHT))
            rs = rs.transpose(Image.FLIP_LEFT_RIGHT)
            # darken sides slightly
            sidealpha = ls.split()[3]
            ls = ImageEnhance.Brightness(ls).enhance(0.9)
            ls.putalpha(sidealpha)
            othersidealpha = rs.split()[3]
            rs = ImageEnhance.Brightness(rs).enhance(0.8)
            rs.putalpha(othersidealpha)
            # compose the cake
            alpha_over(img, ls, (2, 6), ls)
            alpha_over(img, t, (1, 6), t)
            alpha_over(img, rs, (12, 6), rs)
        elif self.rotation == 3: # north bottom-left
            # create left side
            ls = self.transform_image_side(side)
            # create top
            t = self.transform_image_top(top.rotate(90))
            # create right side and its coords
            deltax = 12 - bite_width * data
            deltay = -1 * data
            if data in [3, 4, 5, 6]:
                deltax += 1
            rs = self.transform_image_side(inside).transpose(Image.FLIP_LEFT_RIGHT)
            # darken sides slightly
            sidealpha = ls.split()[3]
            ls = ImageEnhance.Brightness(ls).enhance(0.9)
            ls.putalpha(sidealpha)
            othersidealpha = rs.split()[3]
            rs = ImageEnhance.Brightness(rs).enhance(0.8)
            rs.putalpha(othersidealpha)
            # compose the cake
            alpha_over(img, ls, (2, 6), ls)
            alpha_over(img, t, (1, 6), t)
            alpha_over(img, rs, (1 + deltax, 6 + deltay), rs)
    return img
# redstone repeaters ON and OFF
@material(blockid=[93,94], data=list(range(16)), transparent=True, nospawn=True)
def repeater(self, blockid, data):
    """Render a redstone repeater (93 = off, 94 = on).

    data bits 0-1 are the facing, bits 2-3 the delay setting (1-4 ticks).
    The slab is drawn first, then two small "3d" redstone torches: a static
    one at the output end and a moving one whose position encodes the delay.
    """
    # rotation
    # Masked to not clobber delay info
    if self.rotation == 1:
        if (data & 0b0011) == 0: data = data & 0b1100 | 1
        elif (data & 0b0011) == 1: data = data & 0b1100 | 2
        elif (data & 0b0011) == 2: data = data & 0b1100 | 3
        elif (data & 0b0011) == 3: data = data & 0b1100 | 0
    elif self.rotation == 2:
        if (data & 0b0011) == 0: data = data & 0b1100 | 2
        elif (data & 0b0011) == 1: data = data & 0b1100 | 3
        elif (data & 0b0011) == 2: data = data & 0b1100 | 0
        elif (data & 0b0011) == 3: data = data & 0b1100 | 1
    elif self.rotation == 3:
        if (data & 0b0011) == 0: data = data & 0b1100 | 3
        elif (data & 0b0011) == 1: data = data & 0b1100 | 0
        elif (data & 0b0011) == 2: data = data & 0b1100 | 1
        elif (data & 0b0011) == 3: data = data & 0b1100 | 2
    # generate the diode
    top = self.load_image_texture("assets/minecraft/textures/block/repeater.png") if blockid == 93 else self.load_image_texture("assets/minecraft/textures/block/repeater_on.png")
    side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png")
    increment = 13
    if (data & 0x3) == 0: # pointing east
        pass
    if (data & 0x3) == 1: # pointing south
        top = top.rotate(270)
    if (data & 0x3) == 2: # pointing west
        top = top.rotate(180)
    if (data & 0x3) == 3: # pointing north
        top = top.rotate(90)
    img = self.build_full_block( (top, increment), None, None, side, side)
    # compose a "3d" redstone torch
    t = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png").copy() if blockid == 93 else self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png").copy()
    torch = Image.new("RGBA", (24,24), self.bgcolor)
    t_crop = t.crop((2,2,14,14))
    slice = t_crop.copy()
    ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0))
    alpha_over(torch, slice, (6,4))
    alpha_over(torch, t_crop, (5,5))
    alpha_over(torch, t_crop, (6,5))
    alpha_over(torch, slice, (6,6))
    # paste redstone torches everywhere!
    # the torch is too tall for the repeater, crop the bottom.
    ImageDraw.Draw(torch).rectangle((0,16,24,24),outline=(0,0,0,0),fill=(0,0,0,0))
    # touch up the 3d effect with big rectangles, just in case, for other texture packs
    # NOTE(review): the first rectangle's coordinates are inverted (y0=24 > y1=15),
    # which draws nothing in Pillow — presumably a latent typo; verify intent.
    ImageDraw.Draw(torch).rectangle((0,24,10,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(torch).rectangle((12,15,24,24),outline=(0,0,0,0),fill=(0,0,0,0))
    # torch positions for every redstone torch orientation.
    #
    # This is a horrible list of torch orientations. I tried to
    # obtain these orientations by rotating the positions for one
    # orientation, but pixel rounding is horrible and messes the
    # torches.
    if (data & 0x3) == 0: # pointing east
        if (data & 0xC) == 0: # one tick delay
            moving_torch = (1,1)
            static_torch = (-3,-1)
        elif (data & 0xC) == 4: # two ticks delay
            moving_torch = (2,2)
            static_torch = (-3,-1)
        elif (data & 0xC) == 8: # three ticks delay
            moving_torch = (3,2)
            static_torch = (-3,-1)
        elif (data & 0xC) == 12: # four ticks delay
            moving_torch = (4,3)
            static_torch = (-3,-1)
    elif (data & 0x3) == 1: # pointing south
        if (data & 0xC) == 0: # one tick delay
            moving_torch = (1,1)
            static_torch = (5,-1)
        elif (data & 0xC) == 4: # two ticks delay
            moving_torch = (0,2)
            static_torch = (5,-1)
        elif (data & 0xC) == 8: # three ticks delay
            moving_torch = (-1,2)
            static_torch = (5,-1)
        elif (data & 0xC) == 12: # four ticks delay
            moving_torch = (-2,3)
            static_torch = (5,-1)
    elif (data & 0x3) == 2: # pointing west
        if (data & 0xC) == 0: # one tick delay
            moving_torch = (1,1)
            static_torch = (5,3)
        elif (data & 0xC) == 4: # two ticks delay
            moving_torch = (0,0)
            static_torch = (5,3)
        elif (data & 0xC) == 8: # three ticks delay
            moving_torch = (-1,0)
            static_torch = (5,3)
        elif (data & 0xC) == 12: # four ticks delay
            moving_torch = (-2,-1)
            static_torch = (5,3)
    elif (data & 0x3) == 3: # pointing north
        if (data & 0xC) == 0: # one tick delay
            moving_torch = (1,1)
            static_torch = (-3,3)
        elif (data & 0xC) == 4: # two ticks delay
            moving_torch = (2,0)
            static_torch = (-3,3)
        elif (data & 0xC) == 8: # three ticks delay
            moving_torch = (3,0)
            static_torch = (-3,3)
        elif (data & 0xC) == 12: # four ticks delay
            moving_torch = (4,-1)
            static_torch = (-3,3)
    # This paste order is correct for east and south orientations but wrong
    # for north and west; with the default texture pack the torches are
    # small enough not to overlap, so it goes unnoticed.
    alpha_over(img, torch, static_torch, torch)
    alpha_over(img, torch, moving_torch, torch)
    return img
# redstone comparator (149 is inactive, 150 is active)
@material(blockid=[149,150], data=list(range(16)), transparent=True, nospawn=True)
def comparator(self, blockid, data):
    """Render a redstone comparator.

    data bits 0-1 are the facing; bit 2 appears to flag subtract mode
    (lights the front torch) and bit 3 a powered state (lights the back
    torches) — TODO confirm against the chunk-data producer.
    """
    # rotation
    # add self.rotation to the lower 2 bits, mod 4
    data = data & 0b1100 | (((data & 0b11) + self.rotation) % 4)
    top = self.load_image_texture("assets/minecraft/textures/block/comparator.png") if blockid == 149 else self.load_image_texture("assets/minecraft/textures/block/comparator_on.png")
    side = self.load_image_texture("assets/minecraft/textures/block/smooth_stone_slab_side.png")
    increment = 13
    # per-facing: rotate the top texture and pick pixel offsets for the
    # single front torch and the pair of back torches
    if (data & 0x3) == 0: # pointing north
        pass
        static_torch = (-3,-1)
        torch = ((0,2),(6,-1))
    if (data & 0x3) == 1: # pointing east
        top = top.rotate(270)
        static_torch = (5,-1)
        torch = ((-4,-1),(0,2))
    if (data & 0x3) == 2: # pointing south
        top = top.rotate(180)
        static_torch = (5,3)
        torch = ((0,-4),(-4,-1))
    if (data & 0x3) == 3: # pointing west
        top = top.rotate(90)
        static_torch = (-3,3)
        torch = ((1,-4),(6,-1))
    def build_torch(active):
        # compose a "3d" redstone torch (lit or unlit) from the flat texture
        t = self.load_image_texture("assets/minecraft/textures/block/redstone_torch_off.png").copy() if not active else self.load_image_texture("assets/minecraft/textures/block/redstone_torch.png").copy()
        torch = Image.new("RGBA", (24,24), self.bgcolor)
        t_crop = t.crop((2,2,14,14))
        slice = t_crop.copy()
        ImageDraw.Draw(slice).rectangle((6,0,12,12),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(slice).rectangle((0,0,4,12),outline=(0,0,0,0),fill=(0,0,0,0))
        alpha_over(torch, slice, (6,4))
        alpha_over(torch, t_crop, (5,5))
        alpha_over(torch, t_crop, (6,5))
        alpha_over(torch, slice, (6,6))
        return torch
    active_torch = build_torch(True)
    inactive_torch = build_torch(False)
    back_torch = active_torch if (blockid == 150 or data & 0b1000 == 0b1000) else inactive_torch
    static_torch_img = active_torch if (data & 0b100 == 0b100) else inactive_torch
    img = self.build_full_block( (top, increment), None, None, side, side)
    alpha_over(img, static_torch_img, static_torch, static_torch_img)
    alpha_over(img, back_torch, torch[0], back_torch)
    alpha_over(img, back_torch, torch[1], back_torch)
    return img
# trapdoor
# an opened trapdoor looks like a flat sprite, which is not ideal
@material(blockid=[96,167,11332,11333,11334,11335,11336,12501,12502], data=list(range(16)), transparent=True, nospawn=True)
def trapdoor(self, blockid, data):
    """Render a trapdoor of any material, opened or closed.

    data bits 0-1 are the facing, bit 2 the opened flag, bit 3 marks a
    top-half trapdoor.
    """
    # Remap only the facing bits for the requested north direction,
    # keeping the opened/top flags untouched.
    facing_remap = {1: {0: 3, 1: 2, 2: 0, 3: 1},
                    2: {0: 1, 1: 0, 2: 3, 3: 2},
                    3: {0: 2, 1: 3, 2: 1, 3: 0}}
    if self.rotation in facing_remap:
        data = (data & 0b1100) | facing_remap[self.rotation][data & 0b0011]
    # texture generation
    texturepath = {96:"assets/minecraft/textures/block/oak_trapdoor.png",
                   167:"assets/minecraft/textures/block/iron_trapdoor.png",
                   11332:"assets/minecraft/textures/block/spruce_trapdoor.png",
                   11333:"assets/minecraft/textures/block/birch_trapdoor.png",
                   11334:"assets/minecraft/textures/block/jungle_trapdoor.png",
                   11335:"assets/minecraft/textures/block/acacia_trapdoor.png",
                   11336:"assets/minecraft/textures/block/dark_oak_trapdoor.png",
                   12501:"assets/minecraft/textures/block/crimson_trapdoor.png",
                   12502:"assets/minecraft/textures/block/warped_trapdoor.png",
                   }[blockid]
    if data & 0x4 == 0x4:
        # opened trapdoor: drawn flat against one wall; top-half doors
        # are flipped vertically
        texture = self.load_image_texture(texturepath)
        if data & 0x08 == 0x08:
            texture = texture.transpose(Image.FLIP_TOP_BOTTOM)
        facing = data & 0x3
        if facing == 0: # west
            img = self.build_full_block(None, None, None, None, texture)
        elif facing == 1: # east
            img = self.build_full_block(None, texture, None, None, None)
        elif facing == 2: # south
            img = self.build_full_block(None, None, texture, None, None)
        else: # north
            img = self.build_full_block(None, None, None, texture, None)
    else:
        # closed trapdoor: a thin slab at the top or bottom of the block
        texture = self.load_image_texture(texturepath)
        if data & 0x8 == 0x8: # top trapdoor
            img = Image.new("RGBA", (24,24), self.bgcolor)
        slab = self.build_full_block((texture, 12), None, None, texture, texture)
        if data & 0x8 == 0x8:
            alpha_over(img, slab, (0,-9), slab)
        else: # bottom trapdoor
            img = slab
    return img
# block with hidden silverfish (stone, cobblestone and stone brick)
@material(blockid=97, data=list(range(3)), solid=True)
def hidden_silverfish(self, blockid, data):
    """Render an infested block; data selects the disguise texture."""
    names = {0: "stone",          # plain stone
             1: "cobblestone",    # cobblestone
             2: "stone_bricks"}   # stone brick
    t = self.load_image_texture("assets/minecraft/textures/block/%s.png" % names[data])
    return self.build_block(t, t)
# stone brick
@material(blockid=98, data=list(range(4)), solid=True)
def stone_brick(self, blockid, data):
    """Render stone bricks: normal, mossy, cracked or chiseled ("circle")."""
    names = {0: "stone_bricks",
             1: "mossy_stone_bricks",
             2: "cracked_stone_bricks",
             3: "chiseled_stone_bricks"}
    t = self.load_image_texture("assets/minecraft/textures/block/%s.png" % names[data])
    return self.build_full_block(t, None, None, t, t)
# huge brown/red mushrooms, and mushroom stems
@material(blockid=[99, 100, 139], data=list(range(64)), solid=True)
def huge_mushroom(self, blockid, data):
    """Render huge mushroom blocks and stems.

    Each of the six data bits flags one face as "capped" (vs. porous
    inside).  Bit order per rotation:
        rotation bit: 654321
        0             DUENWS
        1             DUNWSE
        2             DUWSEN
        3             DUSENW
    """
    # Permute the face bits so that the fixed isometric view
    # (up / west / south) stays correct for each north direction.
    bit_orders = {1: [6, 5, 3, 2, 1, 4],
                  2: [6, 5, 2, 1, 4, 3],
                  3: [6, 5, 1, 4, 3, 2]}
    if self.rotation in bit_orders:
        remapped = 0
        for src_bit in bit_orders[self.rotation]:
            remapped = (remapped << 1) | ((data >> (src_bit - 1)) & 1)
        data = remapped
    # texture generation
    names = {99: "brown_mushroom_block",
             100: "red_mushroom_block",
             139: "mushroom_stem"}
    cap = self.load_image_texture("assets/minecraft/textures/block/%s.png" % names[blockid])
    porous = self.load_image_texture("assets/minecraft/textures/block/mushroom_block_inside.png")
    # Only the up, west and south faces are visible after the remap above.
    face_up = cap if data & 0b010000 else porous
    face_west = cap if data & 0b000010 else porous
    face_south = (cap if data & 0b000001 else porous).transpose(Image.FLIP_LEFT_RIGHT)
    return self.build_full_block(face_up, None, None, face_west, face_south)
# iron bars and glass pane
# TODO glass pane is not a sprite, it has a texture for the side,
# at the moment is not used
@material(blockid=[101,102, 160], data=list(range(256)), transparent=True, nospawn=True)
def panes(self, blockid, data):
    """Render iron bars / glass panes / stained glass panes.

    Uses pseudo data: the lower 4 bits encode the stained-glass colour,
    the upper 4 bits which of the four neighbours the pane connects to.
    """
    # no rotation, uses pseudo data
    if blockid == 101:
        # iron bars
        t = self.load_image_texture("assets/minecraft/textures/block/iron_bars.png")
    elif blockid == 160:
        t = self.load_image_texture("assets/minecraft/textures/block/%s_stained_glass.png" % color_map[data & 0xf])
    else:
        # glass panes
        t = self.load_image_texture("assets/minecraft/textures/block/glass.png")
    left = t.copy()
    right = t.copy()
    center = t.copy()
    # generate the four small pieces of the glass pane
    ImageDraw.Draw(right).rectangle((0,0,7,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(left).rectangle((8,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(center).rectangle((0,0,6,15),outline=(0,0,0,0),fill=(0,0,0,0))
    ImageDraw.Draw(center).rectangle((9,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    up_center = self.transform_image_side(center)
    up_left = self.transform_image_side(left)
    up_right = self.transform_image_side(right).transpose(Image.FLIP_TOP_BOTTOM)
    dw_right = self.transform_image_side(right)
    dw_left = self.transform_image_side(left).transpose(Image.FLIP_TOP_BOTTOM)
    # Create img to compose the texture
    img = Image.new("RGBA", (24,24), self.bgcolor)
    # +x axis points top right direction
    # +y axis points bottom right direction
    # First compose things in the back of the image,
    # then things in the front.
    # the lower 4 bits encode color, the upper 4 encode adjacencies
    data = data >> 4
    if data == 0:
        # no connections: draw a lone post
        alpha_over(img, up_center, (6, 3), up_center) # center
    else:
        def draw_top_left():
            alpha_over(img, up_left, (6, 3), up_left) # top left
        def draw_top_right():
            alpha_over(img, up_right, (6, 3), up_right) # top right
        def draw_bottom_right():
            alpha_over(img, dw_right, (6, 3), dw_right) # bottom right
        def draw_bottom_left():
            alpha_over(img, dw_left, (6, 3), dw_left) # bottom left
        # the map rotation is applied by shifting which corner each
        # adjacency bit draws
        draw_funcs = [draw_top_left, draw_top_right, draw_bottom_right, draw_bottom_left]
        if (data & 0b0001) == 1:
            draw_funcs[(self.rotation + 0) % len(draw_funcs)]()
        if (data & 0b0010) == 2:
            draw_funcs[(self.rotation + 1) % len(draw_funcs)]()
        if (data & 0b0100) == 4:
            draw_funcs[(self.rotation + 2) % len(draw_funcs)]()
        if (data & 0b1000) == 8:
            draw_funcs[(self.rotation + 3) % len(draw_funcs)]()
    return img
# melon (full cube with distinct top and side textures)
block(blockid=103, top_image="assets/minecraft/textures/block/melon_top.png", side_image="assets/minecraft/textures/block/melon_side.png", solid=True)
# pumpkin and melon stem
# TODO To render it as in game, pseudo data and ancil data are needed:
# once fully grown the stem bends towards the melon/pumpkin block;
# at the moment only the growing (straight) stem is rendered.
@material(blockid=[104,105], data=list(range(8)), transparent=True)
def stem(self, blockid, data):
    """Render a pumpkin/melon stem.

    The ancil data (0-7) is the growth stage.  The full stem sprite is
    drawn regardless of the stage; a fully grown stem (stage 7) is tinted
    brown to match the in-game colour (rendermode-normal.c deliberately
    skips tinting data value 7).

    Fix: the previous implementation first built a partially-cropped stem
    image and then immediately discarded it by reassigning ``img`` to
    ``self.build_sprite(t)``.  That dead computation has been removed;
    the rendered output is unchanged.
    """
    # NOTE(review): both blockids (104 = pumpkin stem, 105 = melon stem)
    # use melon_stem.png — presumably pumpkin_stem.png was intended for
    # 104; verify before changing.
    t = self.load_image_texture("assets/minecraft/textures/block/melon_stem.png").copy()
    img = self.build_sprite(t)
    if data & 7 == 7:
        # fully grown stem gets brown color!
        img = self.tint_texture(img, (211, 169, 116))
    return img
# nether vines (drawn as simple billboards: two crossed sprites)
billboard(blockid=1012, imagename="assets/minecraft/textures/block/twisting_vines.png")
billboard(blockid=1013, imagename="assets/minecraft/textures/block/twisting_vines_plant.png")
billboard(blockid=1014, imagename="assets/minecraft/textures/block/weeping_vines.png")
billboard(blockid=1015, imagename="assets/minecraft/textures/block/weeping_vines_plant.png")
# vines
@material(blockid=106, data=list(range(32)), transparent=True, solid=False, nospawn=True)
def vines(self, blockid, data):
    """Render vines; each data bit flags one face carrying a vine.

    Bit order per rotation:
        rotation bit: 54321
        0             UENWS
        1             UNWSE
        2             UWSEN
        3             USENW
    """
    # Permute the face bits for the map rotation (same scheme as the
    # huge-mushroom renderer, minus the unused "down" bit).
    bit_orders = {1: [5, 3, 2, 1, 4],
                  2: [5, 2, 1, 4, 3],
                  3: [5, 1, 4, 3, 2]}
    if self.rotation in bit_orders:
        remapped = 0
        for src_bit in bit_orders[self.rotation]:
            remapped = (remapped << 1) | ((data >> (src_bit - 1)) & 1)
        data = remapped
    # decode data and prepare textures
    tex = self.load_image_texture("assets/minecraft/textures/block/vine.png")
    def face(mask):
        return tex if data & mask else None
    # argument order: up, north, east, west, south
    return self.build_full_block(face(0b10000), face(0b00100), face(0b01000),
                                 face(0b00010), face(0b00001))
# fence gates
@material(blockid=[107, 183, 184, 185, 186, 187, 513, 514], data=list(range(8)), transparent=True, nospawn=True)
def fence_gate(self, blockid, data):
    """Render a fence gate (opened or closed) for every wood type.

    data bits 0-1 are the facing, bit 2 the opened flag.
    """
    # Split off the opened flag, rotate the facing with the map, and keep
    # the flag separately (the 0-3 facings simply cycle).
    opened = bool(data & 0x4)
    facing = ((data & 0x3) + self.rotation) % 4
    # pick the plank texture for this wood type
    plank_names = {107: "oak", 183: "spruce", 184: "birch", 185: "jungle",
                   186: "dark_oak", 187: "acacia", 513: "crimson", 514: "warped"}
    if blockid not in plank_names:
        return None
    gate_side = self.load_image_texture(
        "assets/minecraft/textures/block/%s_planks.png" % plank_names[blockid]).copy()
    # carve the plank texture into the gate silhouette
    carve = ImageDraw.Draw(gate_side)
    carve.rectangle((7,0,15,0),outline=(0,0,0,0),fill=(0,0,0,0))
    carve.rectangle((7,4,9,6),outline=(0,0,0,0),fill=(0,0,0,0))
    carve.rectangle((7,10,15,16),outline=(0,0,0,0),fill=(0,0,0,0))
    carve.rectangle((0,12,15,16),outline=(0,0,0,0),fill=(0,0,0,0))
    carve.rectangle((0,0,4,15),outline=(0,0,0,0),fill=(0,0,0,0))
    carve.rectangle((14,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
    # darken the sides slightly, as with the fences
    alpha = gate_side.split()[3]
    gate_side = ImageEnhance.Brightness(gate_side).enhance(0.9)
    gate_side.putalpha(alpha)
    # build the four projected variants
    mirror_gate_side = self.transform_image_side(gate_side.transpose(Image.FLIP_LEFT_RIGHT))
    gate_side = self.transform_image_side(gate_side)
    gate_other_side = gate_side.transpose(Image.FLIP_LEFT_RIGHT)
    mirror_gate_other_side = mirror_gate_side.transpose(Image.FLIP_LEFT_RIGHT)
    # Create img to compose the fence gate
    img = Image.new("RGBA", (24,24), self.bgcolor)
    if opened:
        # an opened gate shows the same piece twice, swung to the side
        piece, pos_a, pos_b = {0: (gate_side, (2,8), (13,3)),
                               1: (gate_other_side, (-1,3), (10,8)),
                               2: (mirror_gate_side, (-1,7), (10,2)),
                               3: (mirror_gate_other_side, (2,1), (13,7))}[facing]
        alpha_over(img, piece, pos_a, piece)
        alpha_over(img, piece, pos_b, piece)
    else:
        # closed: paste positions match those used for ordinary fences
        if facing in (0, 2):
            alpha_over(img, gate_other_side, (10,3), gate_other_side)          # top right
            alpha_over(img, mirror_gate_other_side, (2,7), mirror_gate_other_side)  # bottom left
        else:
            alpha_over(img, gate_side, (2,3), gate_side)                       # top left
            alpha_over(img, mirror_gate_side, (10,7), mirror_gate_side)        # bottom right
    return img
# mycelium (grass-like top with its own side texture)
block(blockid=110, top_image="assets/minecraft/textures/block/mycelium_top.png", side_image="assets/minecraft/textures/block/mycelium_side.png")
# warped_nylium & crimson_nylium
block(blockid=1006, top_image="assets/minecraft/textures/block/warped_nylium.png", side_image="assets/minecraft/textures/block/warped_nylium_side.png")
block(blockid=1007, top_image="assets/minecraft/textures/block/crimson_nylium.png", side_image="assets/minecraft/textures/block/crimson_nylium_side.png")
# lilypad
# At the moment of writing this lilypads has no ancil data and their
# orientation depends on their position on the map. So it uses pseudo
# ancildata.
@material(blockid=111, data=list(range(4)), transparent=True)
def lilypad(self, blockid, data):
    """Render a lily pad, rotated according to its pseudo ancil data."""
    angles = {0: 180, 1: 270, 2: 0, 3: 90}
    t = self.load_image_texture("assets/minecraft/textures/block/lily_pad.png").copy()
    # only the floor texture is set; all faces are empty
    return self.build_full_block(None, None, None, None, None, t.rotate(angles[data]))
# nether bricks
@material(blockid=112, data=list(range(3)), solid=True)
def nether_bricks(self, blockid, data):
    """Render nether bricks: normal, cracked or chiseled."""
    names = {0: "nether_bricks",
             1: "cracked_nether_bricks",
             2: "chiseled_nether_bricks"}
    t = self.load_image_texture("assets/minecraft/textures/block/%s.png" % names[data])
    return self.build_full_block(t, None, None, t, t)
# nether wart
@material(blockid=115, data=list(range(4)), transparent=True)
def nether_wart(self, blockid, data):
    """Render nether wart; data 0-3 maps onto three visible growth stages."""
    if data == 0:
        stage = 0    # just come up
    elif data in (1, 2):
        stage = 1    # growing
    else:
        stage = 2    # fully grown
    t = self.load_image_texture(
        "assets/minecraft/textures/block/nether_wart_stage%d.png" % stage)
    # drawn with the same billboard technique as tall grass
    return self.build_billboard(t)
# enchantment table
# TODO there's no book at the moment
@material(blockid=116, transparent=True, nodata=True)
def enchantment_table(self, blockid, data):
    """Render an enchantment table (the floating book is not drawn yet)."""
    top_tex = self.load_image_texture("assets/minecraft/textures/block/enchanting_table_top.png")
    side_tex = self.load_image_texture("assets/minecraft/textures/block/enchanting_table_side.png")
    # the table is shorter than a full block: top face lowered by 4px
    return self.build_full_block((top_tex, 4), None, None, side_tex, side_tex)
# brewing stand
@material(blockid=117, data=list(range(5)), transparent=True)
def brewing_stand(self, blockid, data):
    """Render a brewing stand (placeholder: flat base plus a 2D billboard)."""
    base_tex = self.load_image_texture("assets/minecraft/textures/block/brewing_stand_base.png")
    img = self.build_full_block(None, None, None, None, None, base_tex)
    stand_tex = self.load_image_texture("assets/minecraft/textures/block/brewing_stand.png")
    stand_sprite = self.build_billboard(stand_tex)
    alpha_over(img, stand_sprite, (0, -2))
    return img
# cauldron
@material(blockid=118, data=list(range(16)), transparent=True, solid=True, nospawn=True)
def cauldron(self, blockid, data):
    """Render a cauldron, optionally filled.

    Pseudo data layout: bits 0-1 are the fill level in thirds (0-3);
    bits 2-3 the content (0/1 water, 2 lava, 3 powder snow).
    """
    side = self.load_image_texture("assets/minecraft/textures/block/cauldron_side.png").copy()
    top = self.load_image_texture("assets/minecraft/textures/block/cauldron_top.png")
    content_paths = {3: "assets/minecraft/textures/block/powder_snow.png",
                     2: "assets/minecraft/textures/block/lava_still.png"}
    filltype = (data >> 2) & 3
    # water is the fallback for filltype 0 and 1
    fill_tex = self.load_image_texture(content_paths.get(filltype, "water.png"))
    liquid = self.transform_image_top(fill_tex)
    # Side texture isn't transparent between the feet, so adjust the texture
    ImageDraw.Draw(side).rectangle((5, 14, 11, 16), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
    level = data & 3
    if level == 0: # Empty
        img = self.build_full_block(top, side, side, side, side)
    else:
        # Filled in thirds: draw the back walls, then the liquid surface at
        # the matching height, then the rim and front walls on top.
        img = self.build_full_block(None, side, side, None, None)
        alpha_over(img, liquid, (0, 12 - level * 4), liquid)
        front = self.build_full_block(top, None, None, side, side)
        alpha_over(img, front, (0, 0), front)
    return img
# end portal and end_gateway
@material(blockid=[119,209], transparent=True, nodata=True)
def end_portal(self, blockid, data):
    """Render the end portal / end gateway starfield."""
    # black texture sprinkled with grey, green and white dots resembling stars
    starfield = Image.new("RGBA", (16,16), (0,0,0,255))
    for color in [(155,155,155,255), (100,255,100,255), (255,255,255,255)]:
        for _ in range(6):
            starfield.putpixel((randint(0,15), randint(0,15)), color)
    if blockid == 209: # end_gateway is a full block
        return self.build_block(starfield, starfield)
    # the end portal itself is only the flat top surface
    img = Image.new("RGBA", (24,24), self.bgcolor)
    surface = self.transform_image_top(starfield)
    alpha_over(img, surface, (0,0), surface)
    return img
# end portal frame (data range 8 to get all orientations of filled)
@material(blockid=120, data=list(range(8)), transparent=True, solid=True, nospawn=True)
def end_portal_frame(self, blockid, data):
    """Render an end portal frame; data bit 2 marks an inserted ender eye."""
    # Rotation only affects the ender eye and the top face texture.
    data = (data & 0b100) | ((self.rotation + (data & 0b11)) % 4)
    top = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_top.png").copy()
    top = top.rotate((data % 4) * 90)
    side = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_side.png")
    img = self.build_full_block((top, 4), None, None, side, side)
    if data & 0x4:
        # Eye of ender inserted: split its texture into the part seen from
        # above (lower rows) and the part seen from the side (upper rows).
        eye_flat = self.load_image_texture("assets/minecraft/textures/block/end_portal_frame_eye.png").copy()
        eye_strip = eye_flat.copy()
        ImageDraw.Draw(eye_flat).rectangle((0, 0, 15, 4), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        ImageDraw.Draw(eye_strip).rectangle((0, 4, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        # project and paste the three visible eye faces
        eye_top_face = self.transform_image_top(eye_flat.rotate((data % 4) * 90))
        eye_left = self.transform_image_side(eye_strip)
        eye_right = eye_left.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, eye_left, (5, 5), eye_left)
        alpha_over(img, eye_right, (9, 5), eye_right)
        alpha_over(img, eye_top_face, (0, 0), eye_top_face)
    return img
# end stone
block(blockid=121, top_image="assets/minecraft/textures/block/end_stone.png")
# dragon egg
# NOTE: this isn't a block, but I think it's better than nothing
block(blockid=122, top_image="assets/minecraft/textures/block/dragon_egg.png")
# inactive redstone lamp
block(blockid=123, top_image="assets/minecraft/textures/block/redstone_lamp.png")
# active redstone lamp
block(blockid=124, top_image="assets/minecraft/textures/block/redstone_lamp_on.png")
# daylight sensor.
@material(blockid=[151,178], transparent=True)
def daylight_sensor(self, blockid, data):
    """Render a daylight sensor (151) or inverted daylight sensor (178)
    as a half-height slab."""
    if blockid == 151: # daylight sensor
        top = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_top.png")
    else: # inverted daylight sensor
        top = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_inverted_top.png")
    side = self.load_image_texture("assets/minecraft/textures/block/daylight_detector_side.png")
    # keep only the lower half of the side texture
    lower_half = side.crop((0,8,16,16))
    side = Image.new(side.mode, side.size, self.bgcolor)
    alpha_over(side, lower_half, (0,0,16,8), lower_half)
    # project as a plain slab
    top = self.transform_image_top(top)
    side = self.transform_image_side(side)
    otherside = side.transpose(Image.FLIP_LEFT_RIGHT)
    # darken the two side faces slightly, as usual
    mask = side.split()[3]
    side = ImageEnhance.Brightness(side).enhance(0.9)
    side.putalpha(mask)
    mask = otherside.split()[3]
    otherside = ImageEnhance.Brightness(otherside).enhance(0.8)
    otherside.putalpha(mask)
    img = Image.new("RGBA", (24,24), self.bgcolor)
    alpha_over(img, side, (0,12), side)
    alpha_over(img, otherside, (12,12), otherside)
    alpha_over(img, top, (0,6), top)
    return img
# wooden double and normal slabs
# these are the new wooden slabs, blockids 43 44 still have wooden
# slabs, but those are unobtainable without cheating
@material(blockid=[125, 126], data=list(range(16)), transparent=(44,), solid=True)
def wooden_slabs(self, blockid, data):
texture = data & 7
if texture== 0: # oak
top = side = self.load_image_texture("assets/minecraft/textures/block/oak_planks.png")
elif texture== 1: # spruce
top = side = self.load_image_texture("assets/minecraft/textures/block/spruce_planks.png")
elif texture== 2: # birch
top = side = self.load_image_texture("assets/minecraft/textures/block/birch_planks.png")
elif texture== 3: # jungle
top = side = self.load_image_texture("assets/minecraft/textures/block/jungle_planks.png")
elif texture== 4: # acacia
top = side = self.load_image_texture("assets/minecraft/textures/block/acacia_planks.png")
elif texture== 5: # dark wood
top = side = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png")
elif texture== 6: # crimson
top = side = self.load_image_texture("assets/minecraft/textures/block/crimson_planks.png")
elif texture== 7: # warped
top = side = self.load_image_texture("assets/minecraft/textures/block/warped_planks.png")
else:
return None
if blockid == 125: # double slab
return self.build_block(top, side)
return self.build_slab_block(top, side, data & 8 == 8);
# emerald ore
block(blockid=129, top_image="assets/minecraft/textures/block/emerald_ore.png")
# emerald block
block(blockid=133, top_image="assets/minecraft/textures/block/emerald_block.png")
    # cocoa plant
    @material(blockid=127, data=list(range(12)), transparent=True)
    def cocoa_plant(self, blockid, data):
        """Render a cocoa pod attached to the side of a log.

        Bits 0-1 of ``data`` give the facing (remapped below for the world
        rotation); bits 2-3 the growth size (0 small, 4 normal, 8 big).
        """
        orientation = data & 3
        # rotation
        if self.rotation == 1:
            if orientation == 0: orientation = 1
            elif orientation == 1: orientation = 2
            elif orientation == 2: orientation = 3
            elif orientation == 3: orientation = 0
        elif self.rotation == 2:
            if orientation == 0: orientation = 2
            elif orientation == 1: orientation = 3
            elif orientation == 2: orientation = 0
            elif orientation == 3: orientation = 1
        elif self.rotation == 3:
            if orientation == 0: orientation = 3
            elif orientation == 1: orientation = 0
            elif orientation == 2: orientation = 1
            elif orientation == 3: orientation = 2
        # growth size; data < 12 keeps this in {0, 4, 8}, so one of the
        # branches below always binds t and the paste offsets
        size = data & 12
        if size == 8: # big
            t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage2.png")
            c_left = (0,3)
            c_right = (8,3)
            c_top = (5,2)
        elif size == 4: # normal
            t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage1.png")
            c_left = (-2,2)
            c_right = (8,2)
            c_top = (5,2)
        elif size == 0: # small
            t = self.load_image_texture("assets/minecraft/textures/block/cocoa_stage0.png")
            c_left = (-3,2)
            c_right = (6,2)
            c_top = (5,2)
        # let's get every texture piece necessary to do this
        stalk = t.copy()
        ImageDraw.Draw(stalk).rectangle((0,0,11,16),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(stalk).rectangle((12,4,16,16),outline=(0,0,0,0),fill=(0,0,0,0))
        top = t.copy() # warning! changes with plant size
        ImageDraw.Draw(top).rectangle((0,7,16,16),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(top).rectangle((7,0,16,6),outline=(0,0,0,0),fill=(0,0,0,0))
        side = t.copy() # warning! changes with plant size
        ImageDraw.Draw(side).rectangle((0,0,6,16),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(side).rectangle((0,0,16,3),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(side).rectangle((0,14,16,16),outline=(0,0,0,0),fill=(0,0,0,0))
        # first compose the block of the cocoa plant
        block = Image.new("RGBA", (24,24), self.bgcolor)
        tmp = self.transform_image_side(side).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over (block, tmp, c_right,tmp) # right side
        tmp = tmp.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over (block, tmp, c_left,tmp) # left side
        tmp = self.transform_image_top(top)
        alpha_over(block, tmp, c_top,tmp)
        if size == 0:
            # fix a pixel hole
            block.putpixel((6,9), block.getpixel((6,10)))
        # compose the cocoa plant
        img = Image.new("RGBA", (24,24), self.bgcolor)
        if orientation in (2,3): # south and west
            tmp = self.transform_image_side(stalk).transpose(Image.FLIP_LEFT_RIGHT)
            alpha_over(img, block,(-1,-2), block)
            alpha_over(img, tmp, (4,-2), tmp)
            if orientation == 3:
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
        elif orientation in (0,1): # north and east
            tmp = self.transform_image_side(stalk.transpose(Image.FLIP_LEFT_RIGHT))
            alpha_over(img, block,(-1,5), block)
            alpha_over(img, tmp, (2,12), tmp)
            if orientation == 0:
                img = img.transpose(Image.FLIP_LEFT_RIGHT)
        return img
# command block
@material(blockid=[137,210,211], solid=True, nodata=True)
def command_block(self, blockid, data):
if blockid == 210:
front = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_front.png")
side = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_side.png")
back = self.load_image_texture("assets/minecraft/textures/block/repeating_command_block_back.png")
elif blockid == 211:
front = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_front.png")
side = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_side.png")
back = self.load_image_texture("assets/minecraft/textures/block/chain_command_block_back.png")
else:
front = self.load_image_texture("assets/minecraft/textures/block/command_block_front.png")
side = self.load_image_texture("assets/minecraft/textures/block/command_block_side.png")
back = self.load_image_texture("assets/minecraft/textures/block/command_block_back.png")
return self.build_full_block(side, side, back, front, side)
# beacon block
# at the moment of writing this, it seems the beacon block doens't use
# the data values
@material(blockid=138, transparent=True, nodata = True)
def beacon(self, blockid, data):
# generate the three pieces of the block
t = self.load_image_texture("assets/minecraft/textures/block/glass.png")
glass = self.build_block(t,t)
t = self.load_image_texture("assets/minecraft/textures/block/obsidian.png")
obsidian = self.build_full_block((t,12),None, None, t, t)
obsidian = obsidian.resize((20,20), Image.ANTIALIAS)
t = self.load_image_texture("assets/minecraft/textures/block/beacon.png")
crystal = self.build_block(t,t)
crystal = crystal.resize((16,16),Image.ANTIALIAS)
# compose the block
img = Image.new("RGBA", (24,24), self.bgcolor)
alpha_over(img, obsidian, (2, 4), obsidian)
alpha_over(img, crystal, (4,3), crystal)
alpha_over(img, glass, (0,0), glass)
return img
    # cobblestone and mossy cobblestone walls, chorus plants, mossy stone brick walls
    # one additional bit of data value added for mossy and cobblestone
    @material(blockid=[199]+list(range(1792, 1812 + 1)), data=list(range(32)), transparent=True, nospawn=True)
    def cobblestone_wall(self, blockid, data):
        """Render wall blocks of many materials, plus chorus plants (199).

        Bits 0x1/0x8 of ``data`` select the arms drawn behind the centre
        pole (top-left / top-right on screen) and bits 0x2/0x4 the arms
        drawn in front (bottom-left / bottom-right).  Two straight arm
        patterns are special-cased and drawn as a full wall without the
        pole.  The meaning of the fifth data bit is not visible here; it
        only appears in those two special cases.
        """
        # map each block id to its wall material texture
        walls_id_to_tex = {
              199: "assets/minecraft/textures/block/chorus_plant.png", # chorus plants
            1792: "assets/minecraft/textures/block/andesite.png",
            1793: "assets/minecraft/textures/block/bricks.png",
            1794: "assets/minecraft/textures/block/cobblestone.png",
            1795: "assets/minecraft/textures/block/diorite.png",
            1796: "assets/minecraft/textures/block/end_stone_bricks.png",
            1797: "assets/minecraft/textures/block/granite.png",
            1798: "assets/minecraft/textures/block/mossy_cobblestone.png",
            1799: "assets/minecraft/textures/block/mossy_stone_bricks.png",
            1800: "assets/minecraft/textures/block/nether_bricks.png",
            1801: "assets/minecraft/textures/block/prismarine.png",
            1802: "assets/minecraft/textures/block/red_nether_bricks.png",
            1803: "assets/minecraft/textures/block/red_sandstone.png",
            1804: "assets/minecraft/textures/block/sandstone.png",
            1805: "assets/minecraft/textures/block/stone_bricks.png",
            1806: "assets/minecraft/textures/block/blackstone.png",
            1807: "assets/minecraft/textures/block/polished_blackstone.png",
            1808: "assets/minecraft/textures/block/polished_blackstone_bricks.png",
            1809: "assets/minecraft/textures/block/cobbled_deepslate.png",
            1810: "assets/minecraft/textures/block/polished_deepslate.png",
            1811: "assets/minecraft/textures/block/deepslate_bricks.png",
            1812: "assets/minecraft/textures/block/deepslate_tiles.png",
        }
        t = self.load_image_texture(walls_id_to_tex[blockid]).copy()
        wall_pole_top = t.copy()
        wall_pole_side = t.copy()
        wall_side_top = t.copy()
        wall_side = t.copy()
        # _full is used for walls without pole
        wall_side_top_full = t.copy()
        wall_side_full = t.copy()
        # generate the textures of the wall
        ImageDraw.Draw(wall_pole_top).rectangle((0,0,3,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_pole_top).rectangle((12,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_pole_top).rectangle((0,0,15,3),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_pole_top).rectangle((0,12,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_pole_side).rectangle((0,0,3,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_pole_side).rectangle((12,0,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        # Create the sides and the top of the pole
        wall_pole_side = self.transform_image_side(wall_pole_side)
        wall_pole_other_side = wall_pole_side.transpose(Image.FLIP_LEFT_RIGHT)
        wall_pole_top = self.transform_image_top(wall_pole_top)
        # Darken the sides slightly. These methods also affect the alpha layer,
        # so save them first (we don't want to "darken" the alpha layer making
        # the block transparent)
        sidealpha = wall_pole_side.split()[3]
        wall_pole_side = ImageEnhance.Brightness(wall_pole_side).enhance(0.8)
        wall_pole_side.putalpha(sidealpha)
        othersidealpha = wall_pole_other_side.split()[3]
        wall_pole_other_side = ImageEnhance.Brightness(wall_pole_other_side).enhance(0.7)
        wall_pole_other_side.putalpha(othersidealpha)
        # Compose the wall pole
        wall_pole = Image.new("RGBA", (24,24), self.bgcolor)
        alpha_over(wall_pole,wall_pole_side, (3,4),wall_pole_side)
        alpha_over(wall_pole,wall_pole_other_side, (9,4),wall_pole_other_side)
        alpha_over(wall_pole,wall_pole_top, (0,0),wall_pole_top)
        # create the sides and the top of a wall attached to a pole
        ImageDraw.Draw(wall_side).rectangle((0,0,15,2),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_side).rectangle((0,0,11,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_side_top).rectangle((0,0,11,15),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_side_top).rectangle((0,0,15,4),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_side_top).rectangle((0,11,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        # full version, without pole
        ImageDraw.Draw(wall_side_full).rectangle((0,0,15,2),outline=(0,0,0,0),fill=(0,0,0,0))
        ImageDraw.Draw(wall_side_top_full).rectangle((0,4,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        # NOTE(review): the next line repeats the previous draw call
        # verbatim; harmless, but likely a copy-paste leftover.
        ImageDraw.Draw(wall_side_top_full).rectangle((0,4,15,15),outline=(0,0,0,0),fill=(0,0,0,0))
        # compose the sides of a wall atached to a pole
        tmp = Image.new("RGBA", (24,24), self.bgcolor)
        wall_side = self.transform_image_side(wall_side)
        wall_side_top = self.transform_image_top(wall_side_top)
        # Darken the sides slightly. These methods also affect the alpha layer,
        # so save them first (we don't want to "darken" the alpha layer making
        # the block transparent)
        sidealpha = wall_side.split()[3]
        wall_side = ImageEnhance.Brightness(wall_side).enhance(0.7)
        wall_side.putalpha(sidealpha)
        alpha_over(tmp,wall_side, (0,0),wall_side)
        alpha_over(tmp,wall_side_top, (-5,3),wall_side_top)
        wall_side = tmp
        wall_other_side = wall_side.transpose(Image.FLIP_LEFT_RIGHT)
        # compose the sides of the full wall
        tmp = Image.new("RGBA", (24,24), self.bgcolor)
        wall_side_full = self.transform_image_side(wall_side_full)
        wall_side_top_full = self.transform_image_top(wall_side_top_full.rotate(90))
        # Darken the sides slightly. These methods also affect the alpha layer,
        # so save them first (we don't want to "darken" the alpha layer making
        # the block transparent)
        sidealpha = wall_side_full.split()[3]
        wall_side_full = ImageEnhance.Brightness(wall_side_full).enhance(0.7)
        wall_side_full.putalpha(sidealpha)
        alpha_over(tmp,wall_side_full, (4,0),wall_side_full)
        alpha_over(tmp,wall_side_top_full, (3,-4),wall_side_top_full)
        wall_side_full = tmp
        wall_other_side_full = wall_side_full.transpose(Image.FLIP_LEFT_RIGHT)
        # Create img to compose the wall
        img = Image.new("RGBA", (24,24), self.bgcolor)
        # Position wall imgs around the wall bit stick
        pos_top_left = (-5,-2)
        pos_bottom_left = (-8,4)
        pos_top_right = (5,-3)
        pos_bottom_right = (7,4)
        # +x axis points top right direction
        # +y axis points bottom right direction
        # There are two special cases for wall without pole.
        # Normal case:
        # First compose the walls in the back of the image,
        # then the pole and then the walls in the front.
        if (data == 0b1010) or (data == 0b11010):
            alpha_over(img, wall_other_side_full,(0,2), wall_other_side_full)
        elif (data == 0b0101) or (data == 0b10101):
            alpha_over(img, wall_side_full,(0,2), wall_side_full)
        else:
            if (data & 0b0001) == 1:
                alpha_over(img,wall_side, pos_top_left,wall_side) # top left
            if (data & 0b1000) == 8:
                alpha_over(img,wall_other_side, pos_top_right,wall_other_side) # top right
            alpha_over(img,wall_pole,(0,0),wall_pole)
            if (data & 0b0010) == 2:
                alpha_over(img,wall_other_side, pos_bottom_left,wall_other_side) # bottom left
            if (data & 0b0100) == 4:
                alpha_over(img,wall_side, pos_bottom_right,wall_side) # bottom right
        return img
# carrots, potatoes
@material(blockid=[141,142], data=list(range(8)), transparent=True, nospawn=True)
def crops4(self, blockid, data):
# carrots and potatoes have 8 data, but only 4 visual stages
stage = {0:0,
1:0,
2:1,
3:1,
4:2,
5:2,
6:2,
7:3}[data]
if blockid == 141: # carrots
raw_crop = self.load_image_texture("assets/minecraft/textures/block/carrots_stage%d.png" % stage)
else: # potatoes
raw_crop = self.load_image_texture("assets/minecraft/textures/block/potatoes_stage%d.png" % stage)
crop1 = self.transform_image_top(raw_crop)
crop2 = self.transform_image_side(raw_crop)
crop3 = crop2.transpose(Image.FLIP_LEFT_RIGHT)
img = Image.new("RGBA", (24,24), self.bgcolor)
alpha_over(img, crop1, (0,12), crop1)
alpha_over(img, crop2, (6,3), crop2)
alpha_over(img, crop3, (6,3), crop3)
return img
    # anvils
    @material(blockid=145, data=list(range(12)), transparent=True, nospawn=True)
    def anvil(self, blockid, data):
        """Render an anvil.

        Bit 0x1 of ``data`` gives the orientation (flipped for world
        rotations 1 and 3); bits 0x4/0x8 select regular, chipped or
        damaged top textures.
        """
        # anvils only have two orientations, invert it for rotations 1 and 3
        orientation = data & 0x1
        if self.rotation in (1, 3):
            if orientation == 1:
                orientation = 0
            else:
                orientation = 1
        # get the correct textures
        # the bits 0x4 and 0x8 determine how damaged is the anvil
        # (data < 12, so data & 0xc is always 0, 4 or 8 and `top` is bound)
        if (data & 0xc) == 0: # non damaged anvil
            top = self.load_image_texture("assets/minecraft/textures/block/anvil_top.png")
        elif (data & 0xc) == 0x4: # slightly damaged
            top = self.load_image_texture("assets/minecraft/textures/block/chipped_anvil_top.png")
        elif (data & 0xc) == 0x8: # very damaged
            top = self.load_image_texture("assets/minecraft/textures/block/damaged_anvil_top.png")
        # everything else use this texture
        big_side = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy()
        small_side = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy()
        base = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy()
        small_base = self.load_image_texture("assets/minecraft/textures/block/anvil.png").copy()
        # cut needed patterns
        ImageDraw.Draw(big_side).rectangle((0, 8, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        ImageDraw.Draw(small_side).rectangle((0, 0, 2, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        ImageDraw.Draw(small_side).rectangle((13, 0, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        ImageDraw.Draw(small_side).rectangle((0, 8, 15, 15), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
        # outline-only rectangles: shave concentric 1px borders off the bases
        ImageDraw.Draw(base).rectangle((0, 0, 15, 15), outline=(0, 0, 0, 0))
        ImageDraw.Draw(base).rectangle((1, 1, 14, 14), outline=(0, 0, 0, 0))
        ImageDraw.Draw(small_base).rectangle((0, 0, 15, 15), outline=(0, 0, 0, 0))
        ImageDraw.Draw(small_base).rectangle((1, 1, 14, 14), outline=(0, 0, 0, 0))
        ImageDraw.Draw(small_base).rectangle((2, 2, 13, 13), outline=(0, 0, 0, 0))
        ImageDraw.Draw(small_base).rectangle((3, 3, 12, 12), outline=(0, 0, 0, 0))
        # check orientation and compose the anvil
        if orientation == 1: # bottom-left top-right
            top = top.rotate(90)
            left_side = small_side
            left_pos = (1, 6)
            right_side = big_side
            right_pos = (10, 5)
        else: # top-left bottom-right
            right_side = small_side
            right_pos = (12, 6)
            left_side = big_side
            left_pos = (3, 5)
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        # darken sides
        alpha = big_side.split()[3]
        big_side = ImageEnhance.Brightness(big_side).enhance(0.8)
        big_side.putalpha(alpha)
        alpha = small_side.split()[3]
        small_side = ImageEnhance.Brightness(small_side).enhance(0.9)
        small_side.putalpha(alpha)
        alpha = base.split()[3]
        base_d = ImageEnhance.Brightness(base).enhance(0.8)
        base_d.putalpha(alpha)
        # compose
        base = self.transform_image_top(base)
        base_d = self.transform_image_top(base_d)
        small_base = self.transform_image_top(small_base)
        top = self.transform_image_top(top)
        # stack the darkened base three times to give the plinth height
        alpha_over(img, base_d, (0, 12), base_d)
        alpha_over(img, base_d, (0, 11), base_d)
        alpha_over(img, base_d, (0, 10), base_d)
        alpha_over(img, small_base, (0, 10), small_base)
        alpha_over(img, top, (0, 1), top) # Fix gap between block edges
        alpha_over(img, top, (0, 0), top)
        left_side = self.transform_image_side(left_side)
        right_side = self.transform_image_side(right_side).transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, left_side, left_pos, left_side)
        alpha_over(img, right_side, right_pos, right_side)
        return img
# block of redstone
block(blockid=152, top_image="assets/minecraft/textures/block/redstone_block.png")
# nether quartz ore
block(blockid=153, top_image="assets/minecraft/textures/block/nether_quartz_ore.png")
    # block of quartz
    @material(blockid=155, data=list(range(5)), solid=True)
    def quartz_block(self, blockid, data):
        """Render quartz blocks.

        data 0 = plain, 1 = chiseled, 2 = vertical pillar, 3 = N-S pillar,
        4 = E-W pillar.  For horizontal pillars, world rotations 1 and 3
        swap the two renderings so the axis stays correct on screen.
        """
        if data in (0,1): # normal and chiseled quartz block
            if data == 0:
                top = self.load_image_texture("assets/minecraft/textures/block/quartz_block_top.png")
                side = self.load_image_texture("assets/minecraft/textures/block/quartz_block_side.png")
            else:
                top = self.load_image_texture("assets/minecraft/textures/block/chiseled_quartz_block_top.png")
                side = self.load_image_texture("assets/minecraft/textures/block/chiseled_quartz_block.png")
            return self.build_block(top, side)
        # pillar quartz block with orientation
        top = self.load_image_texture("assets/minecraft/textures/block/quartz_pillar_top.png")
        side = self.load_image_texture("assets/minecraft/textures/block/quartz_pillar.png").copy()
        if data == 2: # vertical
            return self.build_block(top, side)
        elif data == 3: # north-south oriented
            if self.rotation in (0,2):
                return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
            return self.build_full_block(side, None, None, side.rotate(90), top)
        elif data == 4: # east-west oriented
            if self.rotation in (0,2):
                return self.build_full_block(side, None, None, side.rotate(90), top)
            return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
# hopper
@material(blockid=154, data=list(range(4)), transparent=True)
def hopper(self, blockid, data):
#build the top
side = self.load_image_texture("assets/minecraft/textures/block/hopper_outside.png")
top = self.load_image_texture("assets/minecraft/textures/block/hopper_top.png")
bottom = self.load_image_texture("assets/minecraft/textures/block/hopper_inside.png")
hop_top = self.build_full_block((top,10), side, side, side, side, side)
#build a solid block for mid/top
hop_mid = self.build_full_block((top,5), side, side, side, side, side)
hop_bot = self.build_block(side,side)
hop_mid = hop_mid.resize((17,17),Image.ANTIALIAS)
hop_bot = hop_bot.resize((10,10),Image.ANTIALIAS)
#compose the final block
img = Image.new("RGBA", (24,24), self.bgcolor)
alpha_over(img, hop_bot, (7,14), hop_bot)
alpha_over(img, hop_mid, (3,3), hop_mid)
alpha_over(img, hop_top, (0,-6), hop_top)
return img
# slime block
block(blockid=165, top_image="assets/minecraft/textures/block/slime_block.png")
# prismarine block
@material(blockid=168, data=list(range(3)), solid=True)
def prismarine_block(self, blockid, data):
if data == 0: # prismarine
t = self.load_image_texture("assets/minecraft/textures/block/prismarine.png")
elif data == 1: # prismarine bricks
t = self.load_image_texture("assets/minecraft/textures/block/prismarine_bricks.png")
elif data == 2: # dark prismarine
t = self.load_image_texture("assets/minecraft/textures/block/dark_prismarine.png")
img = self.build_block(t, t)
return img
# sea lantern
block(blockid=169, top_image="assets/minecraft/textures/block/sea_lantern.png")
# hay block
@material(blockid=170, data=list(range(9)), solid=True)
def hayblock(self, blockid, data):
top = self.load_image_texture("assets/minecraft/textures/block/hay_block_top.png")
side = self.load_image_texture("assets/minecraft/textures/block/hay_block_side.png")
if self.rotation == 1:
if data == 4: data = 8
elif data == 8: data = 4
elif self.rotation == 3:
if data == 4: data = 8
elif data == 8: data = 4
# choose orientation and paste textures
if data == 4: # east-west orientation
return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
elif data == 8: # north-south orientation
return self.build_full_block(side, None, None, side.rotate(90), top)
else:
return self.build_block(top, side)
# carpet - wool block that's small?
@material(blockid=171, data=list(range(17)), transparent=True)
def carpet(self, blockid, data):
if data < 16:
texture = self.load_image_texture("assets/minecraft/textures/block/%s_wool.png" % color_map[data])
elif data == 16:
texture = self.load_image_texture("assets/minecraft/textures/block/moss_block.png")
return self.build_full_block((texture,15),texture,texture,texture,texture)
#clay block
block(blockid=172, top_image="assets/minecraft/textures/block/terracotta.png")
#stained hardened clay
@material(blockid=159, data=list(range(16)), solid=True)
def stained_clay(self, blockid, data):
texture = self.load_image_texture("assets/minecraft/textures/block/%s_terracotta.png" % color_map[data])
return self.build_block(texture,texture)
#coal block
block(blockid=173, top_image="assets/minecraft/textures/block/coal_block.png")
# packed ice block
block(blockid=174, top_image="assets/minecraft/textures/block/packed_ice.png")
#blue ice
block(blockid=11312, top_image="assets/minecraft/textures/block/blue_ice.png")
#smooth stones
block(blockid=11313, top_image="assets/minecraft/textures/block/smooth_stone.png") # stone
block(blockid=11314, top_image="assets/minecraft/textures/block/sandstone_top.png") # sandstone
block(blockid=11315, top_image="assets/minecraft/textures/block/red_sandstone_top.png") # red sandstone
#coral blocks
block(blockid=11316, top_image="assets/minecraft/textures/block/brain_coral_block.png")
block(blockid=11317, top_image="assets/minecraft/textures/block/bubble_coral_block.png")
block(blockid=11318, top_image="assets/minecraft/textures/block/fire_coral_block.png")
block(blockid=11319, top_image="assets/minecraft/textures/block/horn_coral_block.png")
block(blockid=11320, top_image="assets/minecraft/textures/block/tube_coral_block.png")
#dead coral blocks
block(blockid=11321, top_image="assets/minecraft/textures/block/dead_brain_coral_block.png")
block(blockid=11322, top_image="assets/minecraft/textures/block/dead_bubble_coral_block.png")
block(blockid=11323, top_image="assets/minecraft/textures/block/dead_fire_coral_block.png")
block(blockid=11324, top_image="assets/minecraft/textures/block/dead_horn_coral_block.png")
block(blockid=11325, top_image="assets/minecraft/textures/block/dead_tube_coral_block.png")
@material(blockid=175, data=list(range(16)), transparent=True)
def flower(self, blockid, data):
double_plant_map = ["sunflower", "lilac", "tall_grass", "large_fern", "rose_bush", "peony", "peony", "peony"]
plant = double_plant_map[data & 0x7]
if data & 0x8:
part = "top"
else:
part = "bottom"
png = "assets/minecraft/textures/block/%s_%s.png" % (plant,part)
texture = self.load_image_texture(png)
img = self.build_billboard(texture)
#sunflower top
if data == 8:
bloom_tex = self.load_image_texture("assets/minecraft/textures/block/sunflower_front.png")
alpha_over(img, bloom_tex.resize((14, 11), Image.ANTIALIAS), (5,5))
return img
# chorus flower
@material(blockid=200, data=list(range(6)), solid=True)
def chorus_flower(self, blockid, data):
# aged 5, dead
if data == 5:
texture = self.load_image_texture("assets/minecraft/textures/block/chorus_flower_dead.png")
else:
texture = self.load_image_texture("assets/minecraft/textures/block/chorus_flower.png")
return self.build_block(texture,texture)
# purpur block
block(blockid=201, top_image="assets/minecraft/textures/block/purpur_block.png")
# purpur pillar
@material(blockid=202, data=list(range(3)), solid=True)
def purpur_pillar(self, blockid, data):
top=self.load_image_texture("assets/minecraft/textures/block/purpur_pillar_top.png")
side=self.load_image_texture("assets/minecraft/textures/block/purpur_pillar.png")
return self.build_axis_block(top, side, data)
# end brick
block(blockid=206, top_image="assets/minecraft/textures/block/end_stone_bricks.png")
# frosted ice
@material(blockid=212, data=list(range(4)), solid=True)
def frosted_ice(self, blockid, data):
img = self.load_image_texture("assets/minecraft/textures/block/frosted_ice_%d.png" % data)
return self.build_block(img, img)
# magma block
block(blockid=213, top_image="assets/minecraft/textures/block/magma.png")
# nether wart block
block(blockid=214, top_image="assets/minecraft/textures/block/nether_wart_block.png")
# warped wart block
block(blockid=1010, top_image="assets/minecraft/textures/block/warped_wart_block.png")
# red nether brick
block(blockid=215, top_image="assets/minecraft/textures/block/red_nether_bricks.png")
@material(blockid=216, data=list(range(12)), solid=True)
def boneblock(self, blockid, data):
# extract orientation
boneblock_orientation = data & 12
if self.rotation == 1:
if boneblock_orientation == 4: boneblock_orientation = 8
elif boneblock_orientation == 8: boneblock_orientation = 4
elif self.rotation == 3:
if boneblock_orientation == 4: boneblock_orientation = 8
elif boneblock_orientation == 8: boneblock_orientation = 4
top = self.load_image_texture("assets/minecraft/textures/block/bone_block_top.png")
side = self.load_image_texture("assets/minecraft/textures/block/bone_block_side.png")
# choose orientation and paste textures
if boneblock_orientation == 0:
return self.build_block(top, side)
elif boneblock_orientation == 4: # east-west orientation
return self.build_full_block(side.rotate(90), None, None, top, side.rotate(90))
elif boneblock_orientation == 8: # north-south orientation
return self.build_full_block(side, None, None, side.rotate(270), top)
    # observer
    @material(blockid=218, data=[0, 1, 2, 3, 4, 5, 8, 9, 10, 11, 12, 13], solid=True, nospawn=True)
    def observer(self, blockid, data):
        """Render an observer block.

        ``data & 0b111`` is the facing (0 down, 1 up, 2 east, 3 west,
        4 north, 5 south); bit 0b1000 marks the powered state and selects
        the lit back texture.
        """
        # Do rotation
        if self.rotation in [1, 2, 3] and (data & 0b111) in [2, 3, 4, 5]:
            rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                            2: {2: 3, 3: 2, 4: 5, 5: 4},
                            3: {2: 4, 3: 5, 4: 3, 5: 2}}
            data = (data & 0b1000) | rotation_map[self.rotation][data & 0b111]
        front = self.load_image_texture("assets/minecraft/textures/block/observer_front.png")
        side = self.load_image_texture("assets/minecraft/textures/block/observer_side.png")
        top = self.load_image_texture("assets/minecraft/textures/block/observer_top.png")
        file_name_back = "observer_back_on" if data & 0b1000 else "observer_back"
        back = self.load_image_texture("assets/minecraft/textures/block/%s.png" % file_name_back)
        if data & 0b0111 == 0: # Down
            img = self.build_full_block(back, None, None, side.rotate(90), top)
        elif data & 0b0111 == 1: # Up
            img = self.build_full_block(front.rotate(180), None, None, side.rotate(90), top.rotate(180))
        elif data & 0b0111 == 2: # East
            img = self.build_full_block(top.rotate(180), None, None, side, back)
        elif data & 0b0111 == 3: # West
            img = self.build_full_block(top, None, None, side, front)
        elif data & 0b0111 == 4: # North
            img = self.build_full_block(top.rotate(270), None, None, front, side)
        elif data & 0b0111 == 5: # South
            img = self.build_full_block(top.rotate(90), None, None, back, side)
        return img
    # shulker box
    @material(blockid=list(range(219, 235)) + [257], data=list(range(6)), solid=True, nospawn=True)
    def shulker_box(self, blockid, data):
        """Render a shulker box.

        blockids 219-234 are the 16 dyed boxes (via ``color_map``); 257 is
        the undyed box.  ``data`` is the facing (0 down .. 5 south).  The
        faces are cropped out of the shulker *entity* texture sheet, whose
        cell size is a quarter of the sheet width.
        """
        # Do rotation
        if self.rotation in [1, 2, 3] and data in [2, 3, 4, 5]:
            rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                            2: {2: 3, 3: 2, 4: 5, 5: 4},
                            3: {2: 4, 3: 5, 4: 3, 5: 2}}
            data = rotation_map[self.rotation][data]
        if blockid == 257:
            # Uncolored shulker box
            file_name = "shulker.png"
        else:
            file_name = "shulker_%s.png" % color_map[blockid - 219]
        shulker_t = self.load_image("assets/minecraft/textures/entity/shulker/%s" % file_name).copy()
        w, h = shulker_t.size
        res = w // 4
        # Cut out the parts of the shulker texture we need for the box
        top = shulker_t.crop((res, 0, res * 2, res))
        bottom = shulker_t.crop((res * 2, int(res * 1.75), res * 3, int(res * 2.75)))
        side_top = shulker_t.crop((0, res, res, int(res * 1.75)))
        side_bottom = shulker_t.crop((0, int(res * 2.75), res, int(res * 3.25)))
        # stitch a full side face from the lid part and the base part
        side = Image.new('RGBA', (res, res))
        side.paste(side_top, (0, 0), side_top)
        side.paste(side_bottom, (0, res // 2), side_bottom)
        if data == 0: # down
            side = side.rotate(180)
            img = self.build_full_block(bottom, None, None, side, side)
        elif data == 1: # up
            img = self.build_full_block(top, None, None, side, side)
        elif data == 2: # east
            img = self.build_full_block(side, None, None, side.rotate(90), bottom)
        elif data == 3: # west
            img = self.build_full_block(side.rotate(180), None, None, side.rotate(270), top)
        elif data == 4: # north
            img = self.build_full_block(side.rotate(90), None, None, top, side.rotate(270))
        elif data == 5: # south
            img = self.build_full_block(side.rotate(270), None, None, bottom, side.rotate(90))
        return img
# structure block
@material(blockid=255, data=list(range(4)), solid=True)
def structure_block(self, blockid, data):
if data == 0:
img = self.load_image_texture("assets/minecraft/textures/block/structure_block_save.png")
elif data == 1:
img = self.load_image_texture("assets/minecraft/textures/block/structure_block_load.png")
elif data == 2:
img = self.load_image_texture("assets/minecraft/textures/block/structure_block_corner.png")
elif data == 3:
img = self.load_image_texture("assets/minecraft/textures/block/structure_block_data.png")
return self.build_block(img, img)
    # Jigsaw block
    @material(blockid=256, data=list(range(6)), solid=True)
    def jigsaw_block(self, blockid, data):
        """Render a jigsaw block; ``data`` is the facing (0 down, 1 up,
        2 north, 3 south, 4 west, 5 east), remapped for world rotation."""
        # Do rotation
        if self.rotation in [1, 2, 3] and data in [2, 3, 4, 5]:
            rotation_map = {1: {2: 5, 3: 4, 4: 2, 5: 3},
                            2: {2: 3, 3: 2, 4: 5, 5: 4},
                            3: {2: 4, 3: 5, 4: 3, 5: 2}}
            data = rotation_map[self.rotation][data]
        top = self.load_image_texture("assets/minecraft/textures/block/jigsaw_top.png")
        bottom = self.load_image_texture("assets/minecraft/textures/block/jigsaw_bottom.png")
        side = self.load_image_texture("assets/minecraft/textures/block/jigsaw_side.png")
        if data == 0: # Down
            img = self.build_full_block(bottom.rotate(self.rotation * 90), None, None,
                                        side.rotate(180), side.rotate(180))
        elif data == 1: # Up
            img = self.build_full_block(top.rotate(self.rotation * 90), None, None, side, side)
        elif data == 2: # North
            img = self.build_full_block(side, None, None, side.rotate(90), bottom.rotate(180))
        elif data == 3: # South
            img = self.build_full_block(side.rotate(180), None, None, side.rotate(270), top.rotate(270))
        elif data == 4: # West
            img = self.build_full_block(side.rotate(90), None, None, top.rotate(180), side.rotate(270))
        elif data == 5: # East
            img = self.build_full_block(side.rotate(270), None, None, bottom.rotate(180),
                                        side.rotate(90))
        return img
# beetroots(207), berry bushes (11505)
@material(blockid=[207, 11505], data=list(range(4)), transparent=True, nospawn=True)
def crops(self, blockid, data):
    """Render a four-stage crop (beetroot or sweet berry bush).

    ``data`` is the growth stage, used directly in the texture name.
    The sprite is composed from one top view and two mirrored side
    views.
    """
    tex_by_id = {
        207: "assets/minecraft/textures/block/beetroots_stage%d.png",
        11505: "assets/minecraft/textures/block/sweet_berry_bush_stage%d.png",
    }
    stage_tex = self.load_image_texture(tex_by_id[blockid] % data)
    top_view = self.transform_image_top(stage_tex)
    side_view = self.transform_image_side(stage_tex)
    mirrored_side = side_view.transpose(Image.FLIP_LEFT_RIGHT)
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(img, top_view, (0, 12), top_view)
    alpha_over(img, side_view, (6, 3), side_view)
    alpha_over(img, mirrored_side, (6, 3), mirrored_side)
    return img
# Concrete
@material(blockid=251, data=list(range(16)), solid=True)
def concrete(self, blockid, data):
    """Render a solid concrete block; ``data`` is the color index."""
    tex = self.load_image_texture(
        "assets/minecraft/textures/block/%s_concrete.png" % color_map[data])
    return self.build_block(tex, tex)
# Concrete Powder
@material(blockid=252, data=list(range(16)), solid=True)
def concrete_powder(self, blockid, data):
    """Render a concrete powder block; ``data`` is the color index.

    Renamed from ``concrete`` — the original was a copy-paste duplicate
    that shadowed the solid-concrete handler defined just above.
    Registration happens through the @material decorator, so the
    function name itself is not referenced by callers.
    """
    texture = self.load_image_texture("assets/minecraft/textures/block/%s_concrete_powder.png" % color_map[data])
    return self.build_block(texture, texture)
# Glazed Terracotta
@material(blockid=list(range(235, 251)), data=list(range(4)), solid=True)
def glazed_terracotta(self, blockid, data):
    """Render one of the sixteen glazed terracotta colors.

    ``data`` is the horizontal facing (0=south, 1=west, 2=north,
    3=east), adjusted for the current map rotation.
    """
    facing = (self.rotation + data) % 4
    tex = self.load_image_texture("assets/minecraft/textures/block/%s_glazed_terracotta.png" %
                                  color_map[blockid - 235]).copy()
    mirrored = tex.transpose(Image.FLIP_LEFT_RIGHT)
    # (top, left side, right side) per facing
    top_face, left_face, right_face = {
        0: (tex, tex, mirrored.rotate(270)),                             # South
        1: (tex.rotate(270), tex.rotate(90), mirrored.rotate(180)),      # West
        2: (tex.rotate(180), tex.rotate(180), mirrored.rotate(90)),      # North
        3: (tex.rotate(90), tex.rotate(270), mirrored),                  # East
    }[facing]
    return self.build_full_block(top_face, None, None, left_face, right_face)
# dried kelp block
@material(blockid=11331, data=[0], solid=True)
def dried_kelp(self, blockid, data):
    """Render a dried kelp block (distinct top and side textures).

    Renamed from ``sandstone`` — the original name was a copy-paste
    leftover that shadowed the real sandstone handler defined earlier
    in this file. Registration happens through the @material decorator,
    so the function name itself is not referenced by callers.
    """
    top = self.load_image_texture("assets/minecraft/textures/block/dried_kelp_top.png")
    return self.build_block(top, self.load_image_texture("assets/minecraft/textures/block/dried_kelp_side.png"))
# scaffolding
# NOTE(review): rendered as a plain transparent cube; the open lattice
# of real scaffolding is not modelled here.
block(blockid=11414, top_image="assets/minecraft/textures/block/scaffolding_top.png", side_image="assets/minecraft/textures/block/scaffolding_side.png", solid=False, transparent=True)
# beehive and bee_nest
@material(blockid=[11501, 11502], data=list(range(8)), solid=True)
def beehivenest(self, blockid, data):
    """Render a beehive (11501) or bee nest (11502).

    The low two bits of ``data`` give the facing (0=south, 1=west,
    2=north, 3=east); ``data >= 4`` selects the honey-filled front
    texture. The front face's slot in build_full_block depends on both
    the block facing and the current map rotation.
    """
    if blockid == 11501:  # beehive
        prefix = "beehive"
        t_top = self.load_image("assets/minecraft/textures/block/beehive_end.png")
    else:  # bee_nest
        prefix = "bee_nest"
        t_top = self.load_image("assets/minecraft/textures/block/bee_nest_top.png")
    t_side = self.load_image("assets/minecraft/textures/block/%s_side.png" % prefix)
    t_front = self.load_image("assets/minecraft/textures/block/%s_front.png" % prefix)
    t_front_honey = self.load_image("assets/minecraft/textures/block/%s_front_honey.png" % prefix)
    front = t_front_honey if data >= 4 else t_front
    # build_full_block takes (top, side1, side2, side3, side4); with
    # facing f and map rotation r the front lands in the
    # (4 - (f + r) % 4)-th side slot, i.e. list index 3 - (f + r) % 4.
    # This collapses the original 4x4 branch table.
    facing = data & 0b11
    sides = [t_side] * 4
    sides[3 - ((facing + self.rotation) % 4)] = front
    return self.build_full_block(t_top, *sides)
# honeycomb_block
block(blockid=11503, top_image="assets/minecraft/textures/block/honeycomb_block.png")
# honey_block
# NOTE(review): drawn as an ordinary full cube; the translucent outer
# shell of a honey block is not modelled.
block(blockid=11504, top_image="assets/minecraft/textures/block/honey_block_top.png", side_image="assets/minecraft/textures/block/honey_block_side.png")
# Barrel
@material(blockid=11418, data=list(range(12)), solid=True)
def barrel(self, blockid, data):
    """Render a barrel.

    Bit 0 of ``data`` marks the barrel as open (changes the lid
    texture); the remaining bits give the orientation: 0=up, 1=down,
    2=south, 3=east, 4=north, 5=west.
    """
    t_bottom = self.load_image("assets/minecraft/textures/block/barrel_bottom.png")
    t_side = self.load_image("assets/minecraft/textures/block/barrel_side.png")
    lid = "barrel_top_open" if data & 0x01 else "barrel_top"
    t_top = self.load_image("assets/minecraft/textures/block/%s.png" % lid)
    orientation = data >> 1
    if orientation == 0:  # up
        top_face, left_face, right_face = t_top, t_side, t_side
    elif orientation == 1:  # down
        flipped = t_side.rotate(180)
        top_face, left_face, right_face = t_bottom, flipped, flipped
    elif orientation == 2:  # south
        top_face, left_face, right_face = t_side.rotate(180), t_side.rotate(270), t_top
    elif orientation == 3:  # east
        top_face, left_face, right_face = t_side.rotate(270), t_bottom, t_side.rotate(90)
    elif orientation == 4:  # north
        top_face, left_face, right_face = t_side, t_side.rotate(90), t_bottom
    else:  # west
        top_face, left_face, right_face = t_side.rotate(90), t_top, t_side.rotate(270)
    return self.build_full_block(top_face, None, None, left_face, right_face)
# Campfire (11506) and soul campfire (1003)
@material(blockid=[11506, 1003], data=list(range(8)), solid=True, transparent=True, nospawn=True)
def campfire(self, blockid, data):
    """Render a campfire or soul campfire.

    The low two bits of ``data`` are the facing (rotated with the map);
    bit 0b100 marks the fire as lit. The crossed logs are split into a
    back and a front layer so the fire sprite can be drawn between
    them; the compositing order below is deliberate and fragile.
    """
    # Do rotation, mask to not clobber lit data
    data = data & 0b100 | ((self.rotation + (data & 0b11)) % 4)
    block_name = "campfire" if blockid == 11506 else "soul_campfire"
    # Load textures
    # Fire & lit log textures contain multiple tiles, since both are
    # 16px wide rely on load_image_texture() to crop appropriately
    fire_raw_t = self.load_image_texture("assets/minecraft/textures/block/" + block_name
                                         + "_fire.png")
    log_raw_t = self.load_image_texture("assets/minecraft/textures/block/campfire_log.png")
    log_lit_raw_t = self.load_image_texture("assets/minecraft/textures/block/" + block_name
                                            + "_log_lit.png")
    def create_tile(img_src, coord_crop, coord_paste, rot):
        # Takes an image, crops a region, optionally rotates the
        # texture, then finally pastes it onto a 16x16 image
        img_out = Image.new("RGBA", (16, 16), self.bgcolor)
        img_in = img_src.crop(coord_crop)
        if rot != 0:
            img_in = img_in.rotate(rot, expand=True)
        img_out.paste(img_in, coord_paste)
        return img_out
    # Generate bottom (ash strip when lit, plain log otherwise)
    bottom_t = log_lit_raw_t if data & 0b100 else log_raw_t
    bottom_t = create_tile(bottom_t, (0, 8, 16, 14), (0, 5), 0)
    bottom_t = self.transform_image_top(bottom_t)
    # Generate two variants of a log: one with a lit side, one without
    log_t = Image.new("RGBA", (24, 24), self.bgcolor)
    log_end_t = create_tile(log_raw_t, (0, 4, 4, 8), (12, 6), 0)
    log_side_t = create_tile(log_raw_t, (0, 0, 16, 4), (0, 6), 0)
    log_side_lit_t = create_tile(log_lit_raw_t, (0, 0, 16, 4), (0, 6), 0)
    log_end_t = self.transform_image_side(log_end_t)
    log_top_t = self.transform_image_top(log_side_t)
    log_side_t = self.transform_image_side(log_side_t).transpose(Image.FLIP_LEFT_RIGHT)
    log_side_lit_t = self.transform_image_side(log_side_lit_t).transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(log_t, log_top_t, (-2, 2), log_top_t)  # Fix some holes at the edges
    alpha_over(log_t, log_top_t, (-2, 1), log_top_t)
    log_lit_t = log_t.copy()
    # Unlit log
    alpha_over(log_t, log_side_t, (5, 0), log_side_t)
    alpha_over(log_t, log_end_t, (-7, 0), log_end_t)
    # Lit log. For unlit fires, just reference the unlit log texture
    if data & 0b100:
        alpha_over(log_lit_t, log_side_lit_t, (5, 0), log_side_lit_t)
        alpha_over(log_lit_t, log_end_t, (-7, 0), log_end_t)
    else:
        log_lit_t = log_t
    # Log parts. Because fire needs to be in the middle of the logs,
    # split the logs into two parts: Those appearing behind the fire
    # and those appearing in front of the fire
    logs_back_t = Image.new("RGBA", (24, 24), self.bgcolor)
    logs_front_t = Image.new("RGBA", (24, 24), self.bgcolor)
    # Back logs
    alpha_over(logs_back_t, log_lit_t, (-1, 7), log_lit_t)
    log_tmp_t = logs_back_t.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(logs_back_t, log_tmp_t, (1, -3), log_tmp_t)
    # Front logs
    alpha_over(logs_front_t, log_t, (7, 10), log_t)
    # Due to the awkward drawing order, take a small part of the back
    # logs that need to be drawn on top of the front logs despite
    # the front logs being drawn last
    ImageDraw.Draw(log_tmp_t).rectangle((0, 0, 18, 24), outline=(0, 0, 0, 0), fill=(0, 0, 0, 0))
    alpha_over(logs_front_t, log_tmp_t, (1, -3), log_tmp_t)
    log_tmp_t = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(log_tmp_t, log_lit_t, (7, 10), log_lit_t)
    log_tmp_t = log_tmp_t.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(logs_front_t, log_tmp_t, (1, -3), log_tmp_t)
    # Compose final image: bottom, back logs, fire (if lit), front logs
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(img, bottom_t, (0, 12), bottom_t)
    alpha_over(img, logs_back_t, (0, 0), logs_back_t)
    if data & 0b100:
        fire_t = fire_raw_t.copy()
        if data & 0b11 in [0, 2]:  # North, South
            fire_t = fire_t.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, fire_t, (4, 4), fire_t)
    alpha_over(img, logs_front_t, (0, 0), logs_front_t)
    # North/South facings are drawn mirrored as a whole
    if data & 0b11 in [0, 2]:  # North, South
        img = img.transpose(Image.FLIP_LEFT_RIGHT)
    return img
# Bell
@material(blockid=11507, data=list(range(16)), solid=True, transparent=True, nospawn=True)
def bell(self, blockid, data):
    """Render a bell together with its attachment.

    The low two bits of ``data`` are the facing (rotated with the map);
    bits 2-3 encode the attachment: 0=floor (stone post), 1=ceiling,
    2=single wall, 3=double wall (wooden bar). The bell body texture is
    an entity texture atlas, so individual faces are cropped out of it.
    """
    # Do rotation, mask to not clobber attachment data
    data = data & 0b1100 | ((self.rotation + (data & 0b11)) % 4)
    # Load textures
    bell_raw_t = self.load_image("assets/minecraft/textures/entity/bell/bell_body.png")
    bar_raw_t = self.load_image_texture("assets/minecraft/textures/block/dark_oak_planks.png")
    post_raw_t = self.load_image_texture("assets/minecraft/textures/block/stone.png")
    def create_tile(img_src, coord_crop, coord_paste, rot):
        # Takes an image, crops a region, optionally rotates the
        # texture, then finally pastes it onto a 16x16 image
        img_out = Image.new("RGBA", (16, 16), self.bgcolor)
        img_in = img_src.crop(coord_crop)
        if rot != 0:
            img_in = img_in.rotate(rot, expand=True)
        img_out.paste(img_in, coord_paste)
        return img_out
    # 0 = floor, 1 = ceiling, 2 = single wall, 3 = double wall
    bell_type = (data & 0b1100) >> 2
    # Should the bar/post texture be flipped? Yes if either:
    # - Attached to floor and East or West facing
    # - Not attached to floor and North or South facing
    flip_part = ((bell_type == 0 and data & 0b11 in [1, 3]) or
                 (bell_type != 0 and data & 0b11 in [0, 2]))
    # Generate bell
    # Bell side textures varies based on self.rotation
    bell_sides_idx = [(0 - self.rotation) % 4, (3 - self.rotation) % 4]
    # Upper sides
    bell_coord = [x * 6 for x in bell_sides_idx]
    bell_ul_t = create_tile(bell_raw_t, (bell_coord[0], 6, bell_coord[0] + 6, 13), (5, 4), 180)
    bell_ur_t = create_tile(bell_raw_t, (bell_coord[1], 6, bell_coord[1] + 6, 13), (5, 4), 180)
    bell_ul_t = self.transform_image_side(bell_ul_t)
    bell_ur_t = self.transform_image_side(bell_ur_t.transpose(Image.FLIP_LEFT_RIGHT))
    bell_ur_t = bell_ur_t.transpose(Image.FLIP_LEFT_RIGHT)
    # Lower sides
    bell_coord = [x * 8 for x in bell_sides_idx]
    bell_ll_t = create_tile(bell_raw_t, (bell_coord[0], 21, bell_coord[0] + 8, 23), (4, 11), 180)
    bell_lr_t = create_tile(bell_raw_t, (bell_coord[1], 21, bell_coord[1] + 8, 23), (4, 11), 180)
    bell_ll_t = self.transform_image_side(bell_ll_t)
    bell_lr_t = self.transform_image_side(bell_lr_t.transpose(Image.FLIP_LEFT_RIGHT))
    bell_lr_t = bell_lr_t.transpose(Image.FLIP_LEFT_RIGHT)
    # Upper top
    top_rot = (180 + self.rotation * 90) % 360
    bell_ut_t = create_tile(bell_raw_t, (6, 0, 12, 6), (5, 5), top_rot)
    bell_ut_t = self.transform_image_top(bell_ut_t)
    # Lower top
    bell_lt_t = create_tile(bell_raw_t, (8, 13, 16, 21), (4, 4), top_rot)
    bell_lt_t = self.transform_image_top(bell_lt_t)
    # Composite the bell body: lower (wide) half first, then upper half
    bell_t = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(bell_t, bell_lt_t, (0, 8), bell_lt_t)
    alpha_over(bell_t, bell_ll_t, (3, 4), bell_ll_t)
    alpha_over(bell_t, bell_lr_t, (9, 4), bell_lr_t)
    alpha_over(bell_t, bell_ut_t, (0, 3), bell_ut_t)
    alpha_over(bell_t, bell_ul_t, (4, 4), bell_ul_t)
    alpha_over(bell_t, bell_ur_t, (8, 4), bell_ur_t)
    # Generate bar
    if bell_type == 1:  # Ceiling
        # bar_coord: Left Right Top
        bar_coord = [(4, 2, 6, 5), (6, 2, 8, 5), (1, 3, 3, 5)]
        bar_tile_pos = [(7, 1), (7, 1), (7, 7)]
        bar_over_pos = [(6, 3), (7, 2), (0, 0)]
    else:  # Floor, single wall, double wall
        # Note: For a single wall bell, the position of the bar
        # varies based on facing
        if bell_type == 2 and data & 0b11 in [2, 3]:  # Single wall, North/East facing
            bar_x_sw = 3
            bar_l_pos_sw = (6, 7)
        else:
            bar_x_sw = 0
            bar_l_pos_sw = (4, 8)
        # Per-type lookup lists indexed by bell_type; the ceiling case
        # (index 1) is handled above, hence the None placeholders
        bar_x = [2, None, bar_x_sw, 0][bell_type]
        bar_len = [12, None, 13, 16][bell_type]
        bar_l_pos = [(6, 7), None, bar_l_pos_sw, (4, 8)][bell_type]
        bar_long_coord = (bar_x, 3, bar_x + bar_len, 5)
        bar_coord = [(5, 4, 7, 6), bar_long_coord, bar_long_coord]
        bar_tile_pos = [(2, 1), (bar_x, 1), (bar_x, 7)]
        bar_over_pos = [bar_l_pos, (7, 3), (0, 1)]
    bar_l_t = create_tile(bar_raw_t, bar_coord[0], bar_tile_pos[0], 0)
    bar_r_t = create_tile(bar_raw_t, bar_coord[1], bar_tile_pos[1], 0)
    bar_t_t = create_tile(bar_raw_t, bar_coord[2], bar_tile_pos[2], 0)
    bar_l_t = self.transform_image_side(bar_l_t)
    bar_r_t = self.transform_image_side(bar_r_t.transpose(Image.FLIP_LEFT_RIGHT))
    bar_r_t = bar_r_t.transpose(Image.FLIP_LEFT_RIGHT)
    bar_t_t = self.transform_image_top(bar_t_t)
    bar_t = Image.new("RGBA", (24, 24), self.bgcolor)
    alpha_over(bar_t, bar_t_t, bar_over_pos[2], bar_t_t)
    alpha_over(bar_t, bar_l_t, bar_over_pos[0], bar_l_t)
    alpha_over(bar_t, bar_r_t, bar_over_pos[1], bar_r_t)
    if flip_part:
        bar_t = bar_t.transpose(Image.FLIP_LEFT_RIGHT)
    # Generate post, only applies to floor attached bell
    if bell_type == 0:
        post_l_t = create_tile(post_raw_t, (0, 1, 4, 16), (6, 1), 0)
        post_r_t = create_tile(post_raw_t, (0, 1, 2, 16), (14, 1), 0)
        post_t_t = create_tile(post_raw_t, (0, 0, 2, 4), (14, 6), 0)
        post_l_t = self.transform_image_side(post_l_t)
        post_r_t = self.transform_image_side(post_r_t.transpose(Image.FLIP_LEFT_RIGHT))
        post_r_t = post_r_t.transpose(Image.FLIP_LEFT_RIGHT)
        post_t_t = self.transform_image_top(post_t_t)
        post_back_t = Image.new("RGBA", (24, 24), self.bgcolor)
        post_front_t = Image.new("RGBA", (24, 24), self.bgcolor)
        alpha_over(post_back_t, post_t_t, (0, 1), post_t_t)
        alpha_over(post_back_t, post_l_t, (10, 0), post_l_t)
        alpha_over(post_back_t, post_r_t, (7, 3), post_r_t)
        alpha_over(post_back_t, post_r_t, (6, 3), post_r_t)  # Fix some holes
        alpha_over(post_front_t, post_back_t, (-10, 5), post_back_t)
        if flip_part:
            post_back_t = post_back_t.transpose(Image.FLIP_LEFT_RIGHT)
            post_front_t = post_front_t.transpose(Image.FLIP_LEFT_RIGHT)
    # Final composite: rear post, bell, bar, front post (in that order)
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    if bell_type == 0:
        alpha_over(img, post_back_t, (0, 0), post_back_t)
    alpha_over(img, bell_t, (0, 0), bell_t)
    alpha_over(img, bar_t, (0, 0), bar_t)
    if bell_type == 0:
        alpha_over(img, post_front_t, (0, 0), post_front_t)
    return img
# Ancient Debris (distinct top and side textures)
block(blockid=[1000], top_image="assets/minecraft/textures/block/ancient_debris_top.png",
      side_image="assets/minecraft/textures/block/ancient_debris_side.png")
# Basalt
@material(blockid=[1001, 1002], data=list(range(3)), solid=True)
def basalt(self, blockid, data):
    """Render basalt (1001) or polished basalt (1002).

    ``data`` is the pillar axis, passed through to build_axis_block.
    """
    name = "basalt" if blockid == 1001 else "polished_basalt"
    base_path = "assets/minecraft/textures/block/" + name
    return self.build_axis_block(
        self.load_image_texture(base_path + "_top.png"),
        self.load_image_texture(base_path + "_side.png"),
        data)
# Blackstone block (distinct top and side textures)
block(blockid=[1004], top_image="assets/minecraft/textures/block/blackstone_top.png",
      side_image="assets/minecraft/textures/block/blackstone.png")
# Chain
@material(blockid=11419, data=list(range(3)), solid=True, transparent=True, nospawn=True)
def chain(self, blockid, data):
    """Render a chain; ``data`` is its axis (0=y, 1=x, 2=z)."""
    tex = self.load_image_texture("assets/minecraft/textures/block/chain.png")
    # Center the 6px-wide chain strip on an otherwise blank tile
    strip = Image.new(tex.mode, tex.size, self.bgcolor)
    cropped = tex.crop((0, 0, 6, 16))
    alpha_over(strip, cropped, (5, 0), cropped)
    if data == 0:  # vertical chain: a plain sprite suffices
        return self.build_sprite(strip)
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    rotated = strip.rotate(90)
    side_view = self.transform_image_side(rotated)
    top_view = self.transform_image_top(rotated)

    def draw_x():
        flipped = side_view.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, flipped, (6, 3), flipped)
        alpha_over(img, top_view, (3, 3), top_view)

    def draw_z():
        flipped = top_view.transpose(Image.FLIP_LEFT_RIGHT)
        alpha_over(img, side_view, (6, 3), side_view)
        alpha_over(img, flipped, (0, 6), flipped)

    # The two horizontal axes swap with every quarter-turn of the map
    draw_funcs = (draw_x, draw_z)
    offset = 0 if data == 1 else 1
    draw_funcs[(self.rotation + offset) % 2]()
    return img
# Respawn anchor
@material(blockid=1037, data=list(range(5)), solid=True)
def respawn_anchor(self, blockid, data):
    """Render a respawn anchor; ``data`` is the charge level (0-4)."""
    if data == 0:  # an uncharged anchor has a dark lid
        top_name = "respawn_anchor_top_off"
    else:
        top_name = "respawn_anchor_top"
    top = self.load_image_texture("assets/minecraft/textures/block/%s.png" % top_name)
    side = self.load_image_texture(
        "assets/minecraft/textures/block/respawn_anchor_side%s.png" % (data))
    return self.build_block(top, side)
# Netherite
block(blockid=[1005], top_image="assets/minecraft/textures/block/netherite_block.png")
# soul soil
block(blockid=1020, top_image="assets/minecraft/textures/block/soul_soil.png")
# nether gold ore
block(blockid=1021, top_image="assets/minecraft/textures/block/nether_gold_ore.png")
# Solid Nether stone blocks
block(blockid=1022, top_image="assets/minecraft/textures/block/polished_blackstone.png")
block(blockid=1023, top_image="assets/minecraft/textures/block/chiseled_polished_blackstone.png")
block(blockid=1024, top_image="assets/minecraft/textures/block/gilded_blackstone.png")
block(blockid=1025, top_image="assets/minecraft/textures/block/cracked_polished_blackstone_bricks.png")
block(blockid=1026, top_image="assets/minecraft/textures/block/polished_blackstone_bricks.png")
# Crying obsidian
block(blockid=1035, top_image="assets/minecraft/textures/block/crying_obsidian.png")
# Lodestone
block(blockid=1036, top_image="assets/minecraft/textures/block/lodestone_top.png", side_image="assets/minecraft/textures/block/lodestone_side.png")
# Quartz bricks
block(blockid=1041, top_image="assets/minecraft/textures/block/quartz_bricks.png")
# Amethyst block
block(blockid=1042, top_image="assets/minecraft/textures/block/amethyst_block.png")
# Raw metal blocks
block(blockid=1043, top_image="assets/minecraft/textures/block/raw_iron_block.png")
block(blockid=1044, top_image="assets/minecraft/textures/block/raw_gold_block.png")
# Budding amethyst
block(blockid=1045, top_image="assets/minecraft/textures/block/budding_amethyst.png")
# You have entered the COPPER ZONE
# NOTE(review): each oxidation stage registers two ids — presumably the
# plain and waxed variants share a texture; verify against the id table.
block(blockid=[1046, 1050], top_image="assets/minecraft/textures/block/copper_block.png")
block(blockid=[1047, 1051], top_image="assets/minecraft/textures/block/exposed_copper.png")
block(blockid=[1048, 1052], top_image="assets/minecraft/textures/block/weathered_copper.png")
block(blockid=[1049, 1053], top_image="assets/minecraft/textures/block/oxidized_copper.png")
# Cut variant
block(blockid=[1054, 1058], top_image="assets/minecraft/textures/block/cut_copper.png")
block(blockid=[1055, 1059], top_image="assets/minecraft/textures/block/exposed_cut_copper.png")
block(blockid=[1056, 1060], top_image="assets/minecraft/textures/block/weathered_cut_copper.png")
block(blockid=[1057, 1061], top_image="assets/minecraft/textures/block/oxidized_cut_copper.png")
block(blockid=1062, top_image="assets/minecraft/textures/block/raw_copper_block.png")
block(blockid=1063, top_image="assets/minecraft/textures/block/copper_ore.png")
# You are now leaving the COPPER ZONE
# Moss, calcite, rooted dirt
block(blockid=1080, top_image="assets/minecraft/textures/block/moss_block.png")
block(blockid=1081, top_image="assets/minecraft/textures/block/calcite.png")
block(blockid=1082, top_image="assets/minecraft/textures/block/rooted_dirt.png")
# deepslate
@material(blockid=1083, data=list(range(3)), solid=True)
def deepslate(self, blockid, data):
    """Render a deepslate pillar; ``data`` is the axis for
    build_axis_block."""
    base_path = "assets/minecraft/textures/block/"
    return self.build_axis_block(
        self.load_image_texture(base_path + "deepslate_top.png"),
        self.load_image_texture(base_path + "deepslate.png"),
        data)
# Deepslate variants and deepslate ores
block(blockid=1084, top_image="assets/minecraft/textures/block/cobbled_deepslate.png")
block(blockid=1085, top_image="assets/minecraft/textures/block/polished_deepslate.png")
block(blockid=1086, top_image="assets/minecraft/textures/block/deepslate_coal_ore.png")
block(blockid=1087, top_image="assets/minecraft/textures/block/deepslate_iron_ore.png")
block(blockid=1088, top_image="assets/minecraft/textures/block/deepslate_copper_ore.png")
block(blockid=1089, top_image="assets/minecraft/textures/block/deepslate_gold_ore.png")
block(blockid=1090, top_image="assets/minecraft/textures/block/deepslate_emerald_ore.png")
block(blockid=1091, top_image="assets/minecraft/textures/block/deepslate_lapis_ore.png")
block(blockid=1092, top_image="assets/minecraft/textures/block/deepslate_diamond_ore.png")
block(blockid=1093, top_image="assets/minecraft/textures/block/deepslate_redstone_ore.png")
block(blockid=1094, top_image="assets/minecraft/textures/block/deepslate_bricks.png")
block(blockid=1095, top_image="assets/minecraft/textures/block/cracked_deepslate_bricks.png")
block(blockid=1096, top_image="assets/minecraft/textures/block/deepslate_tiles.png")
block(blockid=1097, top_image="assets/minecraft/textures/block/cracked_deepslate_tiles.png")
block(blockid=1098, top_image="assets/minecraft/textures/block/chiseled_deepslate.png")
# Dripstone block, smooth basalt, tuff
block(blockid=1107, top_image="assets/minecraft/textures/block/dripstone_block.png")
block(blockid=1108, top_image="assets/minecraft/textures/block/smooth_basalt.png")
block(blockid=1109, top_image="assets/minecraft/textures/block/tuff.png")
@material(blockid=1110, data=list(range(16)), transparent=True)
def pointed_dripstone(self, blockid, data):
    """Render pointed dripstone as a sprite.

    Bit 3 of ``data`` selects the hanging ("down") variant; the low
    three bits encode the thickness step: >=4 base, 3 frustum,
    2 middle, 1 tip_merge, 0 tip.
    """
    direction = "down" if data & 0b1000 else "up"
    thickness = data & 0b111
    if thickness >= 4:
        part = "base"
    elif thickness == 3:
        part = "frustum"
    elif thickness == 2:
        part = "middle"
    elif thickness == 1:
        part = "tip_merge"
    else:
        part = "tip"
    tex = self.load_image_texture(
        "assets/minecraft/textures/block/pointed_dripstone_%s_%s.png"
        % (direction, part))
    return self.build_sprite(tex)
# Powder snow (rendered as a plain full cube)
block(blockid=1111, top_image="assets/minecraft/textures/block/powder_snow.png")
@material(blockid=1112, data=0, transparent=True)
def hangings_roots(self, blockid, data):
    """Render hanging roots as a simple crossed sprite."""
    roots_tex = self.load_image_texture("assets/minecraft/textures/block/hanging_roots.png")
    return self.build_sprite(roots_tex)
@material(blockid=[1113, 1114, 1115], data=list(range(6)), transparent=True)
def amethyst_bud(self, blockid, data):
    """Render a small (1113), medium (1114) or large (1115) amethyst
    bud.

    ``data`` is the face the bud grows from: 0=down, 1=up, plus four
    wall directions handled by the draw_* closures below, which are
    dispatched through ``draw_funcs`` so the map rotation is applied.
    NOTE(review): the branch labels (5=north, 3=south, 4=west, 2=east)
    differ from the common 2=north convention — presumably they match
    the data values produced by the world reader; verify against the
    caller.
    """
    if blockid == 1113:
        tex = self.load_image_texture("assets/minecraft/textures/block/small_amethyst_bud.png")
    elif blockid == 1114:
        tex = self.load_image_texture("assets/minecraft/textures/block/medium_amethyst_bud.png")
    elif blockid == 1115:
        tex = self.load_image_texture("assets/minecraft/textures/block/large_amethyst_bud.png")
    def draw_north():
        rotated = tex.rotate(90)
        side = self.transform_image_side(rotated)
        otherside = self.transform_image_top(rotated)
        otherside = otherside.transpose(Image.FLIP_TOP_BOTTOM)
        alpha_over(img, side, (6, 3), side)
        alpha_over(img, otherside, (0, 6), otherside)
    def draw_south():
        rotated = tex.rotate(-90)
        side = self.transform_image_side(rotated)
        otherside = self.transform_image_top(rotated)
        otherside = otherside.transpose(Image.FLIP_TOP_BOTTOM)
        alpha_over(img, side, (6, 3), side)
        alpha_over(img, otherside, (0, 6), otherside)
    def draw_west():
        rotated = tex.rotate(-90)
        side = self.transform_image_side(rotated)
        side = side.transpose(Image.FLIP_LEFT_RIGHT)
        otherside = self.transform_image_top(rotated)
        otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT)
        otherside = otherside.transpose(Image.FLIP_TOP_BOTTOM)
        alpha_over(img, side, (6, 3), side)
        alpha_over(img, otherside, (0, 6), otherside)
    def draw_east():
        rotated = tex.rotate(90)
        side = self.transform_image_side(rotated)
        side = side.transpose(Image.FLIP_LEFT_RIGHT)
        otherside = self.transform_image_top(rotated)
        otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT)
        otherside = otherside.transpose(Image.FLIP_TOP_BOTTOM)
        alpha_over(img, side, (6, 3), side)
        alpha_over(img, otherside, (0, 6), otherside)
    draw_funcs = [draw_east, draw_south, draw_west, draw_north]
    if data == 0: # down
        tex = tex.transpose(Image.FLIP_TOP_BOTTOM)
        return self.build_sprite(tex)
    elif data == 1: # up
        return self.build_sprite(tex)
    elif data == 5: # north
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        draw_funcs[(self.rotation + 3) % len(draw_funcs)]()
        return img
    elif data == 3: # south
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        draw_funcs[(self.rotation + 1) % len(draw_funcs)]()
        return img
    elif data == 4: # west
        img = Image.new("RGBA", (24,24), self.bgcolor)
        draw_funcs[(self.rotation + 2) % len(draw_funcs)]()
        return img
    elif data == 2: # east
        img = Image.new("RGBA", (24, 24), self.bgcolor)
        draw_funcs[(self.rotation + 0) % len(draw_funcs)]()
        return img
    # Fallback; unreachable since data is always 0-5
    return self.build_sprite(tex)
@material(blockid=[1116, 1117], data=list(range(2)), transparent=True)
def cave_vines(self, blockid, data):
    """Render cave vines as a sprite.

    blockid 1116 is the vine body ("plant"), 1117 the growing tip;
    ``data == 1`` selects the berry-bearing ("lit") texture.
    """
    base_name = "cave_vines_plant" if blockid == 1116 else "cave_vines"
    lit_suffix = "_lit" if data == 1 else ""
    tex = self.load_image_texture(
        "assets/minecraft/textures/block/%s%s.png" % (base_name, lit_suffix))
    return self.build_sprite(tex)
@material(blockid=1118, data=list(range(6)), transparent=True, solid=True)
def lightning_rod(self, blockid, data):
tex = self.load_image_texture("assets/minecraft/textures/block/lightning_rod.png")
img = Image.new("RGBA", (24, 24), self.bgcolor)
mask = tex.crop((0, 4, 2, 16))
sidetex = Image.new(tex.mode, tex.size, self.bgcolor)
alpha_over(sidetex, mask, (14, 4), mask)
mask = tex.crop((0, 0, 4, 4))
toptex = Image.new(tex.mode, tex.size, self.bgcolor)
alpha_over(toptex, mask, (12, 0), mask)
mask = tex.crop((0, 4, 2, 6))
side_toptex = Image.new(tex.mode, tex.size, self.bgcolor)
alpha_over(side_toptex, mask, (12, 0), mask)
def draw_east():
toptex_rotated = toptex.rotate(90)
top_side = self.transform_image_side(toptex_rotated)
top_side = top_side.transpose(Image.FLIP_LEFT_RIGHT)
top_otherside = self.transform_image_top(toptex)
top_otherside = top_otherside.transpose(Image.FLIP_LEFT_RIGHT)
top_top = self.transform_image_side(toptex)
# top
alpha_over(img, top_otherside, (6, 6), top_otherside)
# side
alpha_over(img, top_side, (8, 7), top_side)
alpha_over(img, top_top, (6, 2), top_top)
roated_side = sidetex.rotate(90)
side = self.transform_image_side(roated_side)
side = side.transpose(Image.FLIP_TOP_BOTTOM)
otherside = self.transform_image_top(sidetex)
otherside = otherside.transpose(Image.FLIP_TOP_BOTTOM)
side_top = self.transform_image_side(side_toptex)
alpha_over(img, otherside, (-7, 4), otherside)
alpha_over(img, side, (5, -1), side)
alpha_over(img, side_top, (-2, 9), side_top)
def draw_south():
roated_side = sidetex.rotate(90)
side = self.transform_image_side(roated_side)
otherside = self.transform_image_top(sidetex)
alpha_over(img, side, (3, 6), side)
alpha_over(img, otherside, (-8, 6), otherside)
toptex_rotated = toptex.rotate(90)
top_side = self.transform_image_side(toptex_rotated)
top_otherside = self.transform_image_top(toptex)
top_top = self.transform_image_side(toptex)
top_top = top_top.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, top_side, (15, 12), top_side)
alpha_over(img, top_otherside, (5, 10), top_otherside)
alpha_over(img, top_top, (17, 7), top_top)
def draw_west():
roated_side = sidetex.rotate(90)
side = self.transform_image_side(roated_side)
side = side.transpose(Image.FLIP_LEFT_RIGHT)
otherside = self.transform_image_top(sidetex)
otherside = otherside.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, side, (10, 6), side)
alpha_over(img, otherside, (8, 6), otherside)
toptex_rotated = toptex.rotate(90)
top_side = self.transform_image_side(toptex_rotated)
top_side = top_side.transpose(Image.FLIP_LEFT_RIGHT)
top_otherside = self.transform_image_top(toptex)
top_otherside = top_otherside.transpose(Image.FLIP_LEFT_RIGHT)
top_top = self.transform_image_side(toptex)
# top
alpha_over(img, top_otherside, (-3, 10), top_otherside)
# side
alpha_over(img, top_side, (0, 11), top_side)
alpha_over(img, top_top, (-3, 7), top_top)
def draw_north():
    # Draw the north-facing orientation; mirror image of draw_south
    # with its own paste offsets.
    roated_side = sidetex.rotate(90)
    side = self.transform_image_side(roated_side)
    otherside = self.transform_image_top(sidetex)
    alpha_over(img, side, (4, 7), side)
    alpha_over(img, otherside, (-6, 7), otherside)
    # Upper block faces; the side projection of the top texture is
    # mirrored left-to-right.
    toptex_rotated = toptex.rotate(90)
    top_side = self.transform_image_side(toptex_rotated)
    top_otherside = self.transform_image_top(toptex)
    top_top = self.transform_image_side(toptex)
    top_top = top_top.transpose(Image.FLIP_LEFT_RIGHT)
    alpha_over(img, top_otherside, (-4, 6), top_otherside)
    alpha_over(img, top_side, (5, 7), top_side)
    alpha_over(img, top_top, (8, 3), top_top)
draw_funcs = [draw_east, draw_south, draw_west, draw_north]
if data == 1: # up
side = self.transform_image_side(sidetex)
otherside = side.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, side, (0, 6 - 4), side)
alpha_over(img, otherside, (12, 6 - 4), otherside)
top_top = self.transform_image_top(toptex)
top_side = self.transform_image_side(toptex)
top_otherside = top_side.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, top_side, (0, 6 - 4), top_side)
alpha_over(img, top_otherside, (12, 6 - 4), top_otherside)
alpha_over(img, top_top, (0, 5), top_top)
elif data == 0: # down
toptex_flipped = toptex.transpose(Image.FLIP_TOP_BOTTOM)
top_top = self.transform_image_top(toptex)
top_side = self.transform_image_side(toptex_flipped)
top_otherside = top_side.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, top_side, (0, 6 - 4), top_side)
alpha_over(img, top_otherside, (12, 6 - 4), top_otherside)
alpha_over(img, top_top, (0, 14), top_top)
flipped = sidetex.transpose(Image.FLIP_TOP_BOTTOM)
side_top = self.transform_image_top(side_toptex)
side = self.transform_image_side(flipped)
otherside = side.transpose(Image.FLIP_LEFT_RIGHT)
alpha_over(img, side, (0, 6 - 4), side)
alpha_over(img, otherside, (12, 6 - 4), otherside)
alpha_over(img, side_top, (2, 6), side_top)
elif data == 3: # south
draw_funcs[(self.rotation + 1) % len(draw_funcs)]()
elif data == 4: # west
draw_funcs[(self.rotation + 2) % len(draw_funcs)]()
elif data == 2: # east
draw_funcs[(self.rotation + 0) % len(draw_funcs)]()
elif data == 5: # north
draw_funcs[(self.rotation + 3) % len(draw_funcs)]()
return img
@material(blockid=1119, data=list(range(1 << 6)), transparent=True)
def glow_lichen(self, blockid, data):
    """Render glow lichen; each bit of ``data`` toggles one face of the block."""
    tex = self.load_image_texture("assets/minecraft/textures/block/glow_lichen.png")
    # Bit layout: 0=bottom, 1=top, 2=east, 3=south, 4=west, 5=north.
    bottom, top, east, south, west, north = (
        tex if data & (1 << bit) else None for bit in range(6)
    )
    # Face order handed to build_full_block for each map rotation
    # (rotation values other than 0-2 fall through to the last entry,
    # matching the original if/elif/else chain).
    face_orders = (
        (top, north, east, west, south, bottom),
        (top, west, north, south, east, bottom),
        (top, south, west, east, north, bottom),
        (top, east, south, north, west, bottom),
    )
    rotation = self.rotation if self.rotation in (0, 1, 2) else 3
    return self.build_full_block(*face_orders[rotation])
@material(blockid=1120, data=list(range(1)), transparent=True)
def spore_blossom(self, blockid, data):
    """Render a spore blossom: four leaf quadrants plus the central base."""
    leaf = self.load_image_texture("assets/minecraft/textures/block/spore_blossom.png")
    base = self.load_image_texture("assets/minecraft/textures/block/spore_blossom_base.png")
    img = Image.new("RGBA", (24, 24), self.bgcolor)
    # Paste the leaf four times, rotating it 90 degrees between pastes;
    # each rotation gets its own paste offset.
    paste_offsets = ((-6, -5), (-7, 4), (5, 4), (5, -5))
    current = leaf
    for step, offset in enumerate(paste_offsets):
        if step:
            current = current.rotate(90)
        projected = self.transform_image_top(current)
        alpha_over(img, projected, offset, projected)
    # Central base goes on last, over the leaves.
    base_top = self.transform_image_top(base)
    alpha_over(img, base_top, (0, 0), base_top)
    return img
|
overviewer/Minecraft-Overviewer
|
overviewer_core/textures.py
|
Python
|
gpl-3.0
| 282,304
|
[
"BLAST",
"CRYSTAL"
] |
59b726d7e0d802f50bee8286a96f691cf117a7f17d4ed073384db3c5a3b8fb87
|
# Launch the reflex CPG network model and show its geometry in NEURON.
from neuron import h,gui
from CPG_Network import ReflexNetwork
#h.load_file('stdgui.hoc')

# Instantiate the reflex network (sections/cells built by CPG_Network).
rc = ReflexNetwork()
# Open a NEURON shape plot of the model morphology.
shape_window = h.PlotShape()
# Render sections with their true diameters instead of thin lines.
shape_window.exec_menu('Show Diam')
|
penguinscontrol/Spinal-Cord-Modeling
|
CPG/run_python.py
|
Python
|
gpl-2.0
| 176
|
[
"NEURON"
] |
74aeb04c323d2c8f42df506544a3db439e458f053b3f1b5d63624ac6ac45a3b9
|
###
### This script can be run with pvpython rather than pvbatch, as it does not
### need mpi.
###
### Purpose:
###
### Generate a static image dataset of volume rendering on the ne cooling data
###
### Example usages (assumes you are in directory with this script):
###
### 1) To run on the coarse mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vel/tent" --optype "tent"
###
### 2) To run on the coarse mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse" --inputpattern "101results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/coarse/Output/vel/linear" --optype "linear"
###
### 3) To run on the fine mesh with tent-shaped opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vel/tent" --optype "tent"
###
### 4) To run on the fine mesh with linear opacity functions
###
### /home/scott/projects/ParaView/build/bin/pvpython volume-vel.py --inputdir "/media/scott/CINEMA FAT/ne-water-cool/fine" --inputpattern "fine_results_%d.vtk" --outputdir "/media/scott/CINEMA FAT/ne-water-cool/fine/Output/vel/linear" --optype "linear"
###
import sys, os, argparse
from paraview.simple import *
from paraview import data_exploration as wx
#import matplotlib.pyplot as plt
###############################################################################
# Helper function to generate the tent functions needed for scalar opacity
# function
###############################################################################
def createHatFunctions():
    """Build the tent-shaped opacity functions.

    Splits [0, 1] into five equal-width bands and returns one tent
    ("hat") per band as a list of ``[xPoints, yPoints]`` pairs: the
    opacity is 0 everywhere except a triangle peaking at 1.0 over the
    band's center.
    """
    base_width = 0.20
    half_width = base_width / 2.0
    hats = []
    for band in range(int(1.0 / base_width)):
        center = (base_width / 2.0) + (band * base_width)
        left = center - half_width
        # x breakpoints: flat zero, ramp up to the peak, ramp back down,
        # flat zero out to 1.0.
        xs = [0.0, left, left + half_width, left + 2 * half_width, 1.0]
        ys = [0.0, 0.0, 1.0, 0.0, 0.0]
        hats.append([xs, ys])
    return hats
###############################################################################
# This method does all the processing
###############################################################################
def doProcessing(inputDir, inputPattern, outputDir, opacityFnType):
    """Volume-render the 'vel' field of each timestep and export a 360-degree
    image stack per timestep and per opacity band.

    inputDir/inputPattern locate the VTK files (pattern contains %d for the
    timestep); outputDir receives the Cinema image tree; opacityFnType is
    'tent' (hat functions from createHatFunctions) or anything else for the
    linear ramp.
    """
    # -----------------------------------------------------------------------------
    # Path to input/output data/directories
    # -----------------------------------------------------------------------------
    files_pattern = os.path.join(inputDir, inputPattern)
    file_times = range(0, 101)
    #file_times = [ 80 ]
    filenames = [ (files_pattern % time) for time in file_times]
    # -----------------------------------------------------------------------------
    # Rendering configuration
    # -----------------------------------------------------------------------------
    resolution = 500
    view_size = [resolution, resolution]
    angle_steps = [15, 15]
    #angle_steps = [90, 90]
    distance = 24632.991324377483
    rotation_axis = [0.0, 1.0, 0.0]
    #center_of_rotation = [-1649.1046142578125, -752.328125, 1374.1217346191406]
    center_of_rotation = [0.0, 0.0, 0.0]
    view = GetRenderView()
    view.ViewSize = view_size
    view.Background = [0.0, 0.0, 0.0]
    view.OrientationAxesVisibility = 0
    view.CenterAxesVisibility = 0
    # -----------------------------------------------------------------------------
    # Output configuration: one jpg per (time, volume band, camera angle).
    # -----------------------------------------------------------------------------
    fng = wx.FileNameGenerator(outputDir, '{time}/{volumeIdx}/{theta}_{phi}.jpg')
    exporter = wx.ThreeSixtyImageStackExporter(fng,
                                               view,
                                               center_of_rotation,
                                               distance,
                                               rotation_axis,
                                               angle_steps)
    # -----------------------------------------------------------------------------
    # Pipeline configuration
    # -----------------------------------------------------------------------------
    # create a new 'Legacy VTK Reader'
    readerProxy = LegacyVTKReader(FileNames=filenames)
    # This translation transform is a workaround for a bug in the camera orbiting
    # calculations made in ThreeSixtyImageStackExporter
    transform1 = Transform(Input=readerProxy)
    transform1.Transform = 'Transform'
    transform1.Transform.Translate = [1649.1046142578125, 752.328125, -1374.1217346191406]
    # create a new 'Cell Data to Point Data'
    cellDatatoPointData1 = CellDatatoPointData(Input=transform1)
    # get color transfer function/color map for 'vel' (blue->red over 0..15000)
    velLUT = GetColorTransferFunction('vel')
    velLUT.RGBPoints = [0.0, 0.0, 0.0, 1.0, 15000.0, 1.0, 0.0, 0.0]
    velLUT.LockScalarRange = 1
    velLUT.ColorSpace = 'HSV'
    velLUT.NanColor = [0.498039, 0.498039, 0.498039]
    velLUT.ScalarRangeInitialized = 1.0
    # get opacity transfer function/opacity map for 'vel'
    velPWF = GetOpacityTransferFunction('vel')
    velPWF.Points = [0.0, 0.0, 0.5, 0.0, 15000.0, 1.0, 0.5, 0.0]
    velPWF.ScalarRangeInitialized = 1
    # show data from fine_results_ (faint surface rendering of the mesh)
    readerDisplay = Show(transform1)
    readerDisplay.ColorArrayName = [None, '']
    readerDisplay.Opacity = 0.15
    readerDisplay.ScalarOpacityUnitDistance = 79.03822718592288
    # show data from cellDatatoPointData1 as the volume rendering
    cellDatatoPointData1Display = Show(cellDatatoPointData1)
    cellDatatoPointData1Display.Representation = 'Volume'
    cellDatatoPointData1Display.ColorArrayName = ['POINTS', 'vel']
    cellDatatoPointData1Display.LookupTable = velLUT
    cellDatatoPointData1Display.ScalarOpacityFunction = velPWF
    cellDatatoPointData1Display.ScalarOpacityUnitDistance = 79.03822718592288
    # -----------------------------------------------------------------------------
    # Batch processing
    # -----------------------------------------------------------------------------
    if opacityFnType == 'tent':
        hatFunctions = createHatFunctions()
    Render()
    for t in range(0, len(file_times), 1):
        time = file_times[t]
        GetAnimationScene().TimeKeeper.Time = float(time)
        UpdatePipeline(time)
        # Fixed range rather than per-timestep range so bands are comparable
        # across timesteps.
        dataRange = [0.0, 15000.0]
        print "Moving to timestep ",time,", new data range: ",dataRange
        for volumeIdx in range(5):
            curRange = dataRange[1] - dataRange[0]
            pwfPoints = []
            if opacityFnType == 'tent':
                # Scale the unit-interval hat breakpoints to the data range.
                # Each piecewise-function point is (x, opacity, 0.5, 0.0).
                xPoints = hatFunctions[volumeIdx][0]
                yPoints = hatFunctions[volumeIdx][1]
                for i in range(len(xPoints)):
                    pwfPoints.append(dataRange[0] + (xPoints[i] * curRange))
                    pwfPoints.append(yPoints[i])
                    pwfPoints.append(0.5)
                    pwfPoints.append(0.0)
            else:
                # Linear ramp starting at a per-band threshold.
                curStep = dataRange[0] + (float(volumeIdx) * (curRange / 5.0))
                pwfPoints = [ dataRange[0], 0.0, 0.5, 0.0,
                              curStep, 0.0, 0.5, 0.0,
                              dataRange[1], 1.0, 0.5, 0.0 ]
            newPwf = CreatePiecewiseFunction( Points=pwfPoints )
            cellDatatoPointData1Display.ScalarOpacityFunction = newPwf
            fng.update_active_arguments(volumeIdx=volumeIdx)
            fng.update_label_arguments(volumeIdx="Idx")
            exporter.UpdatePipeline(time)
###############################################################################
# Main script entry point
###############################################################################
if __name__ == "__main__":
    # Parse command-line options and run the full processing pipeline.
    description = "Python script to generate volume rendered NE cooling data"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--inputdir", type=str, default="", help="Path to directory where input data files exist")
    parser.add_argument("--inputpattern", type=str, default="", help="String pattern containing %d where pattern should be replaced with numbers")
    parser.add_argument("--outputdir", type=str, default="", help="Path to directory where cinema dataset should be written")
    parser.add_argument("--optype", type=str, default="", help="Opacity function type, should be either 'tent' or 'linear'")
    args = parser.parse_args()
    doProcessing(args.inputdir, args.inputpattern, args.outputdir, args.optype)
|
Kitware/cinema
|
scripts/data_generation/ne-cooling/volume-vel.py
|
Python
|
bsd-3-clause
| 9,113
|
[
"ParaView",
"VTK"
] |
772244f566ff0b693391583b9b3fd7810ed1acce3d3b74aba392bac12fd13618
|
#!/usr/bin/env python
# Copyright (C) 2012 Tianyang Li
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
"""
remove reads that were aligned >= 2 times and contigs associated with them
"""
import sys
import getopt
import pysam
from Bio import SeqIO
def main(args):
    """Write lists of reads aligned exactly once and of contigs not hit by
    multiply-aligned reads.

    Options: -c contigs fasta, -s SAM/BAM file, -o output prefix,
    -r reads file, -f reads file format (Bio.SeqIO format name).
    """
    contigs, sam, fout_prefix, reads, fmt = None, None, None, None, None
    try:
        opts, args = getopt.getopt(args, 'c:s:o:r:f:')
    except getopt.GetoptError as err:
        print >> sys.stderr, str(err)
        sys.exit(1)
    for opt, arg in opts:
        if opt == '-c':
            contigs = arg
        if opt == '-s':
            sam = arg
        if opt == '-o':
            fout_prefix = arg
        if opt == '-r':
            reads = arg
        if opt == '-f':
            fmt = arg
    if contigs == None or sam == None or fout_prefix == None or reads == None:
        print >> sys.stderr, "missing options"
        sys.exit(1)
    # Map read id -> list of contigs it aligns to.
    # NOTE(review): these lists are never populated (the SAM parsing marked
    # by the TODO below is missing), so every read currently has an empty
    # list and no read is written to the good-reads list -- confirm intent.
    read_aln_count = {}
    for rec in SeqIO.parse(reads, fmt):
        read_aln_count[rec.id] = []
    # TODO: use pysam
    dup_contig = set([])
    with open(fout_prefix + "-good-reads-list", 'w') as good_reads:
        for read in read_aln_count.keys():
            contig_list = read_aln_count[read]
            if len(contig_list) != 1:
                # Read aligned 0 or >=2 times: mark its contigs as dups.
                more_flg = False
                if len(contig_list) >= 2:
                    print "#######################"
                    more_flg = True
                for bad_contig in contig_list:
                    dup_contig.add(bad_contig)
                    if more_flg:
                        print bad_contig
            else:
                # Uniquely aligned read: keep it.
                good_reads.write("%s\n" % read)
    # Good contigs are those never touched by a multiply-aligned read.
    contig_name = set([])
    for rec in SeqIO.parse(contigs, 'fasta'):
        contig_name.add(rec.id)
    good_contigs = contig_name - dup_contig
    with open(fout_prefix + "-good-contigs-list", 'w') as gc_fout:
        for good_contig in good_contigs:
            gc_fout.write("%s\n" % good_contig)
if __name__ == '__main__':
    # Pass only the arguments after the script name to getopt.
    main(sys.argv[1:])
|
tianyang-li/meta-transcriptome
|
trans-len/strip_dup_al_reads.py
|
Python
|
gpl-3.0
| 2,598
|
[
"pysam"
] |
fc244d004b46832d2b80d13bfff05a1a448217fb60bb3fadac43d9621446fc53
|
# Bismark XM-tag methylation codes mapped to their context type.
# Uppercase = methylated call, lowercase = unmethylated call.
BISMARK_METH_CODE_TYPE_MAP = {
    "H": "CHH",
    "h": "CHH",
    "X": "CHG",
    "x": "CHG",
    "Z": "CpG",
    "z": "CpG",
}


def meth_call_for_read(read, overlap=True, min_qual=20):
    """Yield per-base methylation calls for one aligned read.

    Parameters
    ----------
    read : pysam.AlignedSegment
        The read from fetch().
    overlap : bool
        When True, overlapped mate regions are only counted once (the
        left mate skips positions covered by its mate).
    min_qual : int
        Minimum base quality required to emit a call.

    Yields
    ------
    (reference_id, position, strand, methylation code) tuples.
    """
    # Strand: for proper pairs, flags 99/147 mark the forward-strand
    # orientation; single reads fall back to the reverse flag.
    if read.is_paired:
        strand = '+' if read.flag in (99, 147) else '-'
    else:
        strand = '-' if read.is_reverse else '+'

    mate_start = read.next_reference_start
    # Only the left mate of an overlapping pair skips shared positions,
    # so each overlapped site is reported exactly once (from read 2).
    drop_overlap = read.is_paired and overlap and read.reference_start < mate_start

    calls = zip(read.get_reference_positions(), read.get_tag('XM'),
                read.query_qualities)
    for pos, code, qual in calls:
        if code not in BISMARK_METH_CODE_TYPE_MAP or qual < min_qual:
            continue
        if drop_overlap and pos >= mate_start:
            continue
        yield read.reference_id, pos, strand, code
def meth_call_by_region(bam_filename, chrom=None, start=None, end=None):
    ''' Methylation call for a given region.

    Parameters
    ----------
    bam_filename : str
        The alignment BAM filename.
    chrom : str, optional
        The chromsome name of the region.
    start : int, optional
        The start position of the region.
    end : int, optional
        The end position of the region.

    Returns
    -------
    pandas.DataFrame
        columns is ['chrom', 'pos', 'strand', 'meth_code', 'meth_count', 'total_count'].
    '''
    import pandas as pd
    import pysam
    import numpy as np
    print 'Working on {}:{}-{}'.format(chrom, start, end)
    with pysam.AlignmentFile(bam_filename) as samfile:
        # Map numeric reference id -> chromosome name from the BAM header.
        tid_chrom_d = {i: d['SN'] for i, d in enumerate(samfile.header['SQ'])}
        # Values are two-item lists of [meth_count, total_count], keyed by
        # (reference_id, pos, strand, uppercased meth code).
        coor_meth_calls_d = {}
        for read in samfile.fetch(chrom, start, end):
            for reference_id, pos, strand, meth_code in meth_call_for_read(read):
                meth_calls = coor_meth_calls_d.setdefault(
                    (reference_id, pos, strand, meth_code.upper()),
                    [0, 0]
                )
                # Uppercase Bismark codes are methylated calls.
                if meth_code.isupper():
                    meth_calls[0] += 1
                meth_calls[1] += 1
    result_df = pd.DataFrame()
    if coor_meth_calls_d:
        result_df = pd.DataFrame(
            coor_meth_calls_d.values(),
            columns=['meth_count', 'total_count'],
            index=pd.MultiIndex.from_tuples(
                coor_meth_calls_d.keys(),
                names=['chrom', 'pos', 'strand', 'meth_code']
            ),
            dtype=np.uint32,
        )
        result_df.reset_index(inplace=True)
        result_df['chrom'] = result_df['chrom'].astype(np.uint32)
        result_df.sort_values(['chrom', 'pos', 'strand'], inplace=True)
        # Trim calls from reads that extend beyond the requested region.
        # NOTE(review): searchsorted(...)[0] indexing assumes an array-like
        # return; newer pandas returns a scalar here -- verify pandas version.
        if start!=None:
            idx_start = result_df['pos'].searchsorted(start, 'left')[0]
            result_df = result_df[idx_start:]
        if end!=None:
            idx_end = result_df['pos'].searchsorted(end, 'right')[0]
            result_df = result_df[:idx_end]
        # Replace numeric ids by chromosome names for output.
        result_df['chrom'] = result_df['chrom'].replace(tid_chrom_d)
    return result_df
def write_meth_data_by_regions(bam_filename, out_dir, regions, rand_str=''):
    ''' Write the region methylation calling DataFrame into a file.

    Parameters
    ----------
    bam_filename : str
        The alignment BAM filename.
    out_dir : str
        The ouput directory.
    regions : List of tuples
        A list of (chromosome, start, end).
    rand_str : str, optional
        Add the rand_str in the prefix to tempfiles.

    Notes
    -----
    * The output file is a temp file, and you have to delete it manually.
    * One gzipped CSV tempfile is written per methylation code found, named
      tmp_<rand_str>_..._<meth type>.
    '''
    import pandas as pd
    import tempfile
    import pysam
    # Accumulate calls over all requested regions.
    result_df = pd.DataFrame()
    for chrom, start, end in regions:
        result_df = result_df.append(meth_call_by_region(bam_filename, chrom, start, end))
    if not result_df.empty:
        # Split by methylation code and write one tempfile per code.
        for meth_code in result_df['meth_code'].unique():
            meth_type = BISMARK_METH_CODE_TYPE_MAP[meth_code]
            tmp_df = result_df[result_df['meth_code']==meth_code]
            tmp_df = tmp_df[['chrom', 'pos', 'strand', 'meth_count', 'total_count']]
            tmp_df = tmp_df.reset_index(drop=True)
            # Mirror-seq can only detect CpGs so do not convert non-CpGs.
            if meth_code=='Z':
                mirror_seq_conversion(tmp_df)
            prefix = 'tmp_{0}_'.format(rand_str)
            suffix = '_{0}'.format(meth_type)
            f = tempfile.NamedTemporaryFile(dir=out_dir, prefix=prefix, suffix=suffix, delete=False)
            tmp_df.to_csv(f.name, compression='gzip', index=False)
def get_regions_chunks(bam_filename, nts_in_regions=100000000):
    ''' Iterate regions lists to roughly fit "nts_in_regions".

    Parameters
    ----------
    bam_filename : str
        The alignment BAM filename.
    nts_in_regions : int, optional
        Number of total nucleotides in an iter of regions. It is an rough number
        so it is possible to get more than the number.

    Yields
    ------
    List
        List of tuples of chromosome, start, and end.
    '''
    import pysam
    with pysam.AlignmentFile(bam_filename) as samfile:
        chrom_sizes = [(d['SN'], d['LN']) for d in samfile.header['SQ']]
    # Largest chromosomes first.
    chrom_sizes = sorted(chrom_sizes, key=lambda x: x[1], reverse=True)
    regions = []
    nts = 0
    for chrom, size in chrom_sizes:
        # NOTE(review): relies on Python 2 integer division; under
        # Python 3 this produces a float -- confirm target interpreter.
        chunk_num = size / nts_in_regions
        if size % nts_in_regions != 0:
            chunk_num += 1
        start = 0
        for i in range(chunk_num):
            end = min(start + nts_in_regions, size)
            region_size = end - start
            nts += region_size
            regions.append((chrom, start, end))
            # Flush a batch once it reaches the requested size.
            if nts>nts_in_regions:
                yield regions
                nts = 0
                regions = []
            # NOTE(review): start = end + 1 leaves a 1-base gap between
            # consecutive chunks if fetch() treats regions as half-open --
            # verify against pysam fetch semantics.
            start = end + 1
    if regions:
        yield regions
def parse_to_bed(data_filename, bed_filename, chunksize=1000000):
    ''' Parse the standard output format to BED format.

    Parameters
    ----------
    data_filename : str
        The data filename.
    bed_filename : str
        The output BED filename.
    chunksize : int, optional
        The chunk size when reading files.

    Notes
    -----
    * The BED file is sorted in place with the external `sort` command and
      then gzipped, so the final artifact is <bed_filename>.gz.
    '''
    import pandas as pd
    import numpy as np
    import subprocess
    import os
    with open(bed_filename, 'w') as fw:
        for df in pd.read_csv(data_filename, chunksize=chunksize):
            # BED intervals are single bases: [pos, pos+1).
            df['end'] = df['pos'] + 1
            df['thick_start'] = 0
            df['thick_end'] = 0
            df['meth_ratio'] = df['meth_count'] / df['total_count']
            # BED score column is 0-1000; scale the methylation ratio.
            df['score'] = (df['meth_ratio'] * 1000).round().astype(np.uint32)
            # Human-readable name, e.g. "3/5(60%)".
            df['name'] = df.apply(
                lambda r: '{0}/{1}({2:.0%})'.format(
                    r['meth_count'],
                    r['total_count'],
                    r['meth_ratio']
                ),
                axis=1,
            )
            # Color from red (unmethylated) toward yellow (methylated).
            df['rgb'] = df.apply(
                lambda r: '255,{0:.0f},0'.format(255*r['meth_ratio']),
                axis=1,
            )
            colnames = [
                'chrom',
                'pos',
                'end',
                'name',
                'score',
                'strand',
                'thick_start',
                'thick_end',
                'rgb'
            ]
            df[colnames].to_csv(fw, sep='\t', index=False, header=False)
    # Sort by chromosome then position, as genome browsers require.
    temp_folder = os.path.dirname(bed_filename)
    subprocess.check_call((
        "sort",
        "-T", temp_folder,
        "-k1,1",
        "-k2,2n",
        bed_filename,
        "-o", bed_filename,
    ))
    subprocess.check_call(('gzip', '-f', bed_filename))
def mirror_seq_conversion(df):
    ''' Convert methylation ratios and strands in place for Mirror-seq.

    Shifts each call to the complementary cytosine of the CpG (+1 on the
    plus strand, -1 on the minus strand), swaps the strand labels, and
    inverts the methylated counts (meth becomes total - meth).

    Parameters
    ----------
    df : pandas.DataFrame
        with columns - strand, pos, meth_count, and total_count.
    '''
    import pandas as pd
    plus_rows = df['strand']=='+'
    minus_rows = df['strand']=='-'
    # Move each position onto its CpG partner base (index-aligned assign).
    df['pos'] = pd.concat((
        df.loc[plus_rows, 'pos'] + 1,
        df.loc[minus_rows, 'pos'] - 1,
    ))
    # Swap strand labels; a dict replace maps all values simultaneously.
    df['strand'] = df['strand'].replace({'+': '-', '-': '+'})
    # Mirror-seq reads report the unconverted complement, so invert counts.
    df['meth_count'] = df['total_count'] - df['meth_count']
def merge_n_parse(out_prefix, meth_type, filenames, create_bed_file):
    ''' The is a shortcut function, which is easier to be used by multiprocessing.

    Concatenates the per-region gzipped CSV tempfiles of one methylation
    type into a single gzipped CSV, and optionally emits a BED file for
    CpG data.

    Parameters
    ----------
    out_prefix : str
        The output prefix.
    meth_type : str
        The methylation type. Eg: CpG, CHG, and CHH.
    filenames : str
        The csv filenames to be mreged.
    create_bed_file : bool
        Create a bed file or not.
    '''
    import pandas as pd
    import os
    import subprocess
    full_filename = '{0}_{1}.csv'.format(out_prefix, meth_type)
    # Write the header only once, from the first appended chunk.
    header = True
    for filename in filenames:
        pd.read_csv(filename, compression='gzip').to_csv(full_filename, header=header, mode='a', index=False)
        header = False
    subprocess.check_call(('gzip', '-f', full_filename))
    full_filename += '.gz'
    # BED output only makes sense for CpG calls.
    if meth_type=='CpG' and create_bed_file:
        bed_filename = full_filename.replace('.csv.gz', '.bed')
        parse_to_bed(full_filename, bed_filename)
def get_bs_conv_rate(filenames):
    '''Calculate the bisulfite conversion rate using CHH and CHG methylation tracks.

    Parameters
    ----------
    filenames : List of str
        the filenames of non-CpGs.

    Returns
    -------
    float
        The estimated bisulfite conversion rate.

    NOTES
    -----
    1. Get conversion rate for each non CpGs and average them.
    2. The esitmated bisulfite conversion rate is rounded to 2 decimal.
    3. Return None if no sites in all files.
    '''
    import pandas as pd
    import os
    meth_ratio_sum = 0
    count = 0
    for filename in filenames:
        # Missing files (e.g. no CHH calls) are simply skipped.
        if os.path.exists(filename):
            for df in pd.read_csv(filename, compression='gzip', usecols=['meth_count', 'total_count'], chunksize=1000000):
                meth_ratio_sum += (df['meth_count'] / df['total_count']).sum()
                count += len(df)
    # Conversion rate = 1 - mean non-CpG methylation ratio.
    try:
        bs_conv_rate = 1 - round(meth_ratio_sum / count, 2)
    except ZeroDivisionError:
        # No non-CpG sites at all: rate cannot be estimated.
        bs_conv_rate = None
    return bs_conv_rate
def main(bam_filename, out_prefix, create_bed_file, nts_in_regions=100000000):
    ''' Run the entire methylation calling.

    Fans region batches out over a process pool, merges the per-region
    tempfiles per methylation type, reports the estimated bisulfite
    conversion rate, and removes intermediate files.

    Parameters
    ----------
    bam_filename : str
        The alignment bam filename. The index file (.bai) must exist in the same folder.
    out_prefix : str
        The output file prefix. The output file is <out_prefix>_<METH_TYPE>.h5.
    create_bed_file : bool
        Create a bed file or not.
    nts_in_regions : int, optional
        Number of total nucleotides in an iter of regions. It is an rough number
        so it is possible to get more than the number.
    '''
    from multiprocessing import Pool
    import multiprocessing
    import subprocess
    import pysam
    import os, string, random
    import pandas as pd
    print('Wokring on hydroxymethylation calling...')
    out_dir = os.path.dirname(out_prefix)
    # Random tag so concurrent runs do not collide on tempfile names.
    rand_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8))
    p = Pool()
    for regions in get_regions_chunks(bam_filename, nts_in_regions):
        p.apply_async(
            write_meth_data_by_regions,
            (bam_filename, out_dir, regions, rand_str),
        )
    p.close()
    p.join()
    # Group this run's tempfiles by methylation type (the filename suffix).
    prefix = 'tmp_{0}_'.format(rand_str)
    meth_type_filenames_dict = {}
    for filename in os.listdir(os.path.join('.', out_dir)):
        if not filename.startswith(prefix):
            continue
        meth_type = filename.rsplit('_', 1)[-1]
        filenames = meth_type_filenames_dict.setdefault(meth_type, [])
        filenames.append(os.path.join(out_dir, filename))
    print('Merge files...')
    p = Pool()
    for meth_type, filenames in meth_type_filenames_dict.iteritems():
        # BUGFIX: forward the caller's create_bed_file flag instead of
        # hard-coding True, so --no-bed requests are honored.
        p.apply_async(
            merge_n_parse,
            (out_prefix, meth_type, filenames, create_bed_file),
        )
    p.close()
    p.join()
    cpg_filename = '{}_CpG.csv.gz'.format(out_prefix)
    chg_filename = '{}_CHG.csv.gz'.format(out_prefix)
    chh_filename = '{}_CHH.csv.gz'.format(out_prefix)
    # Calculate bisulfite conversion rate from the non-CpG tracks.
    conversion_rate = get_bs_conv_rate([
        chg_filename,
        chh_filename,
    ])
    if conversion_rate is not None:
        print('Bisuflite conversion rate: {:.0%}'.format(conversion_rate))
    else:
        print('Cannot estimate bisuflite conversion rate.')
    # Remove tmp files after everthing is done.
    for filenames in meth_type_filenames_dict.itervalues():
        for filename in filenames:
            os.remove(filename)
    # Non-CpG merged files are only needed for the conversion-rate estimate.
    try:
        os.remove(chg_filename)
    except OSError:
        pass
    try:
        os.remove(chh_filename)
    except OSError:
        pass
    print('Done!')
|
Zymo-Research/mirror-seq
|
mirror_seq/hmc_calling.py
|
Python
|
apache-2.0
| 13,672
|
[
"pysam"
] |
57b3e755a5b7b6c91c09dbe83e610a174b411cc91a5f01e26c5e315c0e21ffb5
|
import math
import random
from test import redis, flushdb
# Number of samples drawn per distribution in the accuracy tests.
NUM_VALUES = 100000
def cdf(x, values):
    """Empirical CDF of ``values`` at ``x``.

    Uses the midpoint convention for ties: values equal to x contribute
    half, i.e. returns (#{v < x} + #{v <= x}) / 2 / len(values).
    """
    strictly_below = sum(1 for v in values if v < x)
    at_or_below = sum(1 for v in values if v <= x)
    return (strictly_below + at_or_below) / 2.0 / len(values)
def run_test_for_dist(redis, distfn):
    """Check t-digest quantile/cdf accuracy against samples from distfn.

    Feeds NUM_VALUES samples into one full digest plus two half digests
    (even/odd samples), merges the halves both into a fresh key and into
    one of the halves, then checks estimated quantiles and CDF values
    against the exact empirical ones.
    """
    # distfn.__name__ doubles as the Redis key prefix.
    key = distfn.__name__
    keydest = key + ':dest'
    key0 = key + ':0'
    key1 = key + ':1'
    testkeys = [key, keydest, key0]
    redis.tdigest_new(key)
    redis.tdigest_new(key0)
    redis.tdigest_new(key1)
    quantiles = [0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999]
    values = []
    for i in xrange(NUM_VALUES):
        v = distfn()
        redis.tdigest_add(key, v, 1)
        # Alternate samples between the two half digests.
        if 0 == i % 2:
            redis.tdigest_add(key0, v, 1)
        else:
            redis.tdigest_add(key1, v, 1)
        values.append(v)
    # Merge into a new key, and merge one half into the other.
    redis.tdigest_merge(keydest, key0, key1)
    redis.tdigest_merge(key0, key1)
    values = sorted(values)
    for k in testkeys:
        soft_errs = 0
        redis.tdigest_meta(k)
        for i, q in enumerate(quantiles):
            # Exact empirical quantile via linear interpolation.
            ix = NUM_VALUES * quantiles[i] - 0.5;
            idx = int(math.floor(ix))
            p = ix - idx;
            x = values[idx] * (1 - p) + values[idx + 1] * p;
            estimate_x = float(redis.tdigest_quantile(k, q)[0])
            estimate_q = float(redis.tdigest_cdf(k, x)[0])
            # CDF estimates must always be within 0.5%.
            assert abs(q - estimate_q) < 0.005
            # Quantile estimates may miss occasionally (soft errors).
            if abs(cdf(estimate_x, values) - q) > 0.005:
                soft_errs += 1
        assert soft_errs < 3
def test_uniform(redis, flushdb):
    """Accuracy test on uniformly distributed samples."""
    def uniform():
        # The inner function's name doubles as the Redis key.
        return random.uniform(-1, 1)
    run_test_for_dist(redis, uniform)
def test_gaussian(redis, flushdb):
    """Accuracy test on standard-normal samples."""
    def gaussian():
        # The inner function's name doubles as the Redis key.
        return random.gauss(0, 1)
    run_test_for_dist(redis, gaussian)
def test_beta(redis, flushdb):
    """Accuracy test on Beta(2, 2) samples."""
    def beta():
        # The inner function's name doubles as the Redis key.
        return random.betavariate(2, 2)
    run_test_for_dist(redis, beta)
def test_meta(redis, flushdb):
    """Check tdigest_meta (compression, centroid count, size) behavior.

    Digests with the default compression stay identical to each other,
    while an explicit compression=100 digest compresses harder and keeps
    fewer centroids once enough values are added.
    """
    redis.tdigest_new('test_meta0')
    redis.tdigest_new('test_meta1')
    redis.tdigest_new('test_meta2', compression=100)
    # Freshly created digests: same defaults, zero centroids.
    m0 = redis.tdigest_meta('test_meta0')
    m1 = redis.tdigest_meta('test_meta1')
    m2 = redis.tdigest_meta('test_meta2')
    assert m0[0] == m1[0]
    assert m2[0] == 100
    assert m0[1] == m1[1] == m2[1] == 0
    assert m0[2] == m1[2] == m2[2]
    # 100 values: too few to trigger compression, all digests agree.
    for i in xrange(100):
        redis.tdigest_add('test_meta0', i, 1)
        redis.tdigest_add('test_meta1', i, 1)
        redis.tdigest_add('test_meta2', i, 1)
    m0 = redis.tdigest_meta('test_meta0')
    m1 = redis.tdigest_meta('test_meta1')
    m2 = redis.tdigest_meta('test_meta2')
    assert m0[0] == m1[0]
    assert m2[0] == 100
    assert m0[1] == m1[1] == m2[1] == 100
    assert m0[2] == m1[2] == m2[2]
    # 1000 more values: the compression=100 digest now keeps fewer
    # centroids and is smaller than the default-compression digests.
    for i in xrange(1000):
        redis.tdigest_add('test_meta0', i, 1)
        redis.tdigest_add('test_meta1', i, 1)
        redis.tdigest_add('test_meta2', i, 1)
    m0 = redis.tdigest_meta('test_meta0')
    m1 = redis.tdigest_meta('test_meta1')
    m2 = redis.tdigest_meta('test_meta2')
    assert m0[0] == m1[0]
    assert m2[0] == 100
    assert m0[1] == m1[1]
    assert m0[1] > m2[1]
    assert m0[2] == m1[2]
    assert m0[2] > m2[2]
def test_mem_leak(redis, flushdb):
    """Hammer two digests with adds and queries and check RSS stays flat."""
    redis.tdigest_new('test_mem_leak0')
    redis.tdigest_new('test_mem_leak1')
    for i in xrange(1000):
        redis.tdigest_add('test_mem_leak0', i, 1)
        redis.tdigest_add('test_mem_leak1', i, 1)
    # Compression forces storing < 1000 centroids
    assert redis.tdigest_meta('test_mem_leak0')[1] < 1000
    assert redis.tdigest_meta('test_mem_leak1')[1] < 1000
    start_rss_mem = redis.info()['used_memory_rss']
    # Interleave adds with periodic cdf/quantile queries to exercise
    # the read paths as well as the write path.
    for i in xrange(100000):
        redis.tdigest_add('test_mem_leak0', i, 1)
        redis.tdigest_add('test_mem_leak1', i, 1)
        if i % 1000 == 0:
            redis.tdigest_cdf('test_mem_leak0', random.randint(100, 1000))
            redis.tdigest_cdf('test_mem_leak1', random.randint(100, 1000))
        if i % 1000 == 500:
            redis.tdigest_quantile('test_mem_leak0', 0.4)
            redis.tdigest_quantile('test_mem_leak1', 0.8)
    end_rss_mem = redis.info()['used_memory_rss']
    # %age difference should be < 1%
    percent_diff = abs(end_rss_mem - start_rss_mem) / float(end_rss_mem)
    assert percent_diff < 0.01
def run_persistence_test(redis, reloadfn):
    """Verify a digest's custom type survives a server reload via reloadfn."""
    redis.tdigest_new('test_aof0')
    for i in xrange(10):
        redis.tdigest_add('test_aof0', i, 1)
    assert redis.client.type('test_aof0') == 't-digest0'
    # Persist and reload the server, then confirm the key and its type.
    reloadfn()
    assert redis.client.type('test_aof0') == 't-digest0'
def test_aof(redis, flushdb):
    # Digest must survive an append-only-file reload.
    run_persistence_test(redis, redis.reload_from_aof)
def test_rdb(redis, flushdb):
    # Digest must survive an RDB snapshot reload.
    run_persistence_test(redis, redis.reload_from_rdb)
|
usmanm/redis-tdigest
|
test/test_integration.py
|
Python
|
mit
| 4,392
|
[
"Gaussian"
] |
2f0901df3c4d577d3bfac6ae43125d8be36e59f2857a9e1233d21127f2c68601
|
#! /usr/bin/env python
"""Unit tests for landlab.io.netcdf module."""
import os
import netCDF4 as nc
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from landlab import RasterModelGrid
from landlab.io.netcdf import NotRasterGridError, write_netcdf
from landlab.io.netcdf.read import _get_raster_spacing
# Directory holding fixture data files, next to this test module.
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def test_netcdf_write_int64_field_netcdf4(tmpdir):
    """Round-trip an int64 node field through NETCDF4 and check dtype/values."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12, dtype=np.int64), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF4")
        dataset = nc.Dataset("test.nc", "r", format="NETCDF4")
        name = "topographic__elevation"
        assert name in dataset.variables
        assert_array_equal(
            np.reshape(dataset.variables[name], -1), grid.at_node[name]
        )
        # NETCDF4 supports 64-bit ints natively; the dtype must be preserved.
        assert dataset.variables[name].dtype == "int64"
        dataset.close()
def test_netcdf_write_uint8_field_netcdf4(tmpdir):
    """Round-trip a uint8 node field through NETCDF4 and check dtype/values."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12, dtype=np.uint8), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF4")
        dataset = nc.Dataset("test.nc", "r", format="NETCDF4")
        name = "topographic__elevation"
        assert name in dataset.variables
        assert_array_equal(
            np.reshape(dataset.variables[name], -1), grid.at_node[name]
        )
        # NETCDF4 supports unsigned types; the dtype must be preserved.
        assert dataset.variables[name].dtype == "uint8"
        dataset.close()
def test_netcdf_write_as_netcdf3_64bit(tmpdir):
    """Test write_netcdf with output format 64-bit netcdf3."""
    from scipy.io import netcdf

    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", 2.0 * np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF3_64BIT")
        # Read back with scipy's pure netcdf3 reader to prove the file is
        # genuinely netcdf3, then check both fields round-tripped.
        dataset = netcdf.netcdf_file("test.nc", "r")
        for field_name in ("topographic__elevation", "uplift_rate"):
            assert field_name in dataset.variables
            assert_array_equal(
                np.reshape(dataset.variables[field_name][:], -1),
                grid.at_node[field_name],
            )
        dataset.close()
def test_netcdf_write_as_netcdf3_classic(tmpdir):
    """write_netcdf can emit classic netCDF3, readable by scipy."""
    from scipy.io import netcdf

    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", 2.0 * np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF3_CLASSIC")
        f = netcdf.netcdf_file("test.nc", "r")
        for name in ("topographic__elevation", "uplift_rate"):
            assert name in f.variables
            assert_array_equal(np.reshape(f.variables[name][:], -1), grid.at_node[name])
        f.close()
def test_netcdf_write(tmpdir):
    """Node fields are written with the expected dimensions and coordinates."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF4")
        root = nc.Dataset("test.nc", "r", format="NETCDF4")

        # Grid dimensions: 3 columns, 4 rows, unlimited time axis of length 1.
        assert set(root.dimensions) == {"ni", "nj", "nt"}
        assert len(root.dimensions["ni"]) == 3
        assert len(root.dimensions["nj"]) == 4
        assert len(root.dimensions["nt"]) == 1
        assert root.dimensions["nt"].isunlimited()

        assert set(root.variables) == {"x", "y", "topographic__elevation"}
        expected_x = np.tile([0.0, 1.0, 2.0], 4)
        expected_y = np.repeat([0.0, 1.0, 2.0, 3.0], 3)
        assert_array_equal(np.reshape(root.variables["x"], -1), expected_x)
        assert_array_equal(np.reshape(root.variables["y"], -1), expected_y)
        assert_array_equal(
            np.reshape(root.variables["topographic__elevation"], -1),
            grid.at_node["topographic__elevation"],
        )
        root.close()
def test_netcdf_write_as_netcdf4_classic(tmpdir):
    """write_netcdf can emit NETCDF4_CLASSIC files."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, format="NETCDF4_CLASSIC")
        root = nc.Dataset("test.nc", "r", format="NETCDF4_CLASSIC")
        for name in ("topographic__elevation", "uplift_rate"):
            assert name in root.variables
            assert_array_equal(np.reshape(root.variables[name], -1), grid.at_node[name])
        root.close()
def test_netcdf_write_names_keyword_as_list(tmpdir):
    """Only the fields named in the *names* list are written."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf(
            "test.nc", grid, names=["topographic__elevation"], format="NETCDF4"
        )
        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        assert "topographic__elevation" in root.variables
        assert "uplift_rate" not in root.variables
        written = np.reshape(root.variables["topographic__elevation"], -1)
        assert_array_equal(written, grid.at_node["topographic__elevation"])
        root.close()
def test_netcdf_write_names_keyword_as_str(tmpdir):
    """A single field name passed as ``str`` selects just that field."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, names="uplift_rate", format="NETCDF4")
        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        assert "topographic__elevation" not in root.variables
        assert "uplift_rate" in root.variables
        written = np.reshape(root.variables["uplift_rate"], -1)
        assert_array_equal(written, grid.at_node["uplift_rate"])
        root.close()
def test_netcdf_write_names_keyword_as_none(tmpdir):
    """``names=None`` writes every node field."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(12.0), at="node")
    grid.add_field("uplift_rate", np.arange(12.0), at="node")
    with tmpdir.as_cwd():
        write_netcdf("test.nc", grid, names=None, format="NETCDF4")
        root = nc.Dataset("test.nc", "r", format="NETCDF4")
        for name in ("topographic__elevation", "uplift_rate"):
            assert name in root.variables
            assert_array_equal(np.reshape(root.variables[name], -1), grid.at_node[name])
        root.close()
def test_2d_unit_spacing():
    """A unit-spaced 2D meshgrid has spacing 1."""
    x, y = np.meshgrid(np.arange(5.0), np.arange(4.0))
    assert _get_raster_spacing((y, x)) == 1.0


def test_2d_non_unit_spacing():
    """A 2D meshgrid with spacing 2 is detected as such."""
    x, y = np.meshgrid(np.arange(5.0) * 2, np.arange(4.0) * 2)
    assert _get_raster_spacing((y, x)) == 2.0


def test_2d_uneven_spacing_axis_0():
    """Uneven spacing along the first axis raises NotRasterGridError."""
    x, y = np.meshgrid(np.logspace(0.0, 2.0, num=5), np.arange(4.0))
    with pytest.raises(NotRasterGridError):
        _get_raster_spacing((y, x))


def test_2d_uneven_spacing_axis_1():
    """Uneven spacing along the second axis raises NotRasterGridError."""
    x, y = np.meshgrid(np.arange(4.0), np.logspace(0.0, 2.0, num=5))
    with pytest.raises(NotRasterGridError):
        _get_raster_spacing((y, x))


def test_2d_switched_coords():
    """Swapping the coordinate order yields a spacing of 0."""
    x, y = np.meshgrid(np.arange(5.0), np.arange(4.0))
    assert _get_raster_spacing((x, y)) == 0.0


def test_1d_unit_spacing():
    """A unit-spaced 1D coordinate array has spacing 1."""
    assert _get_raster_spacing((np.arange(5.0),)) == 1.0


def test_1d_non_unit_spacing():
    """A 1D coordinate array with spacing 2 is detected as such."""
    assert _get_raster_spacing((np.arange(5.0) * 2,)) == 2.0


def test_1d_uneven_spacing():
    """Unevenly spaced 1D coordinates raise NotRasterGridError."""
    with pytest.raises(NotRasterGridError):
        _get_raster_spacing((np.logspace(0.0, 2.0, num=5),))
def test_netcdf_write_at_cells(tmpdir):
    """Cell fields are written with cell-centred dims and bounds variables."""
    grid = RasterModelGrid((4, 3))
    grid.add_field("topographic__elevation", np.arange(grid.number_of_cells), at="cell")
    grid.add_field("uplift_rate", np.arange(grid.number_of_cells), at="cell")
    with tmpdir.as_cwd():
        write_netcdf("test-cells.nc", grid, format="NETCDF4")
        root = nc.Dataset("test-cells.nc", "r", format="NETCDF4")
        for name in ("topographic__elevation", "uplift_rate"):
            assert name in root.variables
            assert_array_equal(np.reshape(root.variables[name], -1), grid.at_cell[name])

        # A (4, 3) grid has a single 1x2 block of interior cells.
        assert set(root.dimensions) == {"nv", "ni", "nj", "nt"}
        assert len(root.dimensions["nv"]) == 4
        assert len(root.dimensions["ni"]) == 1
        assert len(root.dimensions["nj"]) == 2
        assert len(root.dimensions["nt"]) == 1
        assert root.dimensions["nt"].isunlimited()
        assert set(root.variables) == {
            "x_bnds", "y_bnds", "topographic__elevation", "uplift_rate"
        }
        root.close()
def test_write_llc():
    """Placeholder test — not yet implemented."""
    pass
|
landlab/landlab
|
tests/io/netcdf/test_write_netcdf.py
|
Python
|
mit
| 10,251
|
[
"NetCDF"
] |
39f09920e5f183547afc7993b736f5df529510e6461f59dc41e29263d01186c9
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import json_parse
from js_externs_generator import JsExternsGenerator
from datetime import datetime
import model
import sys
import unittest
# The contents of a fake idl file.
# Exercises enums, dictionaries (with optional, array, union and object
# members), callbacks, functions and events; parsed via idl_schema below.
# NOTE(review): indentation inside this literal appears to have been lost
# in transit; the IDL parser tolerates it, but confirm against upstream.
fake_idl = """
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A totally fake API.
namespace fakeApi {
enum Greek {
ALPHA,
BETA,
GAMMA,
DELTA
};
dictionary Bar {
long num;
};
dictionary Baz {
DOMString str;
long num;
boolean b;
Greek letter;
Greek? optionalLetter;
long[] arr;
Bar[]? optionalObjArr;
Greek[] enumArr;
any[] anythingGoes;
Bar obj;
long? maybe;
(DOMString or Greek or long[]) choice;
object plainObj;
};
callback VoidCallback = void();
callback BazGreekCallback = void(Baz baz, Greek greek);
interface Functions {
// Does something exciting! And what's more, this is a multiline function
// comment! It goes onto multiple lines!
// |baz| : The baz to use.
static void doSomething(Baz baz, VoidCallback callback);
// |callback| : The callback which will most assuredly in all cases be
// called; that is, of course, iff such a callback was provided and is
// not at all null.
static void bazGreek(optional BazGreekCallback callback);
[deprecated="Use a new method."] static DOMString returnString();
};
interface Events {
// Fired when we realize it's a trap!
static void onTrapDetected(Baz baz);
};
};
"""
# The output we expect from our fake idl file.
# Interpolates the current year and the generator script path (sys.argv[0])
# so the comparison tracks the generated header lines.
# NOTE(review): leading whitespace inside this literal appears to have been
# lost in transit; assertMultiLineEqual is whitespace-sensitive — confirm
# against upstream before relying on exact formatting.
expected_output = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeApi.FooType'.
// Please run the closure compiler before committing changes.
// See https://code.google.com/p/chromium/wiki/ClosureCompilation.
/** @fileoverview Externs generated from namespace: fakeApi */
/**
* @const
*/
chrome.fakeApi = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeApi#type-Greek
*/
chrome.fakeApi.Greek = {
ALPHA: 'ALPHA',
BETA: 'BETA',
GAMMA: 'GAMMA',
DELTA: 'DELTA',
};
/**
* @typedef {{
* num: number
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Bar
*/
chrome.fakeApi.Bar;
/**
* @typedef {{
* str: string,
* num: number,
* b: boolean,
* letter: !chrome.fakeApi.Greek,
* optionalLetter: (!chrome.fakeApi.Greek|undefined),
* arr: !Array<number>,
* optionalObjArr: (!Array<!chrome.fakeApi.Bar>|undefined),
* enumArr: !Array<!chrome.fakeApi.Greek>,
* anythingGoes: !Array<*>,
* obj: !chrome.fakeApi.Bar,
* maybe: (number|undefined),
* choice: (string|!chrome.fakeApi.Greek|!Array<number>),
* plainObj: Object
* }}
* @see https://developer.chrome.com/extensions/fakeApi#type-Baz
*/
chrome.fakeApi.Baz;
/**
* Does something exciting! And what's more, this is a multiline function
* comment! It goes onto multiple lines!
* @param {!chrome.fakeApi.Baz} baz The baz to use.
* @param {function():void} callback
* @see https://developer.chrome.com/extensions/fakeApi#method-doSomething
*/
chrome.fakeApi.doSomething = function(baz, callback) {};
/**
* @param {function(!chrome.fakeApi.Baz, !chrome.fakeApi.Greek):void=} callback
* The callback which will most assuredly in all cases be called; that is,
* of course, iff such a callback was provided and is not at all null.
* @see https://developer.chrome.com/extensions/fakeApi#method-bazGreek
*/
chrome.fakeApi.bazGreek = function(callback) {};
/**
* @return {string}
* @deprecated Use a new method.
* @see https://developer.chrome.com/extensions/fakeApi#method-returnString
*/
chrome.fakeApi.returnString = function() {};
/**
* Fired when we realize it's a trap!
* @type {!ChromeEvent}
* @see https://developer.chrome.com/extensions/fakeApi#event-onTrapDetected
*/
chrome.fakeApi.onTrapDetected;""" % (datetime.now().year, sys.argv[0]))
# A fake JSON-schema API: an enum with hostile member names and a function
# with inline object parameters/return — parsed with json_parse below.
fake_json = """// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
[
{
"namespace": "fakeJson",
"description": "Fake JSON API Stuff",
"types": [ {
"id": "CrazyEnum",
"type": "string",
"enum": ["camelCaseEnum", "Non-Characters", "5NumFirst", \
"3Just-plainOld_MEAN"]
} ],
"functions": [ {
"name": "funcWithInlineObj",
"type": "function",
"parameters": [
{
"type": "object",
"name": "inlineObj",
"description": "Evil inline object! With a super duper duper long\
string description that causes problems!",
"properties": {
"foo": {
"type": "boolean",
"optional": "true",
"description": "The foo."
},
"bar": {
"type": "integer",
"description": "The bar."
},
"baz": {
"type": "object",
"description": "Inception object.",
"properties": {
"depth": {
"type": "integer"
}
}
}
}
},
{
"name": "callback",
"type": "function",
"parameters": [
{
"type": "object",
"name": "returnObj",
"properties": {
"str": { "type": "string"}
}
}
],
"description": "The callback to this heinous method"
}
],
"returns": {
"type": "object",
"properties": {
"str": { "type": "string" },
"int": { "type": "number" }
}
}
} ]
}
]"""
# The externs we expect from the fake JSON schema above.
# NOTE(review): leading whitespace inside this literal appears to have been
# lost in transit; assertMultiLineEqual is whitespace-sensitive — confirm
# against upstream before relying on exact formatting.
json_expected = ("""// Copyright %s The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file was generated by:
// %s.
// NOTE: The format of types has changed. 'FooType' is now
// 'chrome.fakeJson.FooType'.
// Please run the closure compiler before committing changes.
// See https://code.google.com/p/chromium/wiki/ClosureCompilation.
/** @fileoverview Externs generated from namespace: fakeJson */
/**
* @const
*/
chrome.fakeJson = {};
/**
* @enum {string}
* @see https://developer.chrome.com/extensions/fakeJson#type-CrazyEnum
*/
chrome.fakeJson.CrazyEnum = {
CAMEL_CASE_ENUM: 'camelCaseEnum',
NON_CHARACTERS: 'Non-Characters',
_5NUM_FIRST: '5NumFirst',
_3JUST_PLAIN_OLD_MEAN: '3Just-plainOld_MEAN',
};
/**
* @param {{
* foo: (boolean|undefined),
* bar: number,
* baz: {
* depth: number
* }
* }} inlineObj Evil inline object! With a super duper duper long string
* description that causes problems!
* @param {function({
* str: string
* }):void} callback The callback to this heinous method
* @return {{
* str: string,
* int: number
* }}
* @see https://developer.chrome.com/extensions/fakeJson#method-funcWithInlineObj
*/
chrome.fakeJson.funcWithInlineObj = function(inlineObj, callback) {};""" %
(datetime.now().year, sys.argv[0]))
class JsExternGeneratorTest(unittest.TestCase):
  """Checks that JsExternsGenerator renders the expected externs text."""

  def _GetNamespace(self, fake_content, filename, is_idl):
    """Build a model.Namespace from raw IDL or JSON schema text."""
    if is_idl:
      api_def = idl_schema.Process(fake_content, filename)
    else:
      api_def = json_parse.Parse(fake_content)
    return model.Model().AddNamespace(api_def[0], filename)

  def setUp(self):
    # Show the entire diff when a multi-line comparison fails.
    self.maxDiff = None

  def testBasic(self):
    namespace = self._GetNamespace(fake_idl, 'fake_api.idl', True)
    rendered = JsExternsGenerator().Generate(namespace).Render()
    self.assertMultiLineEqual(expected_output, rendered)

  def testJsonWithInlineObjects(self):
    namespace = self._GetNamespace(fake_json, 'fake_api.json', False)
    rendered = JsExternsGenerator().Generate(namespace).Render()
    self.assertMultiLineEqual(json_expected, rendered)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
hujiajie/chromium-crosswalk
|
tools/json_schema_compiler/js_externs_generator_test.py
|
Python
|
bsd-3-clause
| 8,543
|
[
"exciting"
] |
f30350432281ef0cebb247c043c484c86427cd69b6bc03070ed9eb305b93e6e0
|
import sys
import warnings
import numpy as np
from ase.io.trajectory import Trajectory
from ase.constraints import FixedLine, FixAtoms
from ase.optimize import QuasiNewton
from anh_base import BaseAnalysis
class TransAnalysis(BaseAnalysis):
    """Calculate the partition function of a translational mode.

    Samples the ground-state energy at displacements along a linear
    translation path; the fitting / partition-function machinery is
    inherited from BaseAnalysis (anh_base).
    """
    # NOTE(review): leading whitespace was lost in this copy of the file;
    # the nesting below is a reconstruction — confirm against upstream
    # ase-anharmonics before relying on it.

    def __init__(
        self,
        an_mode,
        atoms,
        an_filename=None,
        settings={},  # NOTE(review): mutable default argument — never mutate it
        log=sys.stdout,
    ):
        super(TransAnalysis, self).__init__()
        self.an_mode = an_mode          # dict describing the translational mode
        self.atoms = atoms              # ASE Atoms with an attached calculator
        self.an_filename = an_filename  # basename for backup files (optional)
        self.settings = settings
        self.log = log

        # Checks
        assert self.an_mode['type'] == 'translation'

        # settings
        self.fit_forces = settings.get('fit_forces', False)
        self.E_max_kT = settings.get('E_max_kT', 5)
        self.use_forces = settings.get('use_forces', False)

        self.initialize()

    def initial_sampling(self):
        """Start initial sampling of the mode. This can be done before extra
        samples are introduced.
        """
        # initializing: seed evenly spaced displacements and the ground state
        if len(self.an_mode.get('displacements', [])) == 0:
            self.an_mode['displacements'] = self.get_initial_points(
                self.settings.get('n_initial', 5))
            self.add_displacement_energy(None)  # adding ground state

        # Compute energies for displacements that do not yet have one.
        while (len(self.an_mode['displacements']) >
               len(self.an_mode.get('displacement_energies', []))):
            displacement = self.an_mode['displacements'][
                len(self.an_mode['displacement_energies'])]
            self.add_displacement_energy(displacement)

    def get_initial_points(self, nsamples):
        """Get the points to initially calculate the potential energies at.

        Args:
            nsamples (int): number of evenly spaced samples.

        Returns:
            displacements (ndarray): The displacements along the
                translational path, from 0 to the full path length.
        """
        displacements = (
            self.an_mode['transition_path_length']
            * (np.array(range(0, nsamples)) / (nsamples-1)))
        return displacements

    def sample_new_point(self):
        """Decide what displacement to sample next.

        Each gap between consecutive sampled displacements is weighted by
        the Boltzmann factor of the mean energy of its two endpoints,
            spacing * exp(-avg(E[i], E[i+1]) / kT),
        and the midpoint of the largest weighted gap is sampled next.
        """
        displacements = list(self.an_mode['displacements'])
        displacement_energies = list(self.an_mode['displacement_energies'])

        # Sort displacements (and their energies) along the path.
        sort_args = np.argsort(displacements)
        displacements_sorted = np.array([displacements[i] for i in sort_args])
        energies = np.array([displacement_energies[i] for i in sort_args])
        energies -= np.min(energies)  # reference energies to the minimum

        displacements_spacings = [
            displacements_sorted[i+1] - displacements_sorted[i]
            for i in range(len(displacements_sorted)-1)]

        # Boltzmann-weight each gap so low-energy regions are refined first.
        scaled_displacements_spacings = [
            displacements_spacings[i]*np.exp(
                -(energies[i]+energies[i+1])/(2*self.kT))
            for i in range(len(displacements)-1)]

        arg = np.argmax(scaled_displacements_spacings)
        # Pick the point in between the two displacements that is the biggest
        new_displacement = (displacements_sorted[arg]
                            + 0.5*displacements_spacings[arg])

        self.an_mode['displacements'] = list(
            np.hstack((displacements, new_displacement)))
        self.add_displacement_energy(new_displacement)

    def add_displacement_energy(self, displacement):
        """Add the groundstate energy for a displacement along the
        translational path, and adds it to an_mode['displacement_energies'].

        Args:
            displacement (float or None): How much to follow translational
                path; None evaluates the unperturbed ground state.
        """
        # Will otherwise do a groundstate calculation at initial positions
        if displacement:
            if displacement != self.an_mode['transition_path_length']:
                self.atoms.set_positions(
                    self.get_translation_positions(displacement))

        # Do 1D optimization: everything outside the vibrating subsystem
        # stays frozen.
        fix_environment = FixAtoms(mask=[
            i not in self.an_mode['indices']
            for i in range(len(self.atoms))])

        axis_relax = self.an_mode.get('relax_axis')
        if axis_relax:
            if self.use_forces:
                warnings.warn(' '.join([
                    "relax along axis and force_consistent",
                    "should only be used with ase releases after",
                    "Jan 2017. See",
                    "https://gitlab.com/ase/ase/merge_requests/354"
                ]))
            c = []
            for i in self.an_mode['indices']:
                c.append(FixedLine(i, axis_relax))
            # Fixing everything that is not the vibrating part
            c.append(fix_environment)
            self.atoms.set_constraint(c)

            # Optimization (only when a relaxation axis is given;
            # NOTE(review): nesting reconstructed — confirm upstream)
            dyn = QuasiNewton(self.atoms, logfile='/dev/null')
            dyn.run(fmax=self.settings.get('fmax', 0.05))

            self.atoms.set_constraint(fix_environment)

        if not self.an_mode.get('displacement_energies'):
            self.an_mode['displacement_energies'] = list()

        if self.use_forces:
            e = self.atoms.get_potential_energy(force_consistent=True)

            # For the forces, we need the projection of the forces
            # on the tangent of the translation path
            v_force = self.atoms.get_forces()[
                self.an_mode['indices']].reshape(-1)

            f = float(np.dot(
                v_force, self.an_mode['mode_tangent']))

            if not self.an_mode.get('displacement_forces'):
                self.an_mode['displacement_forces'] = [f]
            else:
                self.an_mode['displacement_forces'].append(f)
        else:
            e = self.atoms.get_potential_energy()

        if self.traj is not None:
            self.traj.write(self.atoms)

        self.an_mode['displacement_energies'].append(e)

        # adding to trajectory:
        # NOTE(review): the snapshot is written twice in this flattened
        # source (see the write above) — possibly a duplicated frame;
        # confirm against upstream.
        if self.traj is not None:
            self.traj.write(self.atoms)

        self.atoms.set_positions(self.groundstate_positions)

        # save to backup file:
        if self.an_filename:
            self.save_to_backup()

    def get_translation_positions(self, displacement):
        """Calculate the new positions of the atoms with the vibrational
        system moving along a linear translational path by a displacement
        given as an input.

        Args:
            displacement (float): The displacement along the translational path

        Returns:
            positions (numpy array): The new positions of the atoms with the
                vibrational system moved along the translational path.
        """
        positions = self.atoms.get_positions()
        for index in self.an_mode['indices']:
            positions[index] += displacement*self.an_mode['mode_tangent']
        return positions

    def make_inspection_traj(
        self,
        num_displacements=10,
        filename=None,
    ):
        """Make trajectory file stepping along the translation path so the
        mode can be inspected visually."""
        if filename is None:
            filename = self.an_filename+'_inspect.traj'

        traj = Trajectory(filename, mode='w', atoms=self.atoms)

        old_pos = self.atoms.positions.copy()
        calc = self.atoms.get_calculator()
        self.atoms.set_calculator()  # detach calculator: no energies needed

        displacements = self.get_initial_points(num_displacements)
        for displacement in displacements:
            new_pos = self.get_translation_positions(displacement)
            self.atoms.set_positions(new_pos)
            traj.write(self.atoms)

        # Restore the original state of the Atoms object.
        self.atoms.set_positions(old_pos)
        self.atoms.set_calculator(calc)
        traj.close()
|
keldLundgaard/ase-anharmonics
|
anh_trans.py
|
Python
|
lgpl-2.1
| 8,043
|
[
"ASE"
] |
d9b9a3d2272ff240b08c1a56c90f3fa93e3dd65b3ad57a5c977f28aca7d7aaec
|
"""
[11/05/2014] Challenge #187 [Intermediate] Finding Time to Reddit
https://www.reddit.com/r/dailyprogrammer/comments/2ledaj/11052014_challenge_187_intermediate_finding_time/
#Description:
I cover the border of my monitor with post it notes with tasks I have to do during the week. I am very unorganized.
Each day I want to find the biggest block of free time to go on to Reddit. But I am not sure when that time is. I am
also curious how I spend my days.
This challenge you will help me get organized and find that time for me to be on Reddit.
#Input:
I will give you a listing of the post it notes around my monitor. Each line represents a single post it note. Sorry but
they are not in any order but I was at least smart enough to date them and put the times of my daily events.
#Output:
Get me organized. I need to see my schedule for the week. For each day you must find the 1 block of time that is the
most time between events on the post its that I can Reddit. Please help maximize my time on Reddit. Assume my start
time at work is the beginning of the first event and my end time at work is the end time of the last event for that
day.
Then show me my final schedule. And while you are at it show me across the week how many minutes I dedicate to each
task with a percentage of time it takes up my time. Hopefully I don't spend most of my time on Reddit.
#Challenge Input:
11-6-2014: 05:18 AM to 06:00 AM -- code review
11-9-2014: 08:52 AM to 09:15 AM -- food
11-8-2014: 07:00 PM to 08:05 PM -- meeting
11-8-2014: 05:30 PM to 06:36 PM -- personal appointment
11-6-2014: 02:47 PM to 03:23 PM -- work
11-11-2014: 07:14 AM to 08:32 AM -- meeting
11-11-2014: 11:22 AM to 12:10 PM -- code review
11-8-2014: 01:39 PM to 02:06 PM -- food
11-9-2014: 07:12 AM to 08:06 AM -- meeting
11-9-2014: 02:14 PM to 03:15 PM -- code review
11-8-2014: 05:13 AM to 06:05 AM -- food
11-6-2014: 05:54 PM to 06:17 PM -- personal appointment
11-7-2014: 08:24 AM to 09:23 AM -- personal appointment
11-8-2014: 11:28 AM to 12:44 PM -- meeting
11-7-2014: 09:35 AM to 10:35 AM -- workout
11-9-2014: 10:05 AM to 11:15 AM -- code review
11-11-2014: 05:02 PM to 06:09 PM -- work
11-6-2014: 06:16 AM to 07:32 AM -- food
11-10-2014: 10:08 AM to 11:14 AM -- workout
11-8-2014: 04:33 PM to 05:12 PM -- meeting
11-10-2014: 01:38 PM to 02:10 PM -- workout
11-11-2014: 03:03 PM to 03:40 PM -- food
11-11-2014: 05:03 AM to 06:12 AM -- food
11-9-2014: 09:49 AM to 10:09 AM -- meeting
11-8-2014: 06:49 AM to 07:34 AM -- work
11-7-2014: 07:29 AM to 08:22 AM -- food
11-10-2014: 03:08 PM to 03:29 PM -- code review
11-9-2014: 03:27 PM to 04:39 PM -- food
11-7-2014: 05:38 AM to 06:49 AM -- meeting
11-7-2014: 03:28 PM to 04:06 PM -- code review
11-8-2014: 02:44 PM to 03:35 PM -- meeting
11-6-2014: 08:53 AM to 09:55 AM -- workout
11-11-2014: 02:05 PM to 02:49 PM -- meeting
11-10-2014: 08:29 AM to 09:23 AM -- code review
11-10-2014: 11:09 AM to 11:35 AM -- sales call
11-6-2014: 11:29 AM to 12:18 PM -- code review
11-11-2014: 08:04 AM to 08:45 AM -- work
11-9-2014: 12:27 PM to 01:29 PM -- sales call
11-7-2014: 11:04 AM to 12:07 PM -- code review
11-11-2014: 09:21 AM to 10:37 AM -- food
11-8-2014: 09:34 AM to 10:53 AM -- meeting
11-11-2014: 12:36 PM to 01:30 PM -- meeting
11-10-2014: 05:44 AM to 06:30 AM -- personal appointment
11-6-2014: 04:22 PM to 05:05 PM -- code review
11-6-2014: 01:30 PM to 01:59 PM -- sales call
11-10-2014: 06:54 AM to 07:41 AM -- code review
11-9-2014: 11:56 AM to 12:17 PM -- work
11-10-2014: 12:20 PM to 01:17 PM -- personal appointment
11-8-2014: 07:57 AM to 09:08 AM -- meeting
11-7-2014: 02:34 PM to 03:06 PM -- work
11-9-2014: 05:13 AM to 06:25 AM -- workout
11-11-2014: 04:04 PM to 04:40 PM -- food
11-9-2014: 06:03 AM to 06:26 AM -- code review
11-6-2014: 10:32 AM to 11:22 AM -- sales call
11-6-2014: 07:51 AM to 08:25 AM -- personal appointment
11-7-2014: 01:07 PM to 02:14 PM -- meeting
#FAQ:
Dates are mm-dd-yyyy
#Check this out:
If you have ideas for challenges - please visit and post on /r/dailyprogrammer_ideas
Check out side bar -- we have an IRC channel. A listing of past challenges and much more.
"""
def main():
    """Entry point for the challenge — solution not implemented yet."""
    pass


if __name__ == "__main__":
    main()
|
DayGitH/Python-Challenges
|
DailyProgrammer/DP20141105B.py
|
Python
|
mit
| 4,469
|
[
"VisIt"
] |
367cdbc31cf721aa943108322dbb7c2e3e36a992aabfbba4903ba39e9c0ac2b0
|
import os
import unittest
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import glob
import sys
from subprocess import call
import numpy as np
import SimpleITK as sitk
import sitkUtils
#
# ImportSubject
#
class ImportSubject(ScriptedLoadableModule):
  """Module descriptor that registers ImportSubject with Slicer.

  Uses ScriptedLoadableModule base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    self.parent.title = "ImportSubject" # TODO make this more human readable by adding spaces
    self.parent.categories = ["Netstim"]
    self.parent.dependencies = []
    self.parent.contributors = ["John Doe (AnyWare Corp.)"] # replace with "Firstname Lastname (Organization)"
    # Template boilerplate help/acknowledgement text (runtime strings kept verbatim).
    self.parent.helpText = """
This is an example of scripted loadable module bundled in an extension.
It performs a simple thresholding on the input volume and optionally captures a screenshot.
"""
    self.parent.helpText += self.getDefaultModuleDocumentationLink()
    self.parent.acknowledgementText = """
This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
""" # replace with organization, grant and thanks.
#
# ImportSubjectWidget
#
class ImportSubjectWidget(ScriptedLoadableModuleWidget):
  """GUI for importing Lead-DBS subject images and transforms.

  Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """

  def setup(self):
    """Build the widget: directory picker, image/transform lists, import button."""
    ScriptedLoadableModuleWidget.setup(self)

    # Instantiate and connect widgets ...

    #
    # Subject Directory Area
    #
    self.subjectDirectoryButton = ctk.ctkDirectoryButton()
    self.subjectDirectoryButton.text = "Select Lead-DBS directory"
    self.layout.addWidget(self.subjectDirectoryButton)

    #
    # Images Area
    #
    imagesCollapsibleButton = ctk.ctkCollapsibleButton()
    imagesCollapsibleButton.text = "Images"
    self.layout.addWidget(imagesCollapsibleButton)

    # Layout within the dummy collapsible button
    imagesFormLayout = qt.QFormLayout(imagesCollapsibleButton)

    #
    # select images list
    #
    self.imagesList = qt.QListWidget()
    self.imagesList.setSelectionMode(qt.QAbstractItemView.ExtendedSelection)
    imagesFormLayout.addRow(self.imagesList)

    #
    # Transforms Area
    #
    transformsCollapsibleButton = ctk.ctkCollapsibleButton()
    transformsCollapsibleButton.text = "Transforms"
    self.layout.addWidget(transformsCollapsibleButton)

    # Layout within the dummy collapsible button
    transformsFormLayout = qt.QFormLayout(transformsCollapsibleButton)

    # converts transforms
    self.updateTransformButton = qt.QPushButton('Update Transform')
    self.updateTransformButton.visible = False
    transformsFormLayout.addRow(self.updateTransformButton)

    #
    # check box select transforms
    #
    self.transformsList = qt.QListWidget()
    self.transformsList.setSelectionMode(qt.QAbstractItemView.ExtendedSelection)
    # NOTE(review): the transforms list is added to imagesFormLayout, so it
    # shows up under the "Images" section while transformsFormLayout only
    # holds the update button — possibly meant to be transformsFormLayout;
    # confirm before changing.
    imagesFormLayout.addRow(self.transformsList)

    #
    # Import Button
    #
    self.importButton = qt.QPushButton("Import")
    self.importButton.toolTip = "Import selected options."
    self.importButton.enabled = True
    self.layout.addWidget(self.importButton)

    # connections
    self.importButton.connect('clicked(bool)', self.onImportButton)
    self.updateTransformButton.connect('clicked(bool)', self.onUpdateTransformButton)
    self.subjectDirectoryButton.directoryChanged.connect(self.onSubjectDirectoryChanged)

    # Add vertical spacer
    self.layout.addStretch(1)

    # Refresh Apply button state
    self.onSubjectDirectoryChanged('.')

  def cleanup(self):
    """Nothing to release when the module is torn down."""
    pass

  def onSubjectDirectoryChanged(self, directory):
    """Repopulate the image/transform lists for the newly chosen directory."""
    logic = ImportSubjectLogic()
    self.imagesList.clear()
    self.imagesList.addItems(logic.getAvailableModalities(directory))
    self.transformsList.clear()
    self.transformsList.addItems(logic.getAvailableTransforms(directory))
    # check for old transform version
    self.updateTransformButton.visible = logic.ish5Transform(directory)
    # change subject buton text to subject directory name
    subjectName = os.path.basename(directory) if self.imagesList.count else "Select Lead-DBS directory"
    self.subjectDirectoryButton.text = subjectName

  def onImportButton(self):
    """Import every selected image and transform from the chosen directory."""
    logic = ImportSubjectLogic()
    for i in range(self.imagesList.count):
      if self.imagesList.item(i).isSelected():
        logic.importImage(self.subjectDirectoryButton.directory, self.imagesList.item(i).text())
    for i in range(self.transformsList.count):
      if self.transformsList.item(i).isSelected():
        logic.importTransform(self.subjectDirectoryButton.directory, self.transformsList.item(i).text())

  def onUpdateTransformButton(self):
    """Convert the subject's old .h5 transforms, then refresh the lists."""
    logic = ImportSubjectLogic()
    directory = self.subjectDirectoryButton.directory
    logic.updateTranform(directory)
    # update
    self.onSubjectDirectoryChanged(directory)
#
# ImportSubjectLogic
#
class ImportSubjectLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def ish5Transform(self, directory):
return os.path.isfile(os.path.join(directory, 'glanatComposite.h5'))
def getAvailableModalities(self, directory):
modalities = []
listing = glob.glob(os.path.join(directory,'anat_*.nii'))
for fileName in listing:
fileName = os.path.split(fileName)[-1] # remove directory
fileName = os.path.splitext(fileName)[0] # remove extension
modality = fileName[5:] # remove 'anat_'
modalities.append(modality)
return modalities
def getAvailableTransforms(self, directory):
posibleTransforms = ["glanat0GenericAffine_backup.mat", "glanatComposite.nii.gz", "glanatInverseComposite.nii.gz"]
availableTransforms = [pt for pt in posibleTransforms if os.path.isfile(os.path.join(directory,pt))]
return availableTransforms
def createNodeName(self, directory, fileName):
subjectName = os.path.split(os.path.abspath(directory))[-1]
fileNameNoExt = os.path.splitext(os.path.splitext(fileName)[0])[0]
return subjectName + '_' + fileNameNoExt
def importImage(self, directory, fileName):
if os.path.splitext(fileName)[-1] != '.nii':
fileName = 'anat_' + fileName + '.nii'
filePath = os.path.join(directory, fileName)
node = slicer.util.loadVolume(filePath, properties={'show':False})
node.SetName(self.createNodeName(directory,fileName))
return node
def importTransform(self, directory, fileName):
filePath = os.path.join(directory, fileName)
if os.path.isfile(filePath):
node = slicer.util.loadTransform(filePath)
node.SetName(self.createNodeName(directory,fileName))
return node
else:
return None
  def importReconstruction(self, directory):
    """Import the electrode reconstruction (not implemented yet)."""
    pass
def updateTranform(self, directory, antsApplyTransformsPath=None):
# flatten
if not antsApplyTransformsPath:
w = qt.QWidget()
fd = qt.QFileDialog(w,'AntsApplyTransformsPath')
if fd.exec():
antsApplyTransformsPath = fd.selectedFiles()[0]
else:
return False
for transform,reference in zip(['glanatComposite','glanatInverseComposite'],['glanat','anat_t1']):
transformFullPath = os.path.join(directory,transform + '.h5') # in case inverse doesnt exist
if os.path.isfile(transformFullPath):
command = antsApplyTransformsPath + " -r " + os.path.join(directory,reference + '.nii') + " -t " + transformFullPath + " -o [" + os.path.join(directory,transform + '.nii.gz') + ",1] -v 1"
commandOut = call(command, env=slicer.util.startupEnvironment(), shell=True) # run antsApplyTransforms
os.remove(transformFullPath)
return True
def runBinaryThresholdImageFilter(self, inputNode, outputNode):
# run Simple ITK threshold Filter
inputImage = sitkUtils.PullVolumeFromSlicer(inputNode)
myFilter = sitk.BinaryThresholdImageFilter()
myFilter.SetLowerThreshold(0.5)
outputImage = myFilter.Execute(inputImage)
sitkUtils.PushVolumeToSlicer(outputImage, outputNode)
# run Simple ITK fill holes Filter
inputImage = sitkUtils.PullVolumeFromSlicer(outputNode)
myFilter = sitk.BinaryFillholeImageFilter()
outputImage = myFilter.Execute(inputImage)
sitkUtils.PushVolumeToSlicer(outputImage, outputNode)
def importSegmentations(self, directory):
# look for nifty files in the segmentations subdirectory
# load the files and binarize if necesary
# add the binary segments to a segmentation node
# segmentation to model node
listing = glob.glob(os.path.join(directory,'segmentations','*.nii*'))
if not listing:
return
# init segmentation node
segmentationNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLSegmentationNode')
segmentColor = [0] * 4
currentSegment = 0
for filename, i in zip(listing,range(len(listing))):
segmentName = os.path.split(filename)[-1].split('.')[0] # name
volumeNode = slicer.util.loadVolume(filename, properties={'name': 'tmp'}) # load volume node
labelMapNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLLabelMapVolumeNode') # init label map node
# volume to labelmap
if volumeNode.GetImageData().GetScalarType() in [vtk.VTK_FLOAT, vtk.VTK_DOUBLE]:
self.runBinaryThresholdImageFilter(volumeNode, labelMapNode)
else:
slicer.modules.volumes.logic().CreateLabelVolumeFromVolume(slicer.mrmlScene, labelMapNode, volumeNode)
# add to segmentation
slicer.modules.segmentations.logic().ImportLabelmapToSegmentationNode(labelMapNode, segmentationNode)
for s in range(currentSegment, segmentationNode.GetSegmentation().GetNumberOfSegments()):
slicer.util.getNode('Labels').GetColor(s+1, segmentColor) # color
addedSegment = segmentationNode.GetSegmentation().GetNthSegment(s)
addedSegment.SetName(slicer.mrmlScene.GenerateUniqueName(segmentName))
addedSegment.SetColor(segmentColor[:-1])
currentSegment = segmentationNode.GetSegmentation().GetNumberOfSegments()
# remove nodes
slicer.mrmlScene.RemoveNode(volumeNode)
slicer.mrmlScene.RemoveNode(labelMapNode)
# add data attributes
shNode = slicer.mrmlScene.GetSubjectHierarchyNode()
shNode.SetItemAttribute(shNode.GetItemByDataNode(segmentationNode), 'Segment', '1')
# segmentation children
IDList = vtk.vtkIdList()
shNode.GetItemChildren(shNode.GetItemByDataNode(segmentationNode), IDList)
for i in range(IDList.GetNumberOfIds()):
shNode.SetItemAttribute(IDList.GetId(i), 'Segment', '1')
return segmentationNode
class ImportSubjectTest(ScriptedLoadableModuleTest):
    """Test case for the ImportSubject scripted module.

    Uses ScriptedLoadableModuleTest base class, available at:
    https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
    """

    def setUp(self):
        """Reset state; clearing the MRML scene is enough here."""
        slicer.mrmlScene.Clear(0)

    def runTest(self):
        """Entry point that runs every test of this suite."""
        self.setUp()
        self.test_ImportSubject1()

    def test_ImportSubject1(self):
        """Download a sample volume and check the logic recognizes its image data.

        Tests should exercise the logic with valid and invalid inputs and
        alert other developers when their changes impact this module's
        behavior.
        """
        self.delayDisplay("Starting the test")
        # Fetch the sample input volume first.
        import SampleData
        SampleData.downloadFromURL(
            nodeNames='FA',
            fileNames='FA.nrrd',
            uris='http://slicer.kitware.com/midas3/download?items=5767',
            checksums='SHA256:12d17fba4f2e1f1a843f0757366f28c3f3e1a8bb38836f0de2a32bb1cd476560')
        self.delayDisplay('Finished with download and loading')
        volumeNode = slicer.util.getNode(pattern="FA")
        logic = ImportSubjectLogic()
        self.assertIsNotNone(logic.hasImageData(volumeNode))
        self.delayDisplay('Test passed!')
|
andreashorn/lead_dbs
|
ext_libs/SlicerNetstim/ImportSubject/ImportSubject.py
|
Python
|
gpl-3.0
| 12,976
|
[
"VTK"
] |
0663aaf5a7f0d37bcdd46ab11e01021eedbf71edac39999b90a6d04ac3c84cf7
|
#!/usr/bin/env python3
#
# vdbsetup.py - script for building Vdbench configurations
#
# Author: Ramon A. Lovato (ramonalovato.com)
# For: DeepStorage, LLC (deepstorage.net)
#
import argparse
import os.path
import os
import re
import statistics
import textwrap
import random
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import pylab
from collections import OrderedDict
# Regex delimiters for input lines: major splits key from value(s),
# minor splits a value list ("key = a, b, c").
DEFAULT_MAJOR_DELIMITER = " *= *"
DEFAULT_MINOR_DELIMITER = " *, *"
# Monte Carlo sampling defaults for the Gaussian skew/range generation.
DEFAULT_MONTE_CARLO_SAMPLES = 200000
# NOTE(review): DEFAULT_SAMPLE_SCALE appears unused in this file -- confirm.
DEFAULT_SAMPLE_SCALE = 10000
# Maximum attempts to place a random non-overlapping hotspot range.
MAX_RANGE_RETRIES = 10
INPUT_TEMPLATE_CONTENT = """#
# vdbsetup input file example
#
#
# General
#
dedupratio=2
dedupunit=4k
compratio=1.5
#
# SDs
#
luns=lun1,lun2,lun3
# Optional: o_direct provided by default
# openflags=
#
# WDs
#
wdcount=1
xfersize=4k
seekpct=100
rdpct=75
percentdisk=100.0
#
# RDs
#
iorate=1000
format=yes
elapsed=60
interval=1
threads=2
#
# Distribution
#
hotspotnum=10
hotspotcap=25
hotspotiopct=10
disttype=gaussian
# Note: only required if disttype=gaussian
distribution=0.75,0.5
"""
#
# Helper dictionaries for single-entry input parsing.
#
# Dictionary of validation lambdas: maps each single-valued config key to a
# predicate over the raw string value. A ValueError raised by a cast inside a
# predicate is caught by parseSingleHelper and treated as an invalid line.
validators = {
    "dedupratio": lambda v: float(v) >= 0,
    "compratio": lambda v: float(v) >= 0,
    "wdcount": lambda v: float(v) > 0,
    "seekpct": lambda v: 0 <= float(v) <= 100,
    "rdpct": lambda v: 0 <= float(v) <= 100,
    "percentdisk": lambda v: 0 <= float(v) <= 100,
    "iorate": lambda v: float(v) > 0,
    "format": lambda v: re.match("(yes)|(no)", v.lower()),
    "threads": lambda v: int(v) > 0,
    "elapsed": lambda v: int(v) > 0,
    "interval": lambda v: int(v) > 0,
    "hotspotnum": lambda v: int(v) >= 0,
    "hotspotcap": lambda v: 0 <= float(v) <= 100,
    "hotspotiopct": lambda v: 0 <= float(v) <= 100,
    "disttype": lambda v: re.match("(even)|(gaussian)|(uniform)", v.lower())
}
# Dictionary of processing lambdas: converts each validated raw string value
# into its final in-memory type before it is stored in config.
processors = {
    "dedupratio": lambda v: float(v),
    "compratio": lambda v: float(v),
    "wdcount": lambda v: int(v),
    "seekpct": lambda v: float(v),
    "rdpct": lambda v: float(v),
    "percentdisk": lambda v: float(v),
    "iorate": lambda v: float(v),
    "format": lambda v: v.lower(),
    "threads": lambda v: int(v),
    "elapsed": lambda v: int(v),
    "interval": lambda v: int(v),
    "hotspotnum": lambda v: int(v),
    "hotspotcap": lambda v: float(v),
    "hotspotiopct": lambda v: float(v),
    "disttype": lambda v: v.lower()
}
# Dictionary of custom usage messages, printed by printBadLine when the
# corresponding validator rejects (or raises on) a value.
messages = {
    "dedupratio": 'Key "dedupratio" requires nonnegative value.',
    "compratio": 'Key "compratio" requires nonnegative value.',
    "wdcount": 'Key "wdcount" requires positive integer value.',
    "seekpct": 'Key "seekpct" requires percentage in range [0, 100].',
    "rdpct": 'Key "rdpct" requires percentage in range [0, 100].',
    "percentdisk": 'Key "percentdisk" requires single percentage in range [0, 100].',
    "iorate": 'Key "iorate" requires positive IOPS value.',
    "format": 'Key "format" must be one of "yes", "no".',
    "threads": 'Key "threads" requires positive integer queue depth.',
    "elapsed": 'Key "elapsed" requires positive integer number of seconds.',
    "interval": 'Key "interval" requires positive integer number of seconds.',
    "hotspotnum": 'Key "hotspotnum" requires nonnegative integer number of hotspots.',
    "hotspotcap": 'Key "hotspotcap" requires percentage in range [0, 100].',
    "hotspotiopct": 'Key "hotspotiopct" requires percentage in range [0, 100].',
    "disttype": 'Key "disttype" must be one of "even", "gaussian", "uniform".'
}
# Validators for the list-valued keys. NOTE: the "distribution" lambda reads
# the module-level config at call time, so "disttype" must already have been
# parsed; parseListHelper enforces that ordering.
multiValidators = {
    "luns": lambda v: len(v) > 0,
    "openflags": lambda v: len(v) > 0,
    "distribution": lambda v:
        config["disttype"] == "gaussian" and
        len(v) == 2 and
        len(list(filter(lambda w: float(w) >= 0, v))) == 2
}
# Processors converting raw string lists to their final in-memory types.
multiProcessors = {
    "luns": lambda v: v,
    "openflags": lambda v: v,
    "distribution": lambda v: list(map(float, v))
}
# Custom usage messages for the list-valued keys.
#
# BUG FIX: a stray string literal used to sit just before the "distribution"
# entry; Python's implicit string concatenation silently fused it into the
# key (making it '"min,max", ...distribution'), so looking up
# multiMessages["distribution"] raised KeyError when reporting a bad line.
# The stray fragment has been removed.
multiMessages = {
    "luns": 'Key "luns" requires at least one LUN',
    "openflags": 'Key "openflags" requires at least one flag.',
    "distribution": 'Key "distribution" is only valid for Gaussian '
        'distributions, and keys "hotspotnum" and "disttype" must '
        'be set first. Values must be of form '
        '"SKEW_STD_DEV,RANGE_STD_DEV", where both standard '
        'deviations are nonnegative floating point values.'
}
# Uses an OrderedDict because certain parameters must be specified before
# other parameters (e.g. "hotspotnum" and "disttype" before "distribution").
# Every value starts as None; parseInput() treats any key still None after
# parsing as a missing specification.
config = OrderedDict({
    # General
    "dedupratio": None, # Deduplication ratio
    "dedupunit": None, # Deduplication unit
    "compratio": None, # Compression ratio
    # SDs
    "luns": None, # Luns, list OK
    # WDs
    "wdcount": None, # Number of workloads
    "xfersize": None, # Block size
    "seekpct": None, # Percent random
    "rdpct": None, # Percent read (vs. write)
    "percentdisk": None, # How much of the total disk to use
    # RDs
    "iorate": None, # IOPS
    "format": None, # Pre-format lun
    "threads": None, # Queue depth
    "elapsed": None, # Duration
    "interval": None, # Update frequency
    # Distribution
    "hotspotnum": None, # Number of hotspots
    "hotspotcap": None, # Total capacity percent for all hotspots
    "hotspotiopct": None, # Percent of IO for ALL hotspots
    "disttype": None # Distribution type: even, gaussian, uniform
    #
    # Added inline only if needed
    #
    # "openflags": []
    # - open flags for SDs
    # - o_direct provided by default as block devices require it
    # "distribution": []
    # - parameters for the distribution
    #
    # Type | Params
    # ------------------------------------------
    # - gaussian | STANDARD_DEVIATION_SKEW, STANDARD_DEVIATION_RANGE
})
#
# Factories.
#
# Parent class for the SD/WD/RD factories: collects key/value parameters in
# insertion order and renders them as a single Vdbench definition line.
# (Idiom fixes: `key not in` / `is None` / tuple isinstance; behavior unchanged.)
class Factory:
    def __init__(self, name_type="name", name=None, keys=None):
        """Create a factory.

        name_type -- the Vdbench definition keyword (e.g. "sd", "wd", "rd").
        name -- initial value for the name key (settable later via setName).
        keys -- iterable of parameter keys to pre-register, order preserved.
        """
        self.name_type = name_type
        self.params = OrderedDict()
        self.params[name_type] = name
        for key in keys:
            self.addKey(key)

    def addKey(self, key):
        """Register *key* with no value; toString fails if it is never set."""
        self.params[key] = None

    def set(self, key, *values):
        """Assign one value (or a tuple of several) to *key*, registering it if new."""
        if key not in self.params:
            self.addKey(key)
        if len(values) < 1:
            raise ValueError('Error: no values passed for key "{}".'.format(
                key))
        elif len(values) == 1:
            self.params[key] = values[0]
        else:
            self.params[key] = values

    def setName(self, name):
        """Set this definition's name (the value of the name_type key)."""
        self.set(self.name_type, name)

    def append(self, key, *values):
        """Append *values* to *key*, promoting an existing scalar to a list."""
        if len(values) == 0:
            return
        if key not in self.params:
            self.set(key, values)
        else:
            if not isinstance(self.params[key], list):
                self.params[key] = [self.params[key]]
            for v in values:
                if isinstance(v, list):
                    # Flatten one level so append("k", [a, b]) adds a and b.
                    for w in v:
                        self.params[key].append(w)
                else:
                    self.params[key].append(v)

    def toString(self):
        """Render as "k1=v1,k2=(a,b),..."; raises if any key is unset or empty."""
        partials = []
        for k, v in self.params.items():
            if v is None:
                raise Exception('Error: key "{}" not assigned (value None).'.format(k))
            if isinstance(v, (list, tuple)):
                if len(v) == 0:
                    raise Exception('Error: key {} has length 0.'.format(k))
                if len(v) == 1:
                    partial = "{}={}".format(k, truncate(v[0]))
                else:
                    partial = "{}=({})".format(
                        k, ",".join([str(truncate(w)) for w in v]))
            else:
                partial = "{}={}".format(k, truncate(v))
            partials.append(partial)
        return ",".join(partials)
class SDFactory(Factory):
    """Factory for Vdbench storage definitions (SDs)."""

    def __init__(self):
        super().__init__(name_type="sd", keys=[
            "lun",
            "openflags"
        ])
        # Block devices require O_DIRECT, so it is always present by default.
        self.set("openflags", ["o_direct"])

    def appendOpenFlags(self, *fstrings):
        """Append open flags, skipping duplicates (including the default "o_direct")."""
        for group in fstrings:
            for flag in group:
                if flag in self.params["openflags"]:
                    continue
                self.append("openflags", flag)
class WDFactory(Factory):
    """Factory for Vdbench workload definitions (WDs)."""

    def __init__(self):
        keys = ["sd", "xfersize", "seekpct", "rdpct"]
        super().__init__(name_type="wd", keys=keys)

    def addRange(self, r):
        """Attach a (start, end) disk range to this workload."""
        self.set("range", r)
class RDFactory(Factory):
    """Factory for Vdbench run definitions (RDs)."""

    def __init__(self):
        keys = ["wd", "iorate", "format", "threads", "elapsed", "interval"]
        super().__init__(name_type="rd", keys=keys)
#
# Functions.
#
# Get CLI arguments.
#
# FIXES:
#  - An empty minor delimiter printed "Using default" but never actually
#    assigned the default; it does now.
#  - The delimiter checks were an if/elif chain, so the minor delimiter was
#    never examined when the major delimiter was also empty; the checks are
#    now independent.
#  - Typos in user-facing text ("enale" -> "enable"; "discourage" ->
#    "discouraged").
def getArgs(customArgs=None):
    """Parse and sanity-check command-line arguments.

    customArgs -- optional list of argument strings (useful for testing);
    when None, sys.argv is used. Exits the process on unusable input.
    """
    parser = argparse.ArgumentParser(
        description="create Vdbench hotspot-distribution configuration files")
    # Positional.
    parser.add_argument("inPath", type=str, nargs="?",
        default=None,
        help="where to find the input file")
    parser.add_argument("outPath", type=str, nargs="?",
        default=None,
        help="where to output the configuration file")
    # Optional.
    parser.add_argument("--make-template", action="store_true",
        help="create an example input file and exit")
    parser.add_argument("-v", "--verbose", action="store_true",
        help="enable verbose mode")
    parser.add_argument("-gs", "--graph-skews", action="store_true",
        help="enable graph display of the hotspot skews")
    parser.add_argument("-gr", "--graph-ranges", action="store_true",
        help="enable graph display of the hotspots ranges")
    parser.add_argument("--no-overwrite", action="store_true",
        help="don't overwrite output file if it already exists")
    parser.add_argument("--no-shuffle", action="store_true",
        help="disable random hotspot permutation")
    parser.add_argument("--header", type=str,
        help="add a comment header")
    parser.add_argument("-M", "--major-delimiter", type=str,
        default=DEFAULT_MAJOR_DELIMITER,
        help='major delimiter regex used in configuration file (default "{}")'.format(
            DEFAULT_MAJOR_DELIMITER))
    parser.add_argument("-m", "--minor-delimiter", type=str,
        default=DEFAULT_MINOR_DELIMITER,
        help='minor delimiter regex used in configuration file (default "{}")'.format(
            DEFAULT_MINOR_DELIMITER))
    parser.add_argument("-c", "--sample-count", type=int,
        default=DEFAULT_MONTE_CARLO_SAMPLES,
        help="number of samples to generate when using Monte Carlo method "
        "to compute distributions (default {}); setting sample size < 10000 "
        "is strongly discouraged".format(
            DEFAULT_MONTE_CARLO_SAMPLES))
    if customArgs:
        args = parser.parse_args(customArgs)
    else:
        args = parser.parse_args()
    # If make_template is set, we can just return without the extra checks,
    # since we won't need any of them.
    if args.make_template:
        return args
    # Otherwise, make sure the inPath and outPath were actually set.
    if not args.inPath or not args.outPath:
        parser.print_help()
        exit()
    args.outPath = os.path.realpath(args.outPath)
    # Verify input file exists.
    if not os.path.exists(args.inPath):
        print("Error: input file {} does not exist.".format(args.inPath))
        exit()
    elif not os.path.isfile(args.inPath):
        print("Error: input path {} is not a valid file.".format(args.inPath))
        exit()
    # Verify output directories exist.
    os.makedirs(os.path.dirname(args.outPath), exist_ok=True)
    # Check delimiters; each falls back to its default independently.
    if not args.major_delimiter:
        print("Error: major delimiter cannot be empty. Using default ({}).".format(
            DEFAULT_MAJOR_DELIMITER))
        args.major_delimiter = DEFAULT_MAJOR_DELIMITER
    if not args.minor_delimiter:
        print("Error: minor delimiter cannot be empty. Using default ({}).".format(
            DEFAULT_MINOR_DELIMITER))
        args.minor_delimiter = DEFAULT_MINOR_DELIMITER
    if args.major_delimiter == args.minor_delimiter:
        print("Error: major and minor delimiter cannot be the same. Using defaults ({}, {}).".format(
            DEFAULT_MAJOR_DELIMITER, DEFAULT_MINOR_DELIMITER))
        args.major_delimiter = DEFAULT_MAJOR_DELIMITER
        args.minor_delimiter = DEFAULT_MINOR_DELIMITER
    # Check sample count.
    if args.sample_count < 10000:
        print("Warning: setting sample size < 10000 is strongly discouraged. "
            "Errors and unspecified behavior may occur.")
    return args
# Parse the input file.
def parseInput(inPath, major_del=DEFAULT_MAJOR_DELIMITER,
    minor_del=DEFAULT_MINOR_DELIMITER, verbose=False):
    """Read key/value pairs from *inPath* into the module-level config.

    major_del separates key from value(s); minor_del separates multiple
    values. Exits the process (via printBadLine) on any malformed line, and
    exits with a summary when required keys are still missing afterwards.
    Returns the populated config dict.
    """
    with open(inPath, "r") as inFile:
        for realLine in inFile:
            line = realLine.strip()
            # Comment or empty.
            if len(line) == 0 or line.startswith("#"):
                continue
            tokens = re.split(major_del, line, maxsplit=1)
            # printBadLine exits the process, so the indexing below is safe.
            if len(tokens) < 2:
                printBadLine(line)
            elif len(tokens[1]) < 1:
                printBadLine(line, custom="Empty value.")
            key = tokens[0].lower()
            values = [stripQuotes(v) for v in re.split(minor_del, tokens[1])]
            parseLine(key, values, line, config)
    # Check for incomplete entries.
    incompletes = []
    for k, v in config.items():
        if v == None:
            incompletes.append(k)
        # "distribution" is only inserted into config when present in the
        # input file, so its absence is detectable via the key itself.
        if (k == "disttype" and v == "gaussian"
            and not "distribution" in config.keys()):
            incompletes.append("distribution required when disttype is "
                "gaussian")
        if verbose:
            print("{}={}".format(k, str(v)))
    if len(incompletes) > 0:
        print("\nError: input file is missing the following specifications:")
        for k in incompletes:
            print(" - {}".format(k))
        exit()
    return config
# Evaluate a single input line, dispatching to the list-valued or the
# single-valued handler depending on the key.
def parseLine(key, values, line, config):
    handler = parseListHelper if key in multiValidators else parseSingleHelper
    handler(key, values, line, config)
# Nonlist helper for parseLine: validate and store a single-valued key.
def parseSingleHelper(key, values, line, config):
    if len(values) != 1:
        printBadLine(line, custom='Key "{}" accepts exactly one value.'.format(
            key))
    value = values[0]
    # Make sure the key is known and not a duplicate (exits on failure).
    validateKey(key, line, config)
    # Apply the key-specific validator if one exists; a ValueError raised by
    # a cast inside the validator also counts as a bad line.
    try:
        valid = validators[key](value) if key in validators else True
    except ValueError:
        valid = False
    if not valid:
        printBadLine(line, custom=messages[key])
    if key in processors:
        config[key] = processors[key](value)
    else:
        config[key] = value
# List helper for parseLine: validates and stores the list-valued keys
# ("luns", "openflags", "distribution").
def parseListHelper(key, values, line, config):
    # Distribution and openflags are special and get added only if present.
    if key == "distribution":
        # NOTE(review): `not config["hotspotnum"]` also triggers when
        # hotspotnum == 0, which the validators allow -- confirm intended.
        if not config["hotspotnum"] or not config["disttype"]:
            printBadLine(line,
                custom='Keys "hotspotnum" and "disttype" must be specified before "distribution".')
        # Register the key so validateKey() recognizes it below.
        config["distribution"] = None
    if key == "openflags":
        config["openflags"] = None
    # Validate the key exists.
    validateKey(key, line, config)
    # Validate the key according to its specific criteria.
    try:
        # Short-circuits if key is unchecked.
        if key in multiValidators.keys() and not multiValidators[key](values):
            printBadLine(line, custom=multiMessages[key])
    except ValueError as e:
        printBadLine(line, custom=multiMessages[key])
    config[key] = (multiProcessors[key](values) if key in multiProcessors.keys() else values)
# Validate a key: it must be recognized and not yet assigned.
# Exits the process via printBadLine on failure.
def validateKey(key, line, config):
    # Check to make sure the key is recognized.
    if key not in config:
        printBadLine(line, "Unrecognized key: {}.".format(key))
    # Check to make sure the key isn't a duplicate (any truthy existing
    # value means an earlier line already set it).
    if config[key]:
        printBadLine(line, "Duplicate specification.")
# Strip one layer of surrounding quotes.
#
# BUG FIX: the old pattern ^["'].*["']$ also stripped MISMATCHED pairs
# (e.g. '"abc\'' lost both characters); a backreference now requires the
# opening and closing quote to be the same character.
def stripQuotes(string):
    """Return *string* without its surrounding matching quotes, if any."""
    if re.match(r"^([\"\']).*\1$", string):
        return string[1:-1]
    else:
        # Nothing to do.
        return string
# Report an uninterpretable input line, then terminate the program.
def printBadLine(line, custom=None):
    suffix = " {}".format(custom) if custom else ""
    print('Error: bad input line "{}".{}'.format(line, suffix))
    exit()
# Create a header comment block suitable for file.writelines().
#
# BUG FIX: textwrap's wrap() returns lines WITHOUT trailing newlines, so the
# original version produced a single run-on line when the result was passed
# to writelines(). Each wrapped line now carries its own newline, and the
# closing "#" no longer needs a compensating leading newline.
def makeCommentHeader(header):
    """Wrap *header* into '# '-prefixed lines bracketed by bare '#' lines."""
    wrapper = textwrap.TextWrapper(
        width=70, initial_indent="# ", subsequent_indent="# ",
        expand_tabs=True, drop_whitespace=True, fix_sentence_endings=False,
        break_long_words=True, break_on_hyphens=True)
    return ["#\n"] + [line + "\n" for line in wrapper.wrap(header)] + ["#\n"]
# Render the general (top-of-file) configuration lines.
def makeGeneral(config):
    keys = ("dedupratio", "dedupunit", "compratio")
    return ["{}={}\n".format(key, truncate(config[key])) for key in keys]
# Create one storage definition line per configured LUN.
def makeSDs(config):
    sdList = []
    for index, lun in enumerate(config["luns"], start=1):
        factory = SDFactory()
        factory.setName("sd{}".format(index))
        factory.set("lun", lun)
        # "openflags" only exists in config when the input file provided it.
        if "openflags" in config:
            factory.appendOpenFlags(config["openflags"])
        sdList.append(factory.toString() + "\n")
    return sdList
# Create skews -- percentage of allotted hotspot IO percentage that goes to
# each hotspot.
#
# BUG FIX: graphSkews() used to be called unconditionally even though each
# call was annotated "Graph if requested" and the CLI exposes -gs; the call
# is now gated on args.graph_skews (or the explicit graph parameter).
def makeSkews(args, config, graph=False):
    """Return a list of per-hotspot IO skews according to config["disttype"]."""
    skews = []
    mode = config["disttype"]
    hsCount = config["hotspotnum"]
    wdCount = config["wdcount"]
    totalCount = hsCount + wdCount
    ioPct = config["hotspotiopct"]
    sigma = None
    # Even: every workload (hotspot or not) gets an equal share.
    if mode == "even":
        skews = [ioPct / totalCount] * hsCount
    # Gaussian: Monte Carlo bucketing of a normal distribution.
    elif mode == "gaussian":
        sigma = config["distribution"][0]
        skews = makeGaussianSkews(sigma, args.sample_count, hsCount, ioPct)
    # Uniform: random shares normalized to sum to ioPct.
    elif mode == "uniform":
        skews = [random.random() for i in range(hsCount)]
        skewSum = sum(skews)
        skews = [ioPct * s / skewSum for s in skews]
    # Graph if requested.
    if graph or args.graph_skews:
        graphSkews(config, mode, skews, sigma=sigma)
    for s in skews:
        assert s > 0, "Size 0 skew generated."
    # Shuffle so hotspot index does not correlate with skew magnitude.
    if not args.no_shuffle:
        random.shuffle(skews)
    return skews
# Use a Monte Carlo method to determine the hotspot skews: draw sampleCount
# samples from N(0, sigma^2), bucket them over [-1, 1), and convert each
# bucket's share of the samples into a share of ioPct.
#
# @param sigma Standard deviation for the normal distribution.
# @param sampleCount Number of samples to generate.
# @param hsCount Number of hotspots to generate.
# @param ioPct Percentage of total IOs to split among skews (ioPct * skew).
def makeGaussianSkews(sigma, sampleCount, hsCount, ioPct):
    samples = getGaussianSamples(0.0, sigma, sampleCount)
    buckets = getBucketCounts(samples, -1.0, 1.0, hsCount)
    total = sum(buckets)
    return [ioPct * (count / total) for count in buckets]
# Count how many samples fall into each of bucketCount equal-width buckets
# spanning [distMin, distMax). Samples outside that span are ignored.
#
# PERFORMANCE FIX: the original filtered the entire sample list once per
# bucket (O(samples * buckets)); a binary search over the same boundary list
# now classifies each sample in O(log buckets) with identical results, and
# no longer requires the samples to be sorted.
#
# @param samples Collection of samples to count.
# @param distMin Minimum sample value we care about (inclusive).
# @param distMax Maximum sample value we care about (exclusive).
# @param bucketCount Number of buckets to generate.
# @return List of per-bucket sample counts.
def getBucketCounts(samples, distMin, distMax, bucketCount):
    from bisect import bisect_right
    distRange = abs(distMax - distMin)
    assert distRange > 0, "distRange = 0"
    assert bucketCount > 0, "bucketCount = 0"
    distStride = float(distRange) / bucketCount
    # Calculate bucket boundaries. We do it this way instead of using
    # np.arange because we care more about the list being the right length
    # than about small floating point errors.
    bucketBounds = [distMin + (distStride * i) for i in range(bucketCount)]
    # Sanity check to make sure we generated the right number of buckets.
    assert len(bucketBounds) == bucketCount, "Generated wrong number of buckets"
    # Determine number of samples in each bucket.
    buckets = [0] * bucketCount
    for s in samples:
        if distMin <= s < distMax:
            # bisect_right never returns 0 here because s >= bucketBounds[0].
            buckets[bisect_right(bucketBounds, s) - 1] += 1
    return buckets
# Draw sampleCount samples from N(mu, sigma^2), returned sorted ascending.
def getGaussianSamples(mu, sigma, sampleCount):
    return np.sort(np.random.normal(mu, sigma, sampleCount))
# Graph Gaussian skews as a bar chart using matplotlib.
#
# NOTE(review): binCount and sigma are accepted but unused; they are kept
# for signature compatibility with existing callers.
#
# @param skews Skews to graph.
# @param binCount Number of bins for the histogram (currently unused).
# @param sigma Standard deviation of the distribution (currently unused).
def graphGaussianSkews(skews, binCount, sigma=None):
    positions = np.arange(len(skews))
    heights = np.array(skews)
    plt.bar(positions, heights, 1, color="blue")
    plt.draw()
# Graph a bar chart of skews using matplotlib.
#
# NOTE(review): yLim is accepted but never applied to the axes -- confirm
# whether a plt.ylim call was intended.
#
# @param skews Skews to graph (one bar each, width 1).
# @param yLim Max value for y-axis (currently unused).
def graphSkewBars(skews, yLim):
    heights = np.array(skews)
    plt.bar(range(len(skews)), heights, 1, color="blue")
    plt.draw()
# Graph the skews distribution on figure 1.
#
# @param config Configuration dictionary.
# @param mode Distribution type: "even", "gaussian" or "uniform".
# @param skews Skews distribution.
# @param sigma Standard deviation of the distribution (currently unused).
def graphSkews(config, mode, skews, sigma=None):
    fig = plt.figure(1)
    fig.suptitle("Skews - mode: {}".format(mode))
    plt.xlabel("Hotspot number")
    plt.ylabel("Percentage of IOs")
    graphSkewBars(skews, config['hotspotiopct'])
# Graph the ranges distribution histogram using matplotlib (figure 2).
#
# @param config Configuration dictionary.
# @param ranges Range distribution, list of (start, end) pairs.
# @param skews Skews distribution (used as the bar heights).
def graphRanges(config, ranges, skews):
    plt.figure(2)
    fig = plt.figure(2)
    fig.suptitle("Ranges - mode: {}".format(config["disttype"]))
    plt.xlabel("Disk location")
    plt.ylabel("Percentage of IOs")
    count = config["hotspotnum"]
    # NOTE(review): ioPct is assigned but never used in this function.
    ioPct = config["hotspotiopct"]
    # Sanity check.
    assert count == len(ranges) == len(skews), "The number of ranges or skews is incorrect."
    # Build (x=start, y=skew, w=width) bars and sort them by disk position.
    triplets = []
    for i in range(count):
        x = ranges[i][0]
        y = skews[i]
        w = ranges[i][1]-x
        triplets.append((x, y, w))
    triplets.sort(key=lambda t: t[0])
    x = np.array([triplets[i][0] for i in range(count)])
    y = np.array([triplets[i][1] for i in range(count)])
    widths = np.array([triplets[i][2] for i in range(count)])
    plt.bar(x, y, widths, color="blue")
    plt.draw()
    # X axis always spans the full disk (in percent).
    pylab.xlim([0, 100])
# Use Monte Carlo method to determine the ranges for the hotspots by generating
# 2 * halfCount samples in the Gaussian distribution f(X | mu, sigma^2) and
# counting only the samples that sit above the mean, which skews hotspot
# positions toward the beginning of the disk.
#
# @param sigma Standard deviation for the normal distribution.
# @param halfCount One-half the number of samples to generate. This number
# will be doubled so that 2 * halfCount samples are generated.
# @param hsCount Number of hotspots to generate.
# @param capacity Percentage of total capacity to split among all hotspots.
# @param percentDisk Maximum percentage of the disk that may be used.
# @return (sizes, positions) parallel lists for the hsCount hotspots.
def getGaussianRangeComponents(sigma, halfCount, hsCount, capacity, percentDisk):
    # Calculate sizes via uniform-random sampling, normalized so they sum
    # to the allotted capacity.
    sizes = [random.random() for i in range(hsCount)]
    partSum = sum(sizes)
    for i in range(len(sizes)):
        sizes[i] = capacity * (sizes[i] / partSum)
    # Calculate positions from a Gaussian distribution. In order to skew
    # the positions near the beginning of the disk, we generate double the
    # number of input samples and only count those that are above the mean.
    sampleCount = 2 * halfCount
    # Generate many samples sorted by natural ordering.
    samples = getGaussianSamples(0.0, sigma, sampleCount)
    # Buckets over [0.0, 1.0) keep only the above-mean half of the samples.
    buckets = getBucketCounts(samples, 0.0, 1.0, hsCount)
    # Reverse so the densest buckets land near the start of the disk.
    buckets.reverse()
    # Determine start positions as a running (cumulative) sum of bucket
    # weights, clamped to [0, percentDisk].
    bucketSum = sum(buckets)
    positions = []
    for i in range(hsCount):
        b = percentDisk * buckets[i] / bucketSum
        if i > 0:
            b += positions[i-1]
        if b < 0.0:
            b = 0.0
        elif b > percentDisk:
            b = percentDisk
        positions.append(b)
    # Sanity check to make sure we generated the same number of sizes and
    # positions.
    assert len(sizes) == len(positions) == hsCount, "Generated an unequal number of sizes and positions."
    return sizes, positions
# Helper function for assembling ranges from sizes and positions.
#
# @param sizes Per-hotspot sizes (percent of disk).
# @param positions Per-hotspot start positions (percent of disk).
# @param hsCount Number of hotspots.
# @param capacity Maximum total size of all hotspots combined.
# @param percentDisk Maximum percent of disk we're allowed to use.
# @param noShuffle When True, keep ranges sorted by start position.
# @return List of (start, end) tuples, one per hotspot.
def assembleRanges(sizes, positions, hsCount, capacity, percentDisk,
    noShuffle=False):
    ranges = []
    hsSum = 0
    # We deliberately don't break out of the loop early in the event of
    # overflow. We could do that, but it would result in our generating the
    # wrong number of ranges, which would cause problems later. Instead,
    # we let the loop continue, which will generate ranges of size 0 for all
    # those after the overflow occurred.
    for i in range(hsCount):
        r = assembleRangesHelper(ranges, positions[i], sizes[i], capacity,
            percentDisk, hsSum)
        ranges.append(r)
        hsSum += r[1] - r[0]
        # Need to resort the ranges after each iteration in case we added one
        # out of order.
        ranges.sort(key=lambda r: r[0])
    if not noShuffle:
        random.shuffle(ranges)
    return ranges
# Helper for assembleRanges that tries to construct a new range according to
# specifications.
#
# @param ranges Currently allotted ranges.
# @param position Desired start position for the new range.
# @param size Size of the range to generate.
# @param capacity Maximum size of sum of all hotspots.
# @param percentDisk Maximum percent of disk we're allowed to use.
# @param hsSum The current sum of all hotspot sizes.
# @return (start, end) tuple for the new range.
def assembleRangesHelper(ranges, position, size, capacity, percentDisk, hsSum):
    # Shrink the range if it would push the running total past capacity.
    if hsSum + size > capacity:
        size = capacity - hsSum
    # NOTE(review): hsSize is only used by the assert below (stripped under
    # python -O); hsEnd is computed from the un-truncated size -- confirm
    # that mismatch is intended.
    hsSize = formatRangeVal(size)
    assert hsSize > 0.0, "Size 0 hotspot generated."
    hsStart = formatRangeVal(position)
    hsEnd = formatRangeVal(hsStart + size)
    # On collision, slide the range to start right after the last (by sort
    # order) existing range.
    if checkRangeConflicts(ranges, hsStart, hsEnd):
        hsStart = ranges[-1][1]
        hsEnd = formatRangeVal(hsStart + size)
    # We exceeded the capacity. Try to insert it at a (uniform) random position.
    if not checkRangeVals(hsStart, hsEnd, percentDisk):
        tries = 0
        while True:
            hsStart = formatRangeVal(random.uniform(0.0, percentDisk))
            hsEnd = formatRangeVal(hsStart + size)
            if (checkRangeVals(hsStart, hsEnd, percentDisk) and not
                checkRangeConflicts(ranges, hsStart, hsEnd)):
                break
            tries += 1
            if tries >= MAX_RANGE_RETRIES:
                raise Exception("Error: unable to generate random non-overlapping range within {} tries.".format(
                    MAX_RANGE_RETRIES))
    # Sanity check.
    assert checkRangeVals(hsStart, hsEnd, percentDisk), "Generated an invalid range: ({},{}). Allowed percentage of disk: {}.".format(
        hsStart, hsEnd, percentDisk)
    return (hsStart, hsEnd)
# Format a range value by truncating it to two decimal places.
# NOTE(review): the module-level truncate() docstring says THREE decimal
# places, contradicting this comment -- confirm which is correct. float()
# is applied because truncate() may not return a float.
def formatRangeVal(val):
    return float(truncate(val))
# A range is valid stand-alone when 0 <= start < end <= percentDisk.
# (This does not check for conflicts with other ranges.)
def checkRangeVals(start, end, percentDisk):
    if start < 0.0:
        return False
    if start >= end:
        return False
    return end <= percentDisk
# Check for range conflict.
#
# BUG FIX: the original only tested whether either ENDPOINT of the candidate
# fell inside an existing range, so a candidate that completely CONTAINED an
# existing range (start < r[0] and end > r[1]) was not reported as a
# conflict. A containment test has been added; every conflict detected
# before is still detected.
#
# @param ranges Currently allotted (start, end) ranges.
# @param start Start of the candidate range.
# @param end End of the candidate range.
# @return True when the candidate overlaps any existing range.
def checkRangeConflicts(ranges, start, end):
    for r in ranges:
        # Either endpoint lands inside r, or the candidate swallows r whole.
        if (r[0] <= start < r[1] or r[0] <= end < r[1]
                or (start <= r[0] and end >= r[1])):
            return True
    return False
# Make uniform random ranges.
#
# GENERALIZATION: accepts an optional noShuffle flag (default False keeps the
# old behavior) so callers honoring --no-shuffle can pass it through;
# previously uniform ranges were unconditionally shuffled by assembleRanges.
def makeUniformRanges(hsCount, hsSpace, percentDisk, noShuffle=False):
    """Return hsCount ranges with uniform-random sizes and positions.

    Sizes are normalized to sum to hsSpace; positions are sorted before
    being assembled into non-overlapping ranges.
    """
    sizes = []
    positions = []
    for i in range(hsCount):
        sizes.append(random.random())
        positions.append(random.uniform(0.0, percentDisk))
    positions.sort()
    sizeSum = sum(sizes)
    sizes = [hsSpace * s / sizeSum for s in sizes]
    return assembleRanges(sizes, positions, hsCount, hsSpace, percentDisk,
        noShuffle=noShuffle)
# Create hotspot range distribution.
#
# ROBUSTNESS FIX: "hotspotnum" may legitimately be 0 (its validator accepts
# any nonnegative integer), but the "even" branch divided by hsCount and
# crashed with ZeroDivisionError; zero hotspots now yields an empty list.
# (Also removed the unused local totalCount.)
def makeRanges(args, config):
    """Return a list of (start, end) disk ranges, one per hotspot."""
    ranges = []
    mode = config["disttype"]
    hsCount = config["hotspotnum"]
    hsSpace = config["hotspotcap"]
    percentDisk = config["percentdisk"]
    if hsCount == 0:
        return ranges
    # Even: equal-width hotspots separated by equal-width gaps.
    if mode == "even":
        width = hsSpace / hsCount
        freeSpace = percentDisk - hsSpace
        gapCount = hsCount + 1
        gapWidth = freeSpace / gapCount
        stride = width + gapWidth
        for i in range(hsCount):
            start = (i * stride) + gapWidth
            end = start + width
            ranges.append((start, end))
    # Gaussian: Monte Carlo sizes/positions assembled into ranges.
    elif mode == "gaussian":
        sigma = config["distribution"][1]
        sizes, positions = getGaussianRangeComponents(sigma, args.sample_count,
            hsCount, hsSpace, percentDisk)
        ranges = assembleRanges(sizes, positions, hsCount,
            hsSpace, config["percentdisk"], noShuffle=args.no_shuffle)
    # Uniform random.
    elif mode == "uniform":
        ranges = makeUniformRanges(hsCount, hsSpace, percentDisk)
    return ranges
# Make workload definitions.
def makeWDs(args, config):
wdList = []
wdCount = config["wdcount"]
hsCount = config["hotspotnum"]
percentDisk = config["percentdisk"]
skews = makeSkews(args, config)
ranges = makeRanges(args, config)
# Setup range graph if requested.
if args.graph_ranges:
graphRanges(config, ranges, skews)
total = wdCount + hsCount
for i in range(total):
wdf = WDFactory()
wdf.setName("wd{}".format(i+1))
wdf.set("sd", "sd*")
wdf.set("xfersize", config["xfersize"])
wdf.set("seekpct", config["seekpct"])
wdf.set("rdpct", config["rdpct"])
if percentDisk != 100.0:
wdf.addRange((0, percentDisk))
# Hotspot.
if i >= wdCount:
j = i - wdCount - 1
wdf.set("skew", skews[j])
wdf.addRange(ranges[j])
wdList.append(wdf.toString() + "\n")
return wdList
# Make run definitions.
def makeRDs(config):
rdList = []
# There's only one RD per file, so the list is more for consistency.
rdf = RDFactory()
rdf.setName("rd1")
rdf.set("wd", "wd*")
rdf.set("iorate", config["iorate"])
rdf.set("format", config["format"])
rdf.set("threads", config["threads"])
rdf.set("elapsed", config["elapsed"])
rdf.set("interval", config["interval"])
rdList.append(rdf.toString() + "\n")
return rdList
# Build the output configuration file.
def buildOutput(args, config, verbose=False):
outPath = args.outPath
if args.no_overwrite and os.path.exists(outPath):
i = 0
while os.path.exists(outPath):
i += 1
outPath = "{} ({})".format(args.outPath, str(i))
with open(outPath, "w") as outFile:
print("\nOutput saved as {}".format(outPath))
if args.header:
outFile.writelines(makeCommentHeader(args.header))
# General
lines = makeGeneral(config)
outFile.writelines(lines)
# SDs
lines = makeSDs(config)
outFile.writelines(lines)
# WDs and distribution
lines = makeWDs(args, config)
outFile.writelines(lines)
# RDs
lines = makeRDs(config)
outFile.writelines(lines)
# Format a numeric value for output:
#  - a float with no fractional part is returned as an int,
#  - any other float is formatted (rounded -- not truncated, despite the
#    function's name) to two decimal places and returned as a *string*,
#  - any non-float input is returned unchanged.
# Callers that need a numeric result (e.g. formatRangeVal) wrap the
# return value in float().
def truncate(f):
    if isinstance(f, float):
        if f.is_integer():
            return int(f)
        return "{0:.2f}".format(f)
    return f
# Write an example input file into the working directory.
def makeTemplate():
    outName = "vdbsetup_input_template.txt"
    with open(outName, "w") as template:
        template.write(INPUT_TEMPLATE_CONTENT)
    print('Input file example saved as "{}".'.format(outName))
# Main.
def main():
    """Program entry point.

    Parses arguments, then either writes the input-file template
    (--make-template) or converts the input file into a vdbench
    configuration, optionally showing matplotlib graphs at the end.
    """
    args = getArgs()
    if args.verbose:
        print("Verbose logging enabled.\n")
    if args.make_template:
        # BUGFIX: this used to be wrapped in try/except/finally where the
        # `finally: return` silently swallowed the IOError that the except
        # clause had just re-raised.  Let any IOError propagate instead.
        makeTemplate()
        return
    # The previous `except IOError as e: raise e` wrappers around the two
    # calls below were no-op re-raises and have been removed; exceptions
    # propagate unchanged.
    config = parseInput(args.inPath, major_del=args.major_delimiter,
                        minor_del=args.minor_delimiter, verbose=args.verbose)
    buildOutput(args, config, verbose=args.verbose)
    if args.graph_skews or args.graph_ranges:
        plt.show()
if __name__ == "__main__":
    main()
|
TheOtherOtherOperation/vdbsetup
|
vdbsetup.py
|
Python
|
mit
| 34,423
|
[
"Gaussian"
] |
505db2d22187e27df5a4e8e4182a8922634619316585fdc612642bad63250e4e
|
from electrum_NMC.util import print_error
import httplib, urllib
import socket
import threading
import hashlib
import json
from urlparse import urlparse, parse_qs
try:
import PyQt4
except Exception:
sys.exit("Error: Could not import PyQt4 on Linux systems, you may try 'sudo apt-get install python-qt4'")
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
import aes
import base64
import electrum_NMC
from electrum_NMC.plugins import BasePlugin, hook
from electrum_NMC.i18n import _
from electrum_NMC_gui.qt import HelpButton, EnterButton
class Plugin(BasePlugin):
    """Label Sync plugin: mirrors wallet labels to a remote web service.

    Labels, transaction ids and addresses are AES-encrypted (key derived
    from the wallet's master public key) before being sent to the server,
    so the service never sees plaintext.  Python 2 code (httplib,
    iteritems) kept as-is to match the rest of the file.
    """
    target_host = 'labelectrum.herokuapp.com'
    # Derived from the wallet's MPK in load_wallet(); None until a wallet
    # is loaded (set_label() checks this before encrypting).
    encode_password = None

    def fullname(self):
        return _('Label Sync')

    def description(self):
        return '%s\n\n%s%s%s' % (_("This plugin can sync your labels across multiple Electrum installs by using a remote database to save your data. Labels, transactions ids and addresses are encrypted before they are sent to the remote server. This code might increase the load of your wallet with a few microseconds as it will sync labels on each startup."), _("To get started visit"), " http://labelectrum.herokuapp.com/ ", _(" to sign up for an account."))

    def version(self):
        return "0.2.1"

    def encode(self, message):
        """Encrypt *message* with the wallet-derived key; return base64."""
        # BUGFIX: referenced the non-existent `electrum` module; this file
        # imports the package as `electrum_NMC`.
        encrypted = electrum_NMC.bitcoin.aes_encrypt_with_iv(self.encode_password, self.iv, message.encode('utf8'))
        encoded_message = base64.b64encode(encrypted)
        return encoded_message

    def decode(self, message):
        """Base64-decode *message* and decrypt it back to unicode text."""
        # BUGFIX: same `electrum` -> `electrum_NMC` correction as encode().
        decoded_message = electrum_NMC.bitcoin.aes_decrypt_with_iv(self.encode_password, self.iv, base64.b64decode(message)).decode('utf8')
        return decoded_message

    @hook
    def init_qt(self, gui):
        self.window = gui.main_window
        if not self.auth_token(): # First run, throw plugin settings in your face
            self.load_wallet(self.window.wallet)
            if self.settings_dialog():
                self.set_enabled(True)
                return True
            else:
                self.set_enabled(False)
                return False

    @hook
    def load_wallet(self, wallet):
        """Derive the encryption key/IV and wallet id from the wallet's
        master public key, then kick off a background label pull if we
        already have an API token."""
        self.wallet = wallet
        mpk = self.wallet.get_master_public_key()
        self.encode_password = hashlib.sha1(mpk).digest().encode('hex')[:32]
        self.iv = hashlib.sha256(self.encode_password).digest()[:16]
        self.wallet_id = hashlib.sha256(mpk).digest().encode('hex')
        addresses = []
        for account in self.wallet.accounts.values():
            for address in account.get_addresses(0):
                addresses.append(address)
        self.addresses = addresses
        if self.auth_token():
            # If there is an auth token we can try to actually start syncing
            threading.Thread(target=self.do_full_pull).start()

    def auth_token(self):
        # The API token is stored in the user config; None/empty disables sync.
        return self.config.get("plugin_label_api_key")

    def is_available(self):
        return True

    def requires_settings(self):
        return True

    @hook
    def set_label(self, item, label, changed):
        """Push a single changed label to the remote service."""
        if self.encode_password is None:
            return
        if not changed:
            return
        try:
            bundle = {"label": {"external_id": self.encode(item), "text": self.encode(label)}}
            params = json.dumps(bundle)
            connection = httplib.HTTPConnection(self.target_host)
            connection.request("POST", ("/api/wallets/%s/labels.json?auth_token=%s" % (self.wallet_id, self.auth_token())), params, {'Content-Type': 'application/json'})
            response = connection.getresponse()
            if response.reason == httplib.responses[httplib.NOT_FOUND]:
                return
            response = json.loads(response.read())
        except socket.gaierror as e:
            print_error('Error connecting to service: %s ' % e)
            return False

    def settings_widget(self, window):
        return EnterButton(_('Settings'), self.settings_dialog)

    def settings_dialog(self):
        """Show the settings dialog (API key entry, force push/pull)."""
        def check_for_api_key(api_key):
            # Enable the sync buttons only once a plausible key is entered.
            if api_key and len(api_key) > 12:
                self.config.set_key("plugin_label_api_key", str(self.auth_token_edit.text()))
                self.upload.setEnabled(True)
                self.download.setEnabled(True)
                self.accept.setEnabled(True)
            else:
                self.upload.setEnabled(False)
                self.download.setEnabled(False)
                self.accept.setEnabled(False)
        d = QDialog()
        layout = QGridLayout(d)
        layout.addWidget(QLabel("API Key: "),0,0)
        self.auth_token_edit = QLineEdit(self.auth_token())
        self.auth_token_edit.textChanged.connect(check_for_api_key)
        layout.addWidget(QLabel("Label sync options: "),2,0)
        layout.addWidget(self.auth_token_edit, 0,1,1,2)
        decrypt_key_text = QLineEdit(self.encode_password)
        decrypt_key_text.setReadOnly(True)
        layout.addWidget(decrypt_key_text, 1,1)
        layout.addWidget(QLabel("Decryption key: "),1,0)
        layout.addWidget(HelpButton("This key can be used on the LabElectrum website to decrypt your data in case you want to review it online."),1,2)
        self.upload = QPushButton("Force upload")
        self.upload.clicked.connect(self.full_push)
        layout.addWidget(self.upload, 2,1)
        self.download = QPushButton("Force download")
        self.download.clicked.connect(self.full_pull)
        layout.addWidget(self.download, 2,2)
        c = QPushButton(_("Cancel"))
        c.clicked.connect(d.reject)
        self.accept = QPushButton(_("Done"))
        self.accept.clicked.connect(d.accept)
        layout.addWidget(c,3,1)
        layout.addWidget(self.accept,3,2)
        check_for_api_key(self.auth_token())
        self.window.labelsChanged.connect(self.done_processing)
        if d.exec_():
            return True
        else:
            return False

    def done_processing(self):
        QMessageBox.information(None, _("Labels synchronised"), _("Your labels have been synchronised."))

    def full_push(self):
        # Run the upload off the GUI thread.
        threading.Thread(target=self.do_full_push).start()

    def full_pull(self):
        # Run the download off the GUI thread; force=True overwrites
        # existing local labels.
        threading.Thread(target=self.do_full_pull, args=([True])).start()

    def do_full_push(self):
        """Encrypt every local label and upload them in one batch."""
        try:
            bundle = {"labels": {}}
            for key, value in self.wallet.labels.iteritems():
                # NOTE(review): bare excepts kept from the original --
                # they skip labels that cannot be encoded.
                try:
                    encoded_key = self.encode(key)
                except:
                    print_error('cannot encode', repr(key))
                    continue
                try:
                    encoded_value = self.encode(value)
                except:
                    print_error('cannot encode', repr(value))
                    continue
                bundle["labels"][encoded_key] = encoded_value
            params = json.dumps(bundle)
            connection = httplib.HTTPConnection(self.target_host)
            connection.request("POST", ("/api/wallets/%s/labels/batch.json?auth_token=%s" % (self.wallet_id, self.auth_token())), params, {'Content-Type': 'application/json'})
            response = connection.getresponse()
            if response.reason == httplib.responses[httplib.NOT_FOUND]:
                # BUGFIX: was print_error('404 error' % e) -- a TypeError
                # ('%' with no conversion spec) on an undefined name `e`.
                print_error('404 error')
                return
            try:
                response = json.loads(response.read())
            except ValueError as e:
                print_error('Error loading labelsync response: %s' % e)
                return False
            if "error" in response:
                print_error('Error loading labelsync response.')
                return False
        except socket.gaierror as e:
            print_error('Error connecting to service: %s ' % e)
            return False
        self.window.labelsChanged.emit()

    def do_full_pull(self, force = False):
        """Download all labels and merge them into the wallet.

        When *force* is True remote labels overwrite local ones;
        otherwise only locally-unset labels are filled in.
        """
        connection = httplib.HTTPConnection(self.target_host)
        connection.request("GET", ("/api/wallets/%s/labels.json?auth_token=%s" % (self.wallet_id, self.auth_token())),"", {'Content-Type': 'application/json'})
        response = connection.getresponse()
        if response.status != 200:
            print_error("Cannot retrieve labels:", response.status, response.reason)
            return
        response = json.loads(response.read())
        if "error" in response:
            raise BaseException(_("Could not sync labels: %s" % response["error"]))
        for label in response:
            # NOTE(review): bare excepts kept from the original -- entries
            # that fail to decrypt or serialise are silently skipped.
            try:
                key = self.decode(label["external_id"])
            except:
                continue
            try:
                value = self.decode(label["text"])
            except:
                continue
            try:
                json.dumps(key)
                json.dumps(value)
            except:
                print_error('error: no json', key)
                continue
            if force or not self.wallet.labels.get(key):
                self.wallet.labels[key] = value
        self.wallet.storage.put('labels', self.wallet.labels)
        print_error("received %d labels"%len(response))
        self.window.labelsChanged.emit()
|
testalt/electrum-NMC
|
plugins/labels.py
|
Python
|
gpl-3.0
| 9,152
|
[
"VisIt"
] |
e52447b84b90895c317978c33750ff0f802d9d6a071acacc9061e0eec24b3bff
|
#--------------------------------------------------------------------------
# Software: InVesalius - Software de Reconstrucao 3D de Imagens Medicas
# Copyright: (C) 2001 Centro de Pesquisas Renato Archer
# Homepage: http://www.softwarepublico.gov.br
# Contact: invesalius@cti.gov.br
# License: GNU - GPL 2 (LICENSE.txt/LICENCA.txt)
#--------------------------------------------------------------------------
# Este programa e software livre; voce pode redistribui-lo e/ou
# modifica-lo sob os termos da Licenca Publica Geral GNU, conforme
# publicada pela Free Software Foundation; de acordo com a versao 2
# da Licenca.
#
# Este programa eh distribuido na expectativa de ser util, mas SEM
# QUALQUER GARANTIA; sem mesmo a garantia implicita de
# COMERCIALIZACAO ou de ADEQUACAO A QUALQUER PROPOSITO EM
# PARTICULAR. Consulte a Licenca Publica Geral GNU para obter mais
# detalhes.
#--------------------------------------------------------------------------
import os.path
import platform
import psutil
import sys
import wx
import itertools
from invesalius import utils
from invesalius import inv_paths
#from invesalius.project import Project
INVESALIUS_VERSION = "3.1.99997"
INVESALIUS_ACTUAL_FORMAT_VERSION = 1.1
#---------------
# Measurements
MEASURE_NAME_PATTERN = _("M %d")
MEASURE_LINEAR = 101
MEASURE_ANGULAR = 102
DEFAULT_MEASURE_COLOUR = (1,0,0)
DEFAULT_MEASURE_BG_COLOUR = (250/255.0, 247/255.0, 218/255.0)
DEFAULT_MEASURE_RADIUS = 1
DEFAULT_MEASURE_TYPE = MEASURE_LINEAR
PROP_MEASURE = 0.8
STEREO_OFF = _(" Off")
STEREO_RED_BLUE = _("Red-blue")
STEREO_CRISTAL = _("CristalEyes")
STEREO_INTERLACED = _("Interlaced")
STEREO_LEFT = _("Left")
STEREO_RIGHT = _("Right")
STEREO_DRESDEN = _("Dresden")
STEREO_CHECKBOARD = _("Checkboard")
STEREO_ANAGLYPH = _("Anaglyph")
# VTK text
TEXT_SIZE_SMALL = 11
TEXT_SIZE = 12
TEXT_SIZE_LARGE = 16
TEXT_SIZE_EXTRA_LARGE = 20
TEXT_SIZE_DIST_NAV = 32
TEXT_COLOUR = (1,1,1)
(X,Y) = (0.03, 0.97)
(XZ, YZ) = (0.05, 0.93)
TEXT_POS_LEFT_UP = (X, Y)
#------------------------------------------------------------------
TEXT_POS_LEFT_DOWN = (X, 1-Y) # SetVerticalJustificationToBottom
TEXT_POS_LEFT_DOWN_ZERO = (X, 1-YZ)
#------------------------------------------------------------------
TEXT_POS_RIGHT_UP = (1-X, Y) # SetJustificationToRight
#------------------------------------------------------------------
TEXT_POS_RIGHT_DOWN = (1-X, 1-Y) # SetVerticalJustificationToBottom &
# SetJustificationToRight
#------------------------------------------------------------------
TEXT_POS_HCENTRE_DOWN = (0.5, 1-Y) # SetJustificationToCentered
                                     # SetVerticalJustificationToBottom
TEXT_POS_HCENTRE_DOWN_ZERO = (0.5, 1-YZ)
#------------------------------------------------------------------
TEXT_POS_HCENTRE_UP = (0.5, Y) # SetJustificationToCentered
#------------------------------------------------------------------
TEXT_POS_VCENTRE_RIGHT = (1-X, 0.5) # SetVerticalJustificationToCentered
# SetJustificationToRight
TEXT_POS_VCENTRE_RIGHT_ZERO = (1-XZ, 0.5)
#------------------------------------------------------------------
TEXT_POS_VCENTRE_LEFT = (X, 0.5) # SetVerticalJustificationToCentered
#------------------------------------------------------------------
# Slice orientation
AXIAL = 1
CORONAL = 2
SAGITAL = 3
VOLUME = 4
SURFACE = 5
AXIAL_STR="AXIAL"
CORONAL_STR="CORONAL"
SAGITAL_STR="SAGITAL"
# Measure type
LINEAR = 6
ANGULAR = 7
DENSITY_ELLIPSE = 8
DENSITY_POLYGON = 9
# Colour representing each orientation
ORIENTATION_COLOUR = {'AXIAL': (1,0,0), # Red
'CORONAL': (0,1,0), # Green
'SAGITAL': (0,0,1)} # Blue
IMPORT_INTERVAL = [_("Keep all slices"), _("Skip 1 for each 2 slices"),
_("Skip 2 for each 3 slices"), _("Skip 3 for each 4 slices"),
_("Skip 4 for each 5 slices"),_("Skip 5 for each 6 slices")]
# Camera according to slice's orientation
#CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, -1, 0), "SAGITAL":(1, 0, 0)}
#CAM_VIEW_UP = {"AXIAL":(0, 1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
AXIAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, -1, 0), "SAGITAL":(1, 0, 0)}
AXIAL_SLICE_CAM_VIEW_UP = {"AXIAL":(0, 1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
SAGITAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, 1, 0), "SAGITAL":(-1, 0, 0)}
SAGITAL_SLICE_CAM_VIEW_UP = {"AXIAL":(0, -1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
CORONAL_SLICE_CAM_POSITION = {"AXIAL":(0, 0, 1), "CORONAL":(0, 1, 0), "SAGITAL":(-1, 0, 0)}
CORONAL_SLICE_CAM_VIEW_UP = {"AXIAL":(0, -1, 0), "CORONAL":(0, 0, 1), "SAGITAL":(0, 0, 1)}
SLICE_POSITION = {AXIAL:[AXIAL_SLICE_CAM_VIEW_UP, AXIAL_SLICE_CAM_POSITION],
SAGITAL:[SAGITAL_SLICE_CAM_VIEW_UP, SAGITAL_SLICE_CAM_POSITION],
CORONAL:[CORONAL_SLICE_CAM_VIEW_UP, CORONAL_SLICE_CAM_POSITION]}
#Project Status
#NEW_PROJECT = 0
#OPEN_PROJECT = 1
#CHANGE_PROJECT = 2
#SAVE_PROJECT = 3
PROJ_NEW = 0
PROJ_OPEN = 1
PROJ_CHANGE = 2
PROJ_CLOSE = 3
PROJ_MAX = 4
####
MODE_RP = 0
MODE_NAVIGATOR = 1
MODE_RADIOLOGY = 2
MODE_ODONTOLOGY = 3
#Crop box sides code
AXIAL_RIGHT = 1
AXIAL_LEFT = 2
AXIAL_UPPER = 3
AXIAL_BOTTOM = 4
SAGITAL_RIGHT = 5
SAGITAL_LEFT = 6
SAGITAL_UPPER = 7
SAGITAL_BOTTOM = 8
CORONAL_RIGHT = 9
CORONAL_LEFT = 10
CORONAL_UPPER = 11
CORONAL_BOTTOM = 12
CROP_PAN = 13
#Color Table from Slice
#NumberOfColors, SaturationRange, HueRange, ValueRange
SLICE_COLOR_TABLE = {_("Default "):(None,(0,0),(0,0),(0,1)),
_("Hue"):(None,(1,1),(0,1),(1,1)),
_("Saturation"):(None,(0,1),(0.6,0.6),(1,1)),
_("Desert"):(256, (1,1), (0, 0.1), (1,1)),
_("Rainbow"):(256,(1,1),(0,0.8),(1,1)),
_("Ocean"):(256,(1,1),(0.667, 0.5),(1,1)),
_("Inverse Gray"):(256, (0, 0), (0, 0), (1,0)),
}
# Volume view angle
VOL_FRONT = wx.NewId()
VOL_BACK = wx.NewId()
VOL_RIGHT = wx.NewId()
VOL_LEFT = wx.NewId()
VOL_TOP = wx.NewId()
VOL_BOTTOM = wx.NewId()
VOL_ISO = wx.NewId()
# Camera according to volume's orientation
AXIAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,0,1), VOL_BACK:(0,0,1), VOL_RIGHT:(0,0,1),\
VOL_LEFT:(0,0,1), VOL_TOP:(0,1,0), VOL_BOTTOM:(0,-1,0),\
VOL_ISO:(0,0,1)}
AXIAL_VOLUME_CAM_POSITION = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,1,0), VOL_RIGHT:(-1,0,0),\
VOL_LEFT:(1,0,0), VOL_TOP:(0,0,1), VOL_BOTTOM:(0,0,-1),\
VOL_ISO:(0.5,-1,0.5)}
SAGITAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,-1,0), VOL_RIGHT:(0,-1,1),\
VOL_LEFT:(0,-1,1), VOL_TOP:(1,-1,0), VOL_BOTTOM:(-1,1,0),\
VOL_ISO:(0,-1,0)}
SAGITAL_VOLUME_CAM_POSITION = {VOL_FRONT:(-1,0,0), VOL_BACK:(1,0,0), VOL_RIGHT:(0,0,1),\
VOL_LEFT:(0,0,-1), VOL_TOP:(0,-1,0), VOL_BOTTOM:(0,1,0),\
VOL_ISO:(-1,-0.5,-0.5)}
CORONAL_VOLUME_CAM_VIEW_UP = {VOL_FRONT:(0,-1,0), VOL_BACK:(0,-1,0), VOL_RIGHT:(0,-1,0),\
VOL_LEFT:(0,-1,0), VOL_TOP:(0,1,0), VOL_BOTTOM:(0,-1,0),\
VOL_ISO:(0,-1,0)}
CORONAL_VOLUME_CAM_POSITION = {VOL_FRONT:(0,0,-1), VOL_BACK:(0,0,1), VOL_RIGHT:(-1,0,0),\
VOL_LEFT:(1,0,0), VOL_TOP:(0,-1,0), VOL_BOTTOM:(0,1,0),\
VOL_ISO:(0.5,-0.5,-1)}
VOLUME_POSITION = {AXIAL: [AXIAL_VOLUME_CAM_VIEW_UP, AXIAL_VOLUME_CAM_POSITION],
SAGITAL: [SAGITAL_VOLUME_CAM_VIEW_UP, SAGITAL_VOLUME_CAM_POSITION],
CORONAL: [CORONAL_VOLUME_CAM_VIEW_UP, CORONAL_VOLUME_CAM_POSITION]}
# Mask threshold options
#proj = Project()
#THRESHOLD_RANGE = proj.threshold_modes[_("Bone")]
THRESHOLD_RANGE = [0,3033]
THRESHOLD_PRESETS_INDEX = _("Bone")
THRESHOLD_HUE_RANGE = (0, 0.6667)
THRESHOLD_INVALUE = 5000
THRESHOLD_OUTVALUE = 0
# Mask properties
MASK_NAME_PATTERN = _("Mask %d")
MASK_OPACITY = 0.40
#MASK_OPACITY = 0.35
MASK_COLOUR = [[0.33, 1, 0.33],
[1, 1, 0.33],
[0.33, 0.91, 1],
[1, 0.33, 1],
[1, 0.68, 0.33],
[1, 0.33, 0.33],
[0.33333333333333331, 0.33333333333333331, 1.0],
#(1.0, 0.33333333333333331, 0.66666666666666663),
[0.74901960784313726, 1.0, 0.0],
[0.83529411764705885, 0.33333333333333331, 1.0]]#,
#(0.792156862745098, 0.66666666666666663, 1.0),
#(1.0, 0.66666666666666663, 0.792156862745098), # too "light"
#(0.33333333333333331, 1.0, 0.83529411764705885),#],
#(1.0, 0.792156862745098, 0.66666666666666663),
#(0.792156862745098, 1.0, 0.66666666666666663), # too "light"
#(0.66666666666666663, 0.792156862745098, 1.0)]
MEASURE_COLOUR = itertools.cycle([[1, 0, 0],
[1, 0.4, 0],
[0, 0, 1],
[1, 0, 1],
[0, 0.6, 0]])
SURFACE_COLOUR = [(0.33, 1, 0.33),
(1, 1, 0.33),
(0.33, 0.91, 1),
(1, 0.33, 1),
(1, 0.68, 0.33),
(1, 0.33, 0.33),
(0.33333333333333331, 0.33333333333333331, 1.0),
(1.0, 0.33333333333333331, 0.66666666666666663),
(0.74901960784313726, 1.0, 0.0),
(0.83529411764705885, 0.33333333333333331, 1.0),
(0.792156862745098, 0.66666666666666663, 1.0),
(1.0, 0.66666666666666663, 0.792156862745098),
(0.33333333333333331, 1.0, 0.83529411764705885),
(1.0, 0.792156862745098, 0.66666666666666663),
(0.792156862745098, 1.0, 0.66666666666666663),
(0.66666666666666663, 0.792156862745098, 1.0)]
# Related to slice editor brush
BRUSH_CIRCLE = 0 #
BRUSH_SQUARE = 1
DEFAULT_BRUSH_FORMAT = BRUSH_CIRCLE
BRUSH_DRAW = 0
BRUSH_ERASE = 1
BRUSH_THRESH = 2
BRUSH_THRESH_ERASE = 3
BRUSH_THRESH_ADD_ONLY = 4
BRUSH_THRESH_ERASE_ONLY = 5
DEFAULT_BRUSH_OP = BRUSH_THRESH
BRUSH_OP_NAME = [_("Draw"), _("Erase"), _("Threshold")]
BRUSH_COLOUR = (0,0,1.0)
BRUSH_SIZE = 30
BRUSH_MAX_SIZE = 100
# Surface creation values. Each element's list contains:
# 0: imagedata reformat ratio
# 1: smooth_iterations
# 2: smooth_relaxation_factor
# 3: decimate_reduction
SURFACE_QUALITY = {
_("Low"): (3, 2, 0.3000, 0.4),
_("Medium"): (2, 2, 0.3000, 0.4),
_("High"): (0, 1, 0.3000, 0.1),
_("Optimal *"): (0, 2, 0.3000, 0.4)}
DEFAULT_SURFACE_QUALITY = _("Optimal *")
SURFACE_QUALITY_LIST = [_("Low"),_("Medium"),_("High"),_("Optimal *")]
# Surface properties
SURFACE_TRANSPARENCY = 0.0
SURFACE_NAME_PATTERN = _("Surface %d")
# Imagedata - window and level presets
WINDOW_LEVEL = {_("Abdomen"):(350,50),
_("Bone"):(2000, 300),
_("Brain posterior fossa"):(120,40),
_("Brain"):(80,40),
_("Default"):(None, None), #Control class set window and level from DICOM
_("Emphysema"):(500,-850),
_("Ischemia - Hard, non contrast"):(15,32),
_("Ischemia - Soft, non contrast"):(80,20),
_("Larynx"):(180, 80),
_("Liver"):(2000, -500),
_("Lung - Soft"):(1600,-600),
_("Lung - Hard"):(1000,-600),
_("Mediastinum"):(350,25),
_("Manual"):(None, None), #Case the user change window and level
_("Pelvis"): (450,50),
_("Sinus"):(4000, 400),
_("Vasculature - Hard"):(240,80),
_("Vasculature - Soft"):(650,160),
_("Contour"): (255, 127)}
REDUCE_IMAGEDATA_QUALITY = 0
# PATHS
FS_ENCODE = sys.getfilesystemencoding()
ID_TO_BMP = {VOL_FRONT: [_("Front"), str(inv_paths.ICON_DIR.joinpath("view_front.png"))],
VOL_BACK: [_("Back"), str(inv_paths.ICON_DIR.joinpath("view_back.png"))],
VOL_TOP: [_("Top"), str(inv_paths.ICON_DIR.joinpath("view_top.png"))],
VOL_BOTTOM: [_("Bottom"), str(inv_paths.ICON_DIR.joinpath("view_bottom.png"))],
VOL_RIGHT: [_("Right"), str(inv_paths.ICON_DIR.joinpath("view_right.png"))],
VOL_LEFT: [_("Left"), str(inv_paths.ICON_DIR.joinpath("view_left.png"))],
VOL_ISO:[_("Isometric"), str(inv_paths.ICON_DIR.joinpath("view_isometric.png"))]
}
# if 1, use vtkVolumeRaycastMapper, if 0, use vtkFixedPointVolumeRayCastMapper
TYPE_RAYCASTING_MAPPER = 0
# Display name -> raycasting preset file.
# FIX: the _("Soft on white") entry was listed twice with the same value;
# the duplicate key has been removed (dict semantics made the second
# occurrence a silent no-op, but it was misleading).
RAYCASTING_FILES = {_("Airways"): "Airways.plist",
                    _("Airways II"): "Airways II.plist",
                    _("Black & White"): "Black & White.plist",
                    _("Bone + Skin"): "Bone + Skin.plist",
                    _("Bone + Skin II"): "Bone + Skin II.plist",
                    _("Dark bone"): "Dark Bone.plist",
                    _("Glossy"): "Glossy.plist",
                    _("Glossy II"): "Glossy II.plist",
                    _("Gold bone"): "Gold Bone.plist",
                    _("High contrast"): "High Contrast.plist",
                    _("Low contrast"): "Low Contrast.plist",
                    _("Soft on white"): "Soft on White.plist",
                    _("Mid contrast"): "Mid Contrast.plist",
                    _("MIP"): "MIP.plist",
                    _("No shading"): "No Shading.plist",
                    _("Pencil"): "Pencil.plist",
                    _("Red on white"): "Red on White.plist",
                    _("Skin on blue"): "Skin On Blue.plist",
                    _("Skin on blue II"): "Skin On Blue II.plist",
                    _("Soft + Skin"): "Soft + Skin.plist",
                    _("Soft + Skin II"): "Soft + Skin II.plist",
                    _("Soft + Skin III"): "Soft + Skin III.plist",
                    _("Soft on blue"): "Soft On Blue.plist",
                    _("Soft"): "Soft.plist",
                    _("Standard"): "Standard.plist",
                    _("Vascular"): "Vascular.plist",
                    _("Vascular II"): "Vascular II.plist",
                    _("Vascular III"): "Vascular III.plist",
                    _("Vascular IV"): "Vascular IV.plist",
                    _("Yellow bone"): "Yellow Bone.plist"}
#RAYCASTING_TYPES = [_(filename.split(".")[0]) for filename in
# os.listdir(folder) if
# os.path.isfile(os.path.join(folder,filename))]
RAYCASTING_TYPES = [_(filename.name.split(".")[0]) for filename in
inv_paths.USER_RAYCASTING_PRESETS_DIRECTORY.glob('*') if
filename.is_file()]
RAYCASTING_TYPES += RAYCASTING_FILES.keys()
RAYCASTING_TYPES.append(_(' Off'))
RAYCASTING_TYPES.sort()
RAYCASTING_OFF_LABEL = _(' Off')
RAYCASTING_TOOLS = [_("Cut plane")]
# If 0 dont't blur, 1 blur
RAYCASTING_WWWL_BLUR = 0
RAYCASTING_PRESETS_FOLDERS = (inv_paths.RAYCASTING_PRESETS_DIRECTORY,
inv_paths.USER_RAYCASTING_PRESETS_DIRECTORY)
####
#MODE_ZOOM = 0 #"Set Zoom Mode",
#MODE_ZOOM_SELECTION = 1 #:"Set Zoom Select Mode",
#MODE_ROTATE = 2#:"Set Spin Mode",
#MODE_MOVE = 3#:"Set Pan Mode",
#MODE_WW_WL = 4#:"Bright and contrast adjustment"}
#MODE_LINEAR_MEASURE = 5
# self.states = {0:"Set Zoom Mode", 1:"Set Zoom Select Mode",
# 2:"Set Spin Mode", 3:"Set Pan Mode",
# 4:"Bright and contrast adjustment"}
#ps.Publisher().sendMessage('Set interaction mode %d'%
# (MODE_BY_ID[id]))
#('Set Editor Mode')
#{0:"Set Change Slice Mode"}
####
MODE_SLICE_SCROLL = -1
MODE_SLICE_EDITOR = -2
MODE_SLICE_CROSS = -3
############
FILETYPE_IV = wx.NewId()
FILETYPE_RIB = wx.NewId()
FILETYPE_STL = wx.NewId()
FILETYPE_STL_ASCII = wx.NewId()
FILETYPE_VRML = wx.NewId()
FILETYPE_OBJ = wx.NewId()
FILETYPE_VTP = wx.NewId()
FILETYPE_PLY = wx.NewId()
FILETYPE_X3D = wx.NewId()
FILETYPE_IMAGEDATA = wx.NewId()
FILETYPE_BMP = wx.NewId()
FILETYPE_JPG = wx.NewId()
FILETYPE_PNG = wx.NewId()
FILETYPE_PS = wx.NewId()
FILETYPE_POV = wx.NewId()
FILETYPE_TIF = wx.NewId()
IMAGE_TILING = {"1 x 1":(1,1), "1 x 2":(1,2),
"1 x 3":(1,3), "1 x 4":(1,4),
"2 x 1":(2,1), "2 x 2":(2,2),
"2 x 3":(2,3), "2 x 4":(2,4),
"3 x 1":(3,1), "3 x 2":(3,2),
"3 x 3":(3,3), "3 x 4":(3,4),
"4 x 1":(4,1), "4 x 2":(4,2),
"4 x 3":(4,3), "4 x 4":(4,4),
"4 x 5":(4,5), "5 x 4":(5,4)}
VTK_WARNING = 0
#----------------------------------------------------------
[ID_DICOM_IMPORT, ID_PROJECT_OPEN, ID_PROJECT_SAVE_AS, ID_PROJECT_SAVE,
ID_PROJECT_CLOSE, ID_EXPORT_SLICE, ID_PROJECT_PROPERTIES, ID_EXPORT_MASK, ID_PROJECT_INFO,
ID_SAVE_SCREENSHOT, ID_DICOM_LOAD_NET, ID_PRINT_SCREENSHOT,
ID_IMPORT_OTHERS_FILES, ID_PREFERENCES, ID_DICOM_NETWORK, ID_TIFF_JPG_PNG,
ID_VIEW_INTERPOLATED, ID_MODE_NAVIGATION, ID_ANALYZE_IMPORT, ID_NIFTI_IMPORT,
ID_PARREC_IMPORT, ID_MODE_DBS] = [wx.NewId() for number in range(22)]
ID_EXIT = wx.ID_EXIT
ID_ABOUT = wx.ID_ABOUT
[ID_EDIT_UNDO, ID_EDIT_REDO, ID_EDIT_LIST] =\
[wx.NewId() for number in range(3)]
[ID_TOOL_PROJECT, ID_TOOL_LAYOUT, ID_TOOL_OBJECT, ID_TOOL_SLICE] =\
[wx.NewId() for number in range(4)]
[ID_TASK_BAR, ID_VIEW_FOUR] =\
[wx.NewId() for number in range(2)]
[ID_VIEW_FULL, ID_VIEW_TEXT, ID_VIEW_3D_BACKGROUND] =\
[wx.NewId() for number in range(3)]
ID_START = wx.NewId()
ID_PLUGINS_SHOW_PATH = wx.NewId()
ID_FLIP_X = wx.NewId()
ID_FLIP_Y = wx.NewId()
ID_FLIP_Z = wx.NewId()
ID_SWAP_XY = wx.NewId()
ID_SWAP_XZ = wx.NewId()
ID_SWAP_YZ = wx.NewId()
ID_BOOLEAN_MASK = wx.NewId()
ID_CLEAN_MASK = wx.NewId()
ID_REORIENT_IMG = wx.NewId()
ID_FLOODFILL_MASK = wx.NewId()
ID_FILL_HOLE_AUTO = wx.NewId()
ID_REMOVE_MASK_PART = wx.NewId()
ID_SELECT_MASK_PART = wx.NewId()
ID_MANUAL_SEGMENTATION = wx.NewId()
ID_WATERSHED_SEGMENTATION = wx.NewId()
ID_THRESHOLD_SEGMENTATION = wx.NewId()
# FIX: this id was accidentally assigned twice (two wx.NewId() calls in a
# row); the duplicate assignment wasted an id and has been removed.
ID_FLOODFILL_SEGMENTATION = wx.NewId()
ID_SEGMENTATION_BRAIN = wx.NewId()
ID_CROP_MASK = wx.NewId()
ID_DENSITY_MEASURE = wx.NewId()
ID_MASK_DENSITY_MEASURE = wx.NewId()
ID_CREATE_SURFACE = wx.NewId()
ID_CREATE_MASK = wx.NewId()
ID_MASK_3D_PREVIEW = wx.NewId()
ID_MASK_3D_RELOAD = wx.NewId()
ID_MASK_3D_AUTO_RELOAD = wx.NewId()
ID_GOTO_SLICE = wx.NewId()
ID_GOTO_COORD = wx.NewId()
ID_MANUAL_WWWL = wx.NewId()
# Tractography with Trekker
ID_TREKKER_MASK = wx.NewId()
ID_TREKKER_IMG = wx.NewId()
ID_TREKKER_FOD = wx.NewId()
ID_TREKKER_ACT = wx.NewId()
#---------------------------------------------------------
STATE_DEFAULT = 1000
STATE_WL = 1001
STATE_SPIN = 1002
STATE_ZOOM = 1003
STATE_ZOOM_SL = 1004
STATE_PAN = 1005
STATE_ANNOTATE = 1006
STATE_MEASURE_DISTANCE = 1007
STATE_MEASURE_ANGLE = 1008
STATE_MEASURE_DENSITY = 1009
STATE_MEASURE_DENSITY_ELLIPSE = 1010
STATE_MEASURE_DENSITY_POLYGON = 1011
SLICE_STATE_CROSS = 3006
SLICE_STATE_SCROLL = 3007
SLICE_STATE_EDITOR = 3008
SLICE_STATE_WATERSHED = 3009
SLICE_STATE_REORIENT = 3010
SLICE_STATE_MASK_FFILL = 3011
SLICE_STATE_REMOVE_MASK_PARTS = 3012
SLICE_STATE_SELECT_MASK_PARTS = 3013
SLICE_STATE_FFILL_SEGMENTATION = 3014
SLICE_STATE_CROP_MASK = 3015
SLICE_STATE_TRACTS = 3016
VOLUME_STATE_SEED = 2001
# STATE_LINEAR_MEASURE = 3001
# STATE_ANGULAR_MEASURE = 3002
TOOL_STATES = [STATE_WL, STATE_SPIN, STATE_ZOOM,
STATE_ZOOM_SL, STATE_PAN, STATE_MEASURE_DISTANCE,
STATE_MEASURE_ANGLE, STATE_MEASURE_DENSITY_ELLIPSE,
STATE_MEASURE_DENSITY_POLYGON,
] #, STATE_ANNOTATE]
TOOL_SLICE_STATES = [SLICE_STATE_CROSS, SLICE_STATE_SCROLL,
SLICE_STATE_REORIENT, SLICE_STATE_TRACTS]
SLICE_STYLES = TOOL_STATES + TOOL_SLICE_STATES
SLICE_STYLES.append(STATE_DEFAULT)
SLICE_STYLES.append(SLICE_STATE_EDITOR)
SLICE_STYLES.append(SLICE_STATE_WATERSHED)
SLICE_STYLES.append(SLICE_STATE_MASK_FFILL)
SLICE_STYLES.append(SLICE_STATE_REMOVE_MASK_PARTS)
SLICE_STYLES.append(SLICE_STATE_SELECT_MASK_PARTS)
SLICE_STYLES.append(SLICE_STATE_FFILL_SEGMENTATION)
SLICE_STYLES.append(SLICE_STATE_CROP_MASK)
SLICE_STYLES.append(STATE_MEASURE_DENSITY)
SLICE_STYLES.append(STATE_MEASURE_DENSITY_ELLIPSE)
SLICE_STYLES.append(STATE_MEASURE_DENSITY_POLYGON)
STYLE_LEVEL = {SLICE_STATE_EDITOR: 1,
SLICE_STATE_WATERSHED: 1,
SLICE_STATE_MASK_FFILL: 2,
SLICE_STATE_REMOVE_MASK_PARTS: 2,
SLICE_STATE_SELECT_MASK_PARTS: 2,
SLICE_STATE_FFILL_SEGMENTATION: 2,
SLICE_STATE_CROSS: 2,
SLICE_STATE_SCROLL: 2,
SLICE_STATE_REORIENT: 2,
SLICE_STATE_CROP_MASK: 1,
STATE_ANNOTATE: 2,
STATE_DEFAULT: 0,
STATE_MEASURE_ANGLE: 2,
STATE_MEASURE_DISTANCE: 2,
STATE_MEASURE_DENSITY_ELLIPSE: 2,
STATE_MEASURE_DENSITY_POLYGON: 2,
STATE_MEASURE_DENSITY: 2,
STATE_WL: 2,
STATE_SPIN: 2,
STATE_ZOOM: 2,
STATE_ZOOM_SL: 2,
STATE_PAN:2,
VOLUME_STATE_SEED:1}
#------------ Prefereces options key ------------
RENDERING = 0
SURFACE_INTERPOLATION = 1
LANGUAGE = 2
SLICE_INTERPOLATION = 3
#Correlaction extracted from pyDicom
DICOM_ENCODING_TO_PYTHON = {
'None':'iso8859',
None:'iso8859',
'': 'iso8859',
'ISO_IR 6': 'iso8859',
'ISO_IR 100': 'latin_1',
'ISO 2022 IR 87': 'iso2022_jp',
'ISO 2022 IR 13': 'iso2022_jp',
'ISO 2022 IR 149': 'euc_kr',
'ISO_IR 192': 'UTF8',
'GB18030': 'GB18030',
'ISO_IR 126': 'iso_ir_126',
'ISO_IR 127': 'iso_ir_127',
'ISO_IR 138': 'iso_ir_138',
'ISO_IR 144': 'iso_ir_144',
}
#-------------------- Projections type ----------------
PROJECTION_NORMAL=0
PROJECTION_MaxIP=1
PROJECTION_MinIP=2
PROJECTION_MeanIP=3
PROJECTION_LMIP=4
PROJECTION_MIDA=5
PROJECTION_CONTOUR_MIP=6
PROJECTION_CONTOUR_LMIP=7
PROJECTION_CONTOUR_MIDA=8
#------------ Projections defaults ------------------
PROJECTION_BORDER_SIZE=1.0
PROJECTION_MIP_SIZE=2
# ------------- Boolean operations ------------------
BOOLEAN_UNION = 1
BOOLEAN_DIFF = 2
BOOLEAN_AND = 3
BOOLEAN_XOR = 4
# -------------- User interface ---------------------
# The column order in the marker panel
#
ID_COLUMN = 0
SESSION_COLUMN = 1
LABEL_COLUMN = 2
TARGET_COLUMN = 3
X_COLUMN = 4
Y_COLUMN = 5
Z_COLUMN = 6
#------------ Navigation defaults -------------------
MARKER_COLOUR = (1.0, 1.0, 0.)
MARKER_SIZE = 2
ARROW_MARKER_SIZE = 10
CALIBRATION_TRACKER_SAMPLES = 10
FIDUCIAL_REGISTRATION_ERROR_THRESHOLD = 3.0
SELECT = 0
MTC = 1
FASTRAK = 2
ISOTRAKII = 3
PATRIOT = 4
CAMERA = 5
POLARIS = 6
POLARISP4 = 7
OPTITRACK = 8
ROBOT = 9
DEBUGTRACKRANDOM = 10
DEBUGTRACKAPPROACH = 11
DEFAULT_TRACKER = SELECT
NDICOMPORT = b'COM1'
TRACKERS = [_("Claron MicronTracker"),
_("Polhemus FASTRAK"), _("Polhemus ISOTRAK II"),
_("Polhemus PATRIOT"), _("Camera tracker"),
_("NDI Polaris"), _("NDI Polaris P4"),
_("Optitrack"), _("Robot tracker"),
_("Debug tracker (random)"), _("Debug tracker (approach)")]
STATIC_REF = 0
DYNAMIC_REF = 1
# --- Neuronavigation reference-mode / coil options ---
# NOTE(review): DYNAMIC_REF and SELECT are defined earlier in this module — confirm.
DEFAULT_REF_MODE = DYNAMIC_REF
REF_MODE = [_("Static ref."), _("Dynamic ref.")]
FT_SENSOR_MODE = [_("Sensor 3"), _("Sensor 4")]
DEFAULT_COIL = SELECT
COIL = [_("Select coil:"), _("Neurosoft Figure-8"),
        _("Magstim 70 mm"), _("Nexstim")]
# wx widget ids for the image (IR*) and tracker (TR*) fiducial buttons
IR1 = wx.NewId()
IR2 = wx.NewId()
IR3 = wx.NewId()
TR1 = wx.NewId()
TR2 = wx.NewId()
TR3 = wx.NewId()
SET = wx.NewId()
# Fiducials selected on the image volume: left ear, right ear, nasion.
# 'fiducial_index' is the row index used by the registration code.
IMAGE_FIDUCIALS = [
    {
        'button_id': IR1,
        'label': 'LEI',
        'fiducial_name': 'LE',
        'fiducial_index': 0,
        'tip': _("Select left ear in image"),
    },
    {
        'button_id': IR2,
        'label': 'REI',
        'fiducial_name': 'RE',
        'fiducial_index': 1,
        'tip': _("Select right ear in image"),
    },
    {
        'button_id': IR3,
        'label': 'NAI',
        'fiducial_name': 'NA',
        'fiducial_index': 2,
        'tip': _("Select nasion in image"),
    },
]
# Same three anatomical fiducials, but acquired with the spatial tracker.
TRACKER_FIDUCIALS = [
    {
        'button_id': TR1,
        'label': 'LET',
        'fiducial_name': 'LE',
        'fiducial_index': 0,
        'tip': _("Select left ear with spatial tracker"),
    },
    {
        'button_id': TR2,
        'label': 'RET',
        'fiducial_name': 'RE',
        'fiducial_index': 1,
        'tip': _("Select right ear with spatial tracker"),
    },
    {
        'button_id': TR3,
        'label': 'NAT',
        'fiducial_name': 'NA',
        'fiducial_index': 2,
        'tip': _("Select nasion with spatial tracker"),
    },
]
# Maps each image-fiducial button id to {fiducial_index: marker label}
BTNS_IMG_MARKERS = {IR1: {0: 'LEI'},
                    IR2: {1: 'REI'},
                    IR3: {2: 'NAI'}}
# wx widget ids for the object (coil) fiducial buttons
OBJL = wx.NewId()
OBJR = wx.NewId()
OBJA = wx.NewId()
OBJC = wx.NewId()
OBJF = wx.NewId()
# Fiducials used to register the tracked object (e.g. the TMS coil)
OBJECT_FIDUCIALS = [
    {
        'fiducial_index': 0,
        'button_id': OBJL,
        'label': _('Left'),
        'tip': _("Select left object fiducial"),
    },
    {
        'fiducial_index': 1,
        'button_id': OBJR,
        'label': _('Right'),
        'tip': _("Select right object fiducial"),
    },
    {
        'fiducial_index': 2,
        'button_id': OBJA,
        'label': _('Anterior'),
        'tip': _("Select anterior object fiducial"),
    },
    {
        'fiducial_index': 3,
        'button_id': OBJC,
        'label': _('Center'),
        'tip': _("Select object center"),
    },
    {
        'fiducial_index': 4,
        'button_id': OBJF,
        'label': _('Fixed'),
        'tip': _("Attach sensor to object"),
    },
]
# Sensor names as reported by the MicronTracker (MTC) device
MTC_PROBE_NAME = "1Probe"
MTC_REF_NAME = "2Ref"
MTC_OBJ_NAME = "3Coil"
# Object tracking
ARROW_SCALE = 6
ARROW_UPPER_LIMIT = 15
#COIL_ANGLES_THRESHOLD = 3 * ARROW_SCALE
COIL_ANGLES_THRESHOLD = 3
COIL_COORD_THRESHOLD = 3
TIMESTAMP = 2.0
COIL_ANGLE_ARROW_PROJECTION_THRESHOLD = 5
CAM_MODE = True
# Tractography visualization
N_TRACTS = 200
PEEL_DEPTH = 10
MAX_PEEL_DEPTH = 40
SEED_OFFSET = 30
SEED_RADIUS = 1.5
# Increased the default sleep parameter from 0.1 to 0.15 to decrease CPU load during navigation.
SLEEP_NAVIGATION = 0.2
SLEEP_COORDINATES = 0.05
SLEEP_ROBOT = 0.01
BRAIN_OPACITY = 0.6
N_CPU = psutil.cpu_count()
# Default parameters for the Trekker fiber-tracking library.
# the max_sampling_step can be set to something different as well. Above 100 is probably not necessary
TREKKER_CONFIG = {'seed_max': 1,
                  'step_size': 0.03125,
                  'min_fod': 0.05,
                  'probe_quality': 3,
                  'max_interval': 1,
                  'min_radius_curvature': 0.625,
                  'probe_length': 0.15625,
                  'write_interval': 50,
                  'numb_threads': '',
                  'max_length': 250,
                  'min_length': 10,
                  'max_sampling_step': 100,
                  'data_support_exponent': 0.5,
                  'use_best_init': True,
                  'init_max_est_trials': 100}
# Magic header written at the top of marker files for format detection
MARKER_FILE_MAGICK_STRING = "##INVESALIUS3_MARKER_FILE_"
CURRENT_MARKER_FILE_VERSION = 0
WILDCARD_MARKER_FILES = _("Marker scanner coord files (*.mkss)|*.mkss")
# Serial port
BAUD_RATES = [300, 1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200]
BAUD_RATE_DEFAULT_SELECTION = 4
PULSE_DURATION_IN_MILLISECONDS = 0.2
#Robot
ROBOT_ElFIN_IP = ['143.107.220.251', '169.254.153.251', '127.0.0.1']
ROBOT_ElFIN_PORT = 10003
ROBOT_MOTIONS = {"normal": 0, "linear out": 1, "arc": 2}
ROBOT_HEAD_VELOCITY_THRESHOLD = 10 #mm/s
ROBOT_ARC_THRESHOLD_DISTANCE = 100 #mm
ROBOT_VERSOR_SCALE_FACTOR = 70
#Robot Working Space is defined as 800mm in Elfin manual. For safety, the value is reduced by 5%.
ROBOT_WORKING_SPACE = 760 #mm
# Status codes reported by the Elfin robot controller
ROBOT_MOVE_STATE = {"free to move": 0,
                    "in motion": 1009,
                    "waiting for execution": 1013,
                    "error": 1025}
|
paulojamorim/invesalius3
|
invesalius/constants.py
|
Python
|
gpl-2.0
| 28,589
|
[
"VTK"
] |
9f3836ccf2098e7857dfc09f615d3a6f5118f722782e2314bc8f96cb1e7fa17e
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
from PyQt5.QtWidgets import QFileDialog, QPlainTextEdit, QSizePolicy, QMessageBox
from PyQt5 import QtCore
from peacock.utils import WidgetUtils
from peacock.base.Plugin import Plugin
from peacock.utils.RecentlyUsedMenu import RecentlyUsedMenu
from .CheckInputWidget import CheckInputWidget
from .InputFileEditor import InputFileEditor
class InputFileEditorPlugin(InputFileEditor, Plugin):
    """
    The widget to edit the input file.
    In addition to InputFileEditor, this class adds menus and
    is available as a Plugin.
    """
    def __init__(self, **kwds):
        """
        Set up the editor widget, the input-check dialog, the read-only
        input-file viewer, and the menu-action placeholders (the actions
        themselves are created later in addToMenu()).
        """
        super(InputFileEditorPlugin, self).__init__(**kwds)
        self.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        # Menu actions are created lazily in addToMenu(); until then they are None
        self._menus_initialized = False
        self._recently_used_menu = None
        self._save_action = None
        self._open_action = None
        self._save_as_action = None
        self._clear_action = None
        self._check_action = None
        self._view_file = None
        # Dialog that runs the executable's input check; it asks us to write
        # the current tree to a file via the needInputFile signal.
        self.check_widget = CheckInputWidget()
        self.check_widget.needInputFile.connect(self.writeInputFile)
        self.check_widget.hide()
        self.blockChanged.connect(self._updateChanged)
        # Standalone read-only window showing the rendered input file text
        self.input_file_view = QPlainTextEdit()
        self.input_file_view.setReadOnly(True)
        self.input_file_view.setWindowFlags(QtCore.Qt.Window)
        self.input_file_view.resize(640, 480)
        # True once the block tree has unsaved modifications
        self.has_changed = False
        # NOTE(review): self._preferences presumably comes from Plugin — confirm.
        self._preferences.addInt("input/maxRecentlyUsed",
                "Max number of input files",
                20,
                1,
                50,
                "Set the maximum number of recent input files that have been used.",
                )
        self.setup()

    def _updateChanged(self, block, tree):
        """Slot for blockChanged: mark the input tree as dirty."""
        self.has_changed = True

    def _askToSave(self, app_info, reason):
        """
        Prompt the user to save unsaved changes before an operation that
        would discard them (only when the new executable syntax is
        incompatible with the current tree).

        Input:
            app_info: ExecutableInfo for the (new) executable.
            reason[str]: text prepended to the prompt explaining why.
        """
        if self.has_changed and app_info.valid() and self.tree and self.tree.input_filename and self.tree.incompatibleChanges(app_info):
            msg = "%s\nYou have unsaved changes in your input file, do you want to save?" % reason
            reply = QMessageBox.question(self, "Save?", msg, QMessageBox.Save, QMessageBox.Discard)
            if reply == QMessageBox.Save:
                self._saveInputFile()

    def executableInfoChanged(self, app_info):
        """Reload syntax from a new executable, offering to save first."""
        self._askToSave(app_info, "Reloading syntax from executable.")
        super(InputFileEditorPlugin, self).executableInfoChanged(app_info)
        self._setMenuStatus()
        self.has_changed = False

    def setInputFile(self, input_file):
        """
        Load a new input file (offering to save unsaved changes first) and
        update the recently-used menu. Returns the parent's success value.
        """
        self._askToSave(self.tree.app_info, "Changing input files.")
        val = super(InputFileEditorPlugin, self).setInputFile(input_file)
        if self._menus_initialized:
            path = os.path.abspath(input_file)
            # Only keep files that still exist in the recently-used list
            if os.path.exists(path):
                self._recently_used_menu.update(path)
            else:
                self._recently_used_menu.removeEntry(path)
            self._setMenuStatus()
        self.has_changed = False
        return val

    def _openInputFile(self):
        """
        Ask the user what input file to open.
        """
        input_name, other = QFileDialog.getOpenFileName(self, "Choose input file", os.getcwd(), "Input File (*.i)")
        if input_name:
            input_name = os.path.abspath(input_name)
            success = self.setInputFile(input_name)
            if success:
                self._recently_used_menu.update(input_name)
            else:
                self._recently_used_menu.removeEntry(input_name)

    def _saveInputFile(self):
        """
        Save the current input tree to the current filename.
        """
        self.writeInputFile(self.tree.input_filename)
        self.has_changed = False

    def _saveInputFileAs(self):
        """
        Ask the user what file to save the input tree to.
        """
        input_name, other = QFileDialog.getSaveFileName(self, "Choose input file", os.getcwd(), "Input File (*.i)")
        if input_name:
            input_name = os.path.abspath(input_name)
            self.writeInputFile(input_name)
            self.setInputFile(input_name)
            self._recently_used_menu.update(input_name)
            self.has_changed = False

    def _checkInputFile(self):
        """
        Show the input file check window.
        """
        self.check_widget.show()
        self.check_widget.check(self.tree.app_info.path)

    def _viewInputFile(self):
        """
        View the text of the current input tree.
        """
        # Toggle: hide if already shown, otherwise refresh the text and show
        if self.input_file_view.isVisible():
            self.input_file_view.hide()
        else:
            data = self.tree.getInputFileString()
            self.input_file_view.setPlainText(data)
            self.input_file_view.show()

    def _clearInputFile(self):
        """Reset the input tree to an empty state and clear the filename."""
        self.tree.resetInputFile()
        self.tree.input_filename = None
        self.block_tree.setInputTree(self.tree)
        self.inputFileChanged.emit("")
        self._setMenuStatus()
        self.has_changed = False

    def _setMenuStatus(self):
        """
        Set the status of the menus.
        """
        if not self._menus_initialized:
            return
        # Everything requires a valid executable; Save also needs a filename
        enabled = self.tree.app_info.valid()
        if self.tree.input_filename:
            self._save_action.setEnabled(enabled)
        else:
            self._save_action.setEnabled(False)
        self._save_as_action.setEnabled(enabled)
        self._open_action.setEnabled(enabled)
        self._recently_used_menu.setEnabled(enabled)
        self._clear_action.setEnabled(enabled)
        self._check_action.setEnabled(enabled)
        self._view_file.setEnabled(enabled)

    def addToMenu(self, menu):
        """
        Register the menus specific to the InputTab.
        Input:
            menu[QMenu]: The menu to add the items to.
        """
        self._open_action = WidgetUtils.addAction(menu, "Open", self._openInputFile, "Ctrl+O")
        recentMenu = menu.addMenu("Recently opened")
        self._recently_used_menu = RecentlyUsedMenu(recentMenu,
                "input/recentlyUsed",
                "input/maxRecentlyUsed",
                20,
                )
        self._recently_used_menu.selected.connect(self.setInputFile)
        self._save_action = WidgetUtils.addAction(menu, "Save", self._saveInputFile)
        self._save_as_action = WidgetUtils.addAction(menu, "Save As", self._saveInputFileAs)
        self._clear_action = WidgetUtils.addAction(menu, "Clear", self._clearInputFile)
        self._check_action = WidgetUtils.addAction(menu, "Check", self._checkInputFile, "Ctrl+K")
        self._view_file = WidgetUtils.addAction(menu, "View current input file", self._viewInputFile, "Ctrl+V", True)
        self._menus_initialized = True
        self._setMenuStatus()

    def closing(self):
        """Called when the application closes: clean up the check dialog."""
        self.check_widget.cleanup()

    def clearRecentlyUsed(self):
        """Remove all entries from the recently-used input files menu."""
        if self._menus_initialized:
            self._recently_used_menu.clearValues()

    def onCurrentChanged(self, index):
        """
        This is called when the tab is changed.
        If the block editor window is open we want to raise it
        to the front so it doesn't get lost.
        """
        if index == self._index:
            if self.block_editor:
                self.block_editor.raise_()
# Manual test harness: run the editor standalone against a MOOSE executable.
# Usage: InputFileEditorPlugin.py <exe> <input file>
if __name__ == "__main__":
    from PyQt5.QtWidgets import QApplication, QMainWindow
    from ExecutableInfo import ExecutableInfo
    import sys
    if len(sys.argv) != 3:
        print("Usage: %s <exe> <input file>" % sys.argv[0])
        sys.exit(1)
    qapp = QApplication(sys.argv)
    main_win = QMainWindow()
    w = InputFileEditorPlugin()
    main_win.setCentralWidget(w)
    # Load the executable's syntax, then the input file
    exe_info = ExecutableInfo()
    exe_info.setPath(sys.argv[1])
    w.initialize()
    w.executableInfoChanged(exe_info)
    w.setInputFile(sys.argv[2])
    menubar = main_win.menuBar()
    # Native (e.g. macOS) menu bars don't show in this standalone harness
    menubar.setNativeMenuBar(False)
    input_menu = menubar.addMenu("Input File")
    w.addToMenu(input_menu)
    main_win.show()
    sys.exit(qapp.exec_())
|
nuclear-wizard/moose
|
python/peacock/Input/InputFileEditorPlugin.py
|
Python
|
lgpl-2.1
| 8,363
|
[
"MOOSE"
] |
274f4c1e368d538d80811190421f68d66e4c51c51bc4c9de6362d3525fafe3a3
|
'''
Created on 23/11/2009
@author: brian
'''
import logging
from numpy import linspace
from scipysim.actors import Source, Event, LastEvent
class Constant(Source):
    '''
    This actor is a constant value source.

    It emits `value` at `resolution` evenly spaced tags per "second" of
    simulation time, then signals completion with a LastEvent.
    '''

    def __init__(self, out, value=1.0, resolution=10, simulation_time=120, endpoint=False):
        """
        default parameters creates a constant output of 1.0 for 2 minutes (with 10 values per "second")

        @param out: output channel the events are written to
        @param value: the constant value carried by every event
        @param resolution: number of events per unit of simulation time
        @param simulation_time: total duration to generate events for
        @param endpoint: passed through to numpy.linspace - whether the
                         final tag equals simulation_time exactly
        """
        super(Constant, self).__init__(output_channel=out, simulation_time=simulation_time)
        self.resolution = resolution
        self.endpoint = endpoint
        self.value = value

    def process(self):
        """Generate the constant-valued events and terminate the stream."""
        # Fixed copy-paste leftover: this is the constant actor, not a ramp.
        logging.debug("Running constant process")
        # Tags cover [0, simulation_time) (or [0, simulation_time] when
        # endpoint=True) at `resolution` samples per time unit.
        tags = linspace(0, self.simulation_time,
                        self.simulation_time * self.resolution,
                        endpoint=self.endpoint)
        # Plain loop (not a comprehension) since we only want the side effect
        for tag in tags:
            self.output_channel.put(Event(tag, self.value))
        logging.debug("Const process finished adding all data to its output channel")
        self.stop = True
        # LastEvent tells downstream actors that no more events will arrive
        self.output_channel.put(LastEvent(self.simulation_time))
|
hardbyte/scipy-sim
|
scipysim/actors/math/constant.py
|
Python
|
gpl-3.0
| 1,242
|
[
"Brian"
] |
c62b3eec572de70647b3dff704574dbb02526ad769e7da25774c7f72e0c24a07
|
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_NOT_EMPTY
from s3.s3fields import S3Represent
from s3.s3query import FS
from s3.s3utils import S3DateTime, s3_auth_user_represent_name, s3_avatar_represent
from s3.s3validators import IS_LOCATION_SELECTOR2, IS_ONE_OF
from s3.s3widgets import S3LocationSelectorWidget2
from s3.s3forms import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
"""
Template settings for Requests Management
- for Philippines
"""
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate = ("Philippines", "default/users")
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
#settings.auth.registration_requires_approval = True
# Organisation links are either done automatically
# - by registering with official domain of Org
# or Manually by Call Center staff
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Uncomment this to allow Admin to see Organisations in user Admin even if the Registration doesn't request this
settings.auth.admin_sees_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "Philippines"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
# ("tl", "Tagalog"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["PH"]
# Until we add support to LocationSelector2 to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"PHP" : T("Philippine Pesos"),
#"EUR" : T("Euros"),
#"GBP" : T("Great British Pounds"),
#"CHF" : T("Swiss Francs"),
"USD" : T("United States Dollars"),
}
settings.fin.currency_default = "PHP"
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# Filter forms - style for Summary pages
#def filter_formstyle(row_id, label, widget, comment, hidden=False):
# return DIV(label, widget, comment,
# _id=row_id,
# _class="horiz_filter_form")
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an organisation
settings.hrm.org_required = False
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
#settings.org.site_label = "Office/Shelter/Hospital"
settings.org.site_label = "Site"
settings.org.site_autocomplete = True
# Extra fields to show in Autocomplete Representations
settings.org.site_autocomplete_fields = ["location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
]
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
#settings.msg.notify_subject = "$S %s" % T("Notification")
settings.msg.notify_subject = "$S Notification"
# -----------------------------------------------------------------------------
def currency_represent(v):
    """
    Custom Representation of Currencies

    @param v: the ISO 4217 currency code
    @return: the currency symbol for USD/EUR/GBP, otherwise the
             code itself (e.g. CHF)
    """
    symbols = {"USD": "$",
               "EUR": "€",
               "GBP": "£",
               }
    # Unknown codes are shown as-is
    return symbols.get(v, v)
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
    """
    Custom dataList item renderer for Contacts on the Profile pages

    @param list_id: the HTML ID of the list
    @param item_id: the HTML ID of the item
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @return: a DIV with the rendered card for one contact
    """
    record_id = record["hrm_human_resource.id"]
    item_class = "thumbnail"
    # raw values (un-represented) vs record[...] (represented) lookups
    raw = record._row
    #author = record["hrm_human_resource.modified_by"]
    date = record["hrm_human_resource.modified_on"]
    fullname = record["hrm_human_resource.person_id"]
    job_title = raw["hrm_human_resource.job_title_id"] or ""
    if job_title:
        job_title = "- %s" % record["hrm_human_resource.job_title_id"]
    #organisation = record["hrm_human_resource.organisation_id"]
    organisation_id = raw["hrm_human_resource.organisation_id"]
    #org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    pe_id = raw["pr_person.pe_id"]
    person_id = raw["hrm_human_resource.person_id"]
    location = record["org_site.location_id"]
    location_id = raw["org_site.location_id"]
    location_url = URL(c="gis", f="location",
                       args=[location_id, "profile"])
    # Fall back to placeholder texts when contact details are missing
    address = raw["gis_location.addr_street"] or T("no office assigned")
    email = raw["pr_email_contact.value"] or T("no email address")
    if isinstance(email, list):
        email = email[0]
    phone = raw["pr_phone_contact.value"] or T("no phone number")
    if isinstance(phone, list):
        phone = phone[0]
    db = current.db
    s3db = current.s3db
    # Look up the linked user account (if any) for the avatar
    ltable = s3db.pr_person_user
    query = (ltable.pe_id == pe_id)
    row = db(query).select(ltable.user_id,
                           limitby=(0, 1)
                           ).first()
    if row:
        # Use Personal Avatar
        # @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
        avatar = s3_avatar_represent(row.user_id,
                                     _class="media-object")
    else:
        avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
                     _class="media-object")
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = db.pr_person
    if permit("update", table, record_id=person_id):
        vars = {"refresh": list_id,
                "record": record_id,
                }
        f = current.request.function
        if f == "organisation" and organisation_id:
            vars["(organisation)"] = organisation_id
        edit_url = URL(c="hrm", f="person",
                       args=[person_id, "update.popup"],
                       vars=vars)
        title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=edit_url,
                     _class="s3_modal",
                     _title=title_update,
                     )
    else:
        edit_btn = ""
        edit_url = "#"
        title_update = ""
    # Deletions failing due to Integrity Errors
    #if permit("delete", table, record_id=person_id):
    #    delete_btn = A(I(" ", _class="icon icon-trash"),
    #                   _class="dl-item-delete",
    #                   )
    #else:
    delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # The avatar links to the same edit popup as the edit button
    avatar = A(avatar,
               _href=edit_url,
               _class="pull-left s3_modal",
               _title=title_update,
               )
    # Render the item
    body = TAG[""](P(fullname,
                     " ",
                     SPAN(job_title),
                     _class="person_pos",
                     ),
                   P(I(_class="icon-phone"),
                     " ",
                     SPAN(phone),
                     " ",
                     I(_class="icon-envelope-alt"),
                     " ",
                     SPAN(email),
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-home"),
                     " ",
                     address,
                     _class="card_manylines",
                     ))
    item = DIV(DIV(SPAN(" ", _class="card-title"),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(body,
                           # Organisation only needed if displaying elsewhere than org profile
                           # Author confusing with main contact record
                           #DIV(#author,
                           #    #" - ",
                           #    A(organisation,
                           #      _href=org_url,
                           #      _class="card-organisation",
                           #      ),
                           #    _class="card-person",
                           #    ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
    """
    Quote unicode strings for URLs for Rocket

    ASCII characters (codepoint < 128) pass through unchanged; any other
    character is replaced by '%' followed by its codepoint in upper-case hex.
    """
    quoted = []
    append = quoted.append
    for char in s:
        codepoint = ord(char)
        if codepoint < 128:
            append(char)
        else:
            append("%%%X" % codepoint)
    return "".join(quoted)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
    """
    Custom dataList item renderer for Locations on the Selection Page

    Renders one location card with an SVG outline icon, a drill-down link
    to the next admin level, and tallies of sites/requests/donations in
    all sub-locations.

    @param list_id: the HTML ID of the list
    @param item_id: the HTML ID of the item
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @return: a DIV with the rendered card for one location
    """
    record_id = record["gis_location.id"]
    item_class = "thumbnail"
    raw = record._row
    name = raw["gis_location.name"]
    level = raw["gis_location.level"]
    L1 = raw["gis_location.L1"]
    L2 = raw["gis_location.L2"]
    L3 = raw["gis_location.L3"]
    L4 = raw["gis_location.L4"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])
    # Represent the location with its parent hierarchy in brackets.
    # NOTE(review): the first branch is 'if', not 'elif' — harmless here,
    # since the following chain's else also assigns name for L1, but
    # probably unintended.
    if level == "L1":
        represent = name
    if level == "L2":
        represent = "%s (%s)" % (name, L1)
    elif level == "L3":
        represent = "%s (%s, %s)" % (name, L2, L1)
    elif level == "L4":
        represent = "%s (%s, %s, %s)" % (name, L3, L2, L1)
    else:
        # L0 or specific
        represent = name
    # Users don't edit locations
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars={"refresh": list_id,
    #                                  "record": record_id}),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )
    # Tallies
    # NB We assume that all records are readable here
    # Search all sub-locations
    locations = current.gis.get_children(record_id)
    locations = [l.id for l in locations]
    locations.append(record_id)
    db = current.db
    s3db = current.s3db
    # Count sites located in this location or any sub-location
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.location_id.belongs(locations))
    count = stable.id.count()
    row = db(query).select(count).first()
    if row:
        tally_sites = row[count]
    else:
        tally_sites = 0
    # Count requests raised by those sites
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.location_id.belongs(locations))
    count = table.id.count()
    row = db(query).select(count).first()
    if row:
        tally_reqs = row[count]
    else:
        tally_reqs = 0
    # Count donation commitments made for this area
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.location_id.belongs(locations))
    count = table.id.count()
    row = db(query).select(count).first()
    if row:
        tally_commits = row[count]
    else:
        tally_commits = 0
    # Drill-down link to the next administrative level (none below L4)
    if level == "L4":
        next_Lx = ""
        next_Lx_label = ""
    else:
        if level == "L0":
            next_Lx = "L1"
            next_Lx_label = "Regions"
        if level == "L1":
            next_Lx = "L2"
            next_Lx_label = "Provinces"
        elif level == "L2":
            next_Lx = "L3"
            next_Lx_label = "Municipalities / Cities"
        elif level == "L3":
            next_Lx = "L4"
            next_Lx_label = "Barangays"
        table = db.gis_location
        query = (table.deleted == False) & \
                (table.level == next_Lx) & \
                (table.parent == record_id)
        count = table.id.count()
        row = db(query).select(count).first()
        if row:
            tally_Lx = row[count]
        else:
            tally_Lx = 0
        next_url = URL(c="gis", f="location",
                       args=["datalist"],
                       vars={"~.level": next_Lx,
                             "~.parent": record_id,
                             })
        next_Lx_label = A(next_Lx_label,
                          _href=next_url,
                          )
        next_Lx = SPAN(tally_Lx,
                       _class="badge",
                       )
    # Build the icon, if it doesn't already exist
    filename = "%s.svg" % record_id
    import os
    filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
    if not os.path.exists(filepath):
        gtable = db.gis_location
        loc = db(gtable.id == record_id).select(gtable.wkt,
                                                limitby=(0, 1)
                                                ).first()
        if loc:
            from s3.s3codecs.svg import S3SVG
            S3SVG.write_file(filename, loc.wkt)
    # Render the item
    item = DIV(DIV(A(IMG(_class="media-object",
                         _src=URL(c="static",
                                  f="cache",
                                  args=["svg", filename],
                                  )
                         ),
                     _class="pull-left",
                     _href=location_url,
                     ),
                   DIV(SPAN(A(represent,
                              _href=location_url,
                              _class="media-heading"
                              ),
                            ),
                       #edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(next_Lx_label,
                         next_Lx,
                         T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
    """
    Custom dataList item renderer for Locations on the Profile Page
    - UNUSED

    @param list_id: the HTML ID of the list
    @param item_id: the HTML ID of the item
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @return: a DIV with a minimal card linking to the location profile
    """
    record_id = record["gis_location.id"]
    item_class = "thumbnail"
    raw = record._row
    name = record["gis_location.name"]
    location_url = URL(c="gis", f="location",
                       args=[record_id, "profile"])
    # Placeholder to maintain style
    #logo = DIV(IMG(_class="media-object"),
    #           _class="pull-left")
    # We don't Edit Locations
    # Edit Bar
    # permit = current.auth.s3_has_permission
    # table = current.db.gis_location
    # if permit("update", table, record_id=record_id):
    #     vars = {"refresh": list_id,
    #             "record": record_id,
    #             }
    #     f = current.request.function
    #     if f == "organisation" and organisation_id:
    #         vars["(organisation)"] = organisation_id
    #     edit_btn = A(I(" ", _class="icon icon-edit"),
    #                  _href=URL(c="gis", f="location",
    #                            args=[record_id, "update.popup"],
    #                            vars=vars),
    #                  _class="s3_modal",
    #                  _title=current.response.s3.crud_strings.gis_location.title_update,
    #                  )
    # else:
    #     edit_btn = ""
    # if permit("delete", table, record_id=record_id):
    #     delete_btn = A(I(" ", _class="icon icon-trash"),
    #                    _class="dl-item-delete",
    #                    )
    # else:
    #     delete_btn = ""
    # edit_bar = DIV(edit_btn,
    #                delete_btn,
    #                _class="edit-bar fright",
    #                )
    # Render the item
    item = DIV(DIV(DIV(#SPAN(A(name,
                       #       _href=location_url,
                       #       ),
                       #     _class="location-title"),
                       #" ",
                       #edit_bar,
                       P(A(name,
                           _href=location_url,
                           ),
                         _class="card_comments"),
                       _class="span5"), # card-details
                   _class="row",
                   ),
               )
    return item
# -----------------------------------------------------------------------------
def render_sites(list_id, item_id, resource, rfields, record):
    """
    Custom dataList item renderer for Facilities on the Profile pages

    @param list_id: the HTML ID of the list
    @param item_id: the HTML ID of the item
    @param resource: the S3Resource to render
    @param rfields: the S3ResourceFields to render
    @param record: the record as dict
    @return: a DIV with the rendered card for one facility
    """
    record_id = record["org_facility.id"]
    item_class = "thumbnail"
    # raw values (un-represented) vs record[...] (represented) lookups
    raw = record._row
    name = record["org_facility.name"]
    site_id = raw["org_facility.id"]
    opening_times = raw["org_facility.opening_times"] or ""
    author = record["org_facility.modified_by"]
    date = record["org_facility.modified_on"]
    organisation = record["org_facility.organisation_id"]
    organisation_id = raw["org_facility.organisation_id"]
    location = record["org_facility.location_id"]
    level = raw["gis_location.level"]
    # Link to an admin area: the location itself if it is one (has a
    # level), otherwise its parent
    if level:
        location_id = raw["org_facility.location_id"]
    else:
        location_id = raw["gis_location.parent"]
    location_url = URL(c="gis", f="location",
                       args=[location_id, "profile"])
    address = raw["gis_location.addr_street"] or ""
    phone = raw["org_facility.phone1"] or ""
    facility_type = record["org_site_facility_type.facility_type_id"]
    comments = record["org_facility.comments"] or ""
    logo = raw["org_organisation.logo"]
    site_url = URL(c="org", f="facility", args=[site_id, "profile"])
    org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Operational status badge (icon/colour per status code);
    # NOTE(review): the icon variables are currently unused - the I()
    # elements below are commented out.
    facility_status = raw["org_site_status.facility_status"] or ""
    if facility_status:
        if facility_status == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif facility_status == 2:
            icon = "thumbs-down-alt"
            colour = "amber"
        elif facility_status == 3:
            icon = "reply-all"
            colour = "red"
        elif facility_status == 4:
            icon = "remove"
            colour = "red"
        elif facility_status == 99:
            icon = "question"
            colour = ""
        facility_status = P(#I(_class="icon-%s" % icon),
                            #" ",
                            SPAN("%s: %s" % (T("Status"), record["org_site_status.facility_status"])),
                            " ",
                            _class="card_1_line %s" % colour,
                            )
    # Power-supply badge, same pattern as the status badge
    power_supply_type = raw["org_site_status.power_supply_type"] or ""
    if power_supply_type:
        if power_supply_type == 1:
            icon = "thumbs-up-alt"
            colour = "green"
        elif power_supply_type == 2:
            icon = "cogs"
            colour = "amber"
        elif power_supply_type == 98:
            icon = "question"
            colour = "amber"
        elif power_supply_type == 99:
            icon = "remove"
            colour = "red"
        power_supply_type = P(#I(_class="icon-%s" % icon),
                              #" ",
                              SPAN("%s: %s" % (T("Power"), record["org_site_status.power_supply_type"])),
                              " ",
                              _class="card_1_line %s" % colour,
                              )
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = current.db.org_facility
    if permit("update", table, record_id=record_id):
        vars = {"refresh": list_id,
                "record": record_id,
                }
        f = current.request.function
        if f == "organisation" and organisation_id:
            vars["(organisation)"] = organisation_id
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="facility",
                               args=[record_id, "update.popup"],
                               vars=vars),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_facility.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Render the item
    body = TAG[""](P(I(_class="icon-flag"),
                     " ",
                     SPAN(facility_type),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-home"),
                     " ",
                     address,
                     _class="card_manylines",
                     ),
                   P(I(_class="icon-time"),
                     " ",
                     SPAN(opening_times),
                     " ",
                     _class="card_1_line",
                     ),
                   P(I(_class="icon-phone"),
                     " ",
                     SPAN(phone),
                     " ",
                     _class="card_1_line",
                     ),
                   facility_status,
                   power_supply_type,
                   P(comments,
                     _class="card_manylines s3-truncate",
                     ),
                   )
    item = DIV(DIV(SPAN(A(name,
                          _href=site_url,
                          ),
                        _class="card-title",
                        ),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(DIV(body,
                           DIV(author,
                               " - ",
                               A(organisation,
                                 _href=org_url,
                                 _class="card-organisation",
                                 ),
                               _class="card-person",
                               ),
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               #docs,
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_organisations(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Organisations on the Stakeholder Selection Page

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: a DIV (web2py HTML helper) for embedding in the datalist
    """
    record_id = record["org_organisation.id"]
    item_class = "thumbnail span6" # span6 for 2 cols
    # raw = unformatted field values; record = represented (display) values
    raw = record._row
    name = record["org_organisation.name"]
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        # Make the website text a clickable link
        website = A(website, _href=website)
    # Monetary needs: only show details when the Org has flagged "money"
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = SPAN(XML(money_details),
                             _class="s3-truncate")
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          money_details,
                          _class="card_manylines",
                          )
    else:
        # Include anyway to make cards align
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          _class="card_1_line",
                          )
    #time = raw["req_organisation_needs.vol"]
    #if time:
    #    time_details = record["req_organisation_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""
    org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
    # Logo links through to the Organisation's profile page
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Edit Bar: buttons shown only if this user has the permission
    db = current.db
    permit = current.auth.s3_has_permission
    table = db.org_organisation
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="org", f="organisation",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.org_organisation.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    # Tallies
    # NB We assume that all records are readable here
    s3db = current.s3db
    # Active (non-deleted, non-obsolete) Sites of this Organisation
    stable = s3db.org_site
    query = (stable.deleted == False) & \
            (stable.obsolete == False) & \
            (stable.organisation_id == record_id)
    tally_sites = db(query).count()
    # Requests raised at any of this Organisation's Sites (join via org_site)
    table = s3db.req_req
    query = (table.deleted == False) & \
            (stable.site_id == table.site_id) & \
            (stable.organisation_id == record_id)
    tally_reqs = db(query).count()
    # Donations (commits) made by this Organisation
    table = s3db.req_commit
    query = (table.deleted == False) & \
            (table.organisation_id == record_id)
    tally_commits = db(query).count()
    # Render the item
    item = DIV(DIV(logo,
                   DIV(SPAN(A(name,
                              _href=org_url,
                              _class="media-heading"
                              ),
                            ),
                       edit_bar,
                       _class="card-header-select",
                       ),
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       #time_details,
                       P(T("Sites"),
                         SPAN(tally_sites,
                              _class="badge",
                              ),
                         T("Requests"),
                         SPAN(tally_reqs,
                              _class="badge",
                              ),
                         T("Donations"),
                         SPAN(tally_commits,
                              _class="badge",
                              ),
                         _class="tally",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_org_needs(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Needs
        - UNUSED

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: a DIV (web2py HTML helper) for embedding in the datalist
    """
    record_id = record["req_organisation_needs.id"]
    item_class = "thumbnail"
    # raw = unformatted field values; record = represented (display) values
    raw = record._row
    logo = raw["org_organisation.logo"]
    phone = raw["org_organisation.phone"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        # Make the website text a clickable link
        website = A(website, _href=website)
    author = record["req_organisation_needs.modified_by"]
    date = record["req_organisation_needs.modified_on"]
    # Monetary needs: only show details when "money" is flagged
    money = raw["req_organisation_needs.money"]
    if money:
        money_details = record["req_organisation_needs.money_details"]
        money_details = P(I(_class="icon icon-dollar"),
                          " ",
                          XML(money_details),
                          _class="card_manylines",
                          )
    else:
        money_details = ""
    # Volunteer-time needs: only show details when "vol" is flagged
    time = raw["req_organisation_needs.vol"]
    if time:
        time_details = record["req_organisation_needs.vol_details"]
        time_details = P(I(_class="icon icon-time"),
                         " ",
                         XML(time_details),
                         _class="card_manylines",
                         )
    else:
        time_details = ""
    org_id = raw["org_organisation.id"]
    org_url = URL(c="org", f="organisation", args=[org_id, "profile"])
    # Logo links through to the Organisation's profile page
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=org_url,
                 _class="pull-left",
                 )
    else:
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Edit Bar: buttons shown only if this user has the permission
    permit = current.auth.s3_has_permission
    table = current.db.req_organisation_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="organisation_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_organisation_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    if current.request.controller == "org":
        # Org Profile page - no need to repeat Org Name
        title = " "
    else:
        title = raw["org_organisation.name"]
    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       money_details,
                       time_details,
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# Expose renderer for use by profile widgets
s3.render_org_needs = render_org_needs
# -----------------------------------------------------------------------------
def render_site_needs(list_id, item_id, resource, rfields, record):
    """
        Custom dataList item renderer for Needs
        - UNUSED

        @param list_id: the HTML ID of the list
        @param item_id: the HTML ID of the item
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict

        @return: a DIV (web2py HTML helper) for embedding in the datalist
    """
    record_id = record["req_site_needs.id"]
    item_class = "thumbnail"
    # raw = unformatted field values; record = represented (display) values
    raw = record._row
    logo = raw["org_organisation.logo"]
    addresses = raw["gis_location.addr_street"]
    if addresses:
        if isinstance(addresses, list):
            address = addresses[0]
        else:
            address = addresses
    else:
        address = ""
    # Fix: "contact" was referenced below but never assigned (NameError).
    # Use .get() since the field may not be in list_fields.
    contact = raw.get("org_facility.contact") or ""
    opening_times = raw["org_facility.opening_times"] or ""
    phone = raw["org_facility.phone1"] or ""
    website = raw["org_organisation.website"] or ""
    if website:
        # Make the website text a clickable link
        website = A(website, _href=website)
    author = record["req_site_needs.modified_by"]
    date = record["req_site_needs.modified_on"]
    #goods = raw["req_site_needs.goods"]
    #if goods:
    #    goods_details = record["req_site_needs.goods_details"]
    #    goods_details = P(I(_class="icon icon-truck"),
    #                      " ",
    #                      XML(goods_details),
    #                      _class="card_1_line",
    #                      )
    #else:
    #    goods_details = ""
    #time = raw["req_site_needs.vol"]
    #if time:
    #    time_details = record["req_site_needs.vol_details"]
    #    time_details = P(I(_class="icon icon-time"),
    #                     " ",
    #                     XML(time_details),
    #                     _class="card_1_line",
    #                     )
    #else:
    #    time_details = ""
    # NOTE(review): record_id is the req_site_needs id - confirm it matches
    # the org_facility id expected by this profile URL
    site_url = URL(c="org", f="facility", args=[record_id, "profile"])
    if logo:
        logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
                     _class="media-object",
                     ),
                 _href=site_url,
                 _class="pull-left",
                 )
    else:
        logo = DIV(IMG(_class="media-object"),
                   _class="pull-left")
    # Edit Bar: buttons shown only if this user has the permission
    permit = current.auth.s3_has_permission
    table = current.db.req_site_needs
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="req", f="site_needs",
                               args=[record_id, "update.popup"],
                               vars={"refresh": list_id,
                                     "record": record_id}),
                     _class="s3_modal",
                     _title=current.response.s3.crud_strings.req_site_needs.title_update,
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-trash"),
                       _class="dl-item-delete",
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    if current.request.controller == "org":
        # Site Profile page - no need to repeat Site Name
        title = " "
    else:
        title = raw["org_facility.name"]
    # Render the item
    item = DIV(DIV(SPAN(title, _class="card-title"),
                   SPAN(author, _class="location-title"),
                   SPAN(date, _class="date-title"),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(logo,
                   DIV(#goods_details,
                       #time_details,
                       P(I(_class="icon icon-home"),
                         " ",
                         address,
                         _class="card_manylines",
                         ),
                       P(I(_class="icon-time"),
                         " ",
                         SPAN(opening_times),
                         " ",
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-phone"),
                         " ",
                         phone,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-map"),
                         " ",
                         website,
                         _class="card_1_line",
                         ),
                       P(I(_class="icon icon-user"),
                         " ",
                         contact,
                         _class="card_1_line",
                         ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# Expose renderer for use by profile widgets
s3.render_site_needs = render_site_needs
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
    """
        Customise gis_location controller
        - Profile Page

        @param attr: controller attributes dict (returned to the caller)
    """
    db = current.db
    s3 = current.response.s3
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.interactive:
            s3db = current.s3db
            table = s3db.gis_location
            if r.method == "datalist":
                # Lx selection page
                # 2-column datalist, 6 rows per page
                s3.dl_pagelength = 12
                s3.dl_rowsize = 2
                # Default 5 triggers an AJAX call, we should load all by default
                s3.dl_pagelength = 17
                level = current.request.get_vars.get("~.level", None)
                if not level:
                    # Just show PH L1s
                    level = "L1"
                    s3.filter = (table.L0 == "Philippines") & (table.level == "L1")
                parent = current.request.get_vars.get("~.parent", None)
                # Page title depends on the admin level being listed
                # (and on the parent Lx, if we drilled down into one)
                if level == "L1":
                    s3.crud_strings["gis_location"].title_list = T("Regions")
                elif level == "L2":
                    if parent:
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Provinces in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Provinces")
                elif level == "L3":
                    if parent:
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities")
                elif level == "L4":
                    if parent:
                        parent = db(table.id == parent).select(table.name,
                                                               limitby=(0, 1)
                                                               ).first().name
                        s3.crud_strings["gis_location"].title_list = T("Barangays in %s") % parent
                    else:
                        s3.crud_strings["gis_location"].title_list = T("Barangays")
                list_fields = ["name",
                               "level",
                               "L1",
                               "L2",
                               "L3",
                               "L4",
                               ]
                s3db.configure("gis_location",
                               filter_widgets = None,
                               list_fields = list_fields,
                               list_layout = render_locations,
                               )
            elif r.method == "profile":
                # Customise tables used by widgets
                #customise_hrm_human_resource_fields()
                customise_org_facility_fields()
                s3db.req_customise_req_fields()
                s3db.req_customise_commit_fields()
                # gis_location table (Sub-Locations)
                table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
                list_fields = ["name",
                               "id",
                               ]
                location = r.record
                record_id = location.id
                # Override context as that's a Path
                default = "~.(location)=%s" % record_id
                # Map widget bounded to this Location's bbox
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "location",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  bbox = {"lat_max" : location.lat_max,
                                          "lon_max" : location.lon_max,
                                          "lat_min" : location.lat_min,
                                          "lon_min" : location.lon_min
                                          },
                                  )
                #locations_widget = dict(label = "Locations",
                #                        insert = False,
                #                        #label_create = "Create Location",
                #                        type = "datalist",
                #                        tablename = "gis_location",
                #                        context = "location",
                #                        icon = "icon-globe",
                #                        # @ToDo: Show as Polygons?
                #                        show_on_map = False,
                #                        list_layout = render_locations_profile,
                #                        )
                #needs_widget = dict(label = "Needs",
                #                    label_create = "Add New Need",
                #                    type = "datalist",
                #                    tablename = "req_site_needs",
                #                    context = "location",
                #                    icon = "icon-hand-up",
                #                    multiple = False,
                #                    # Would just show up on Sites
                #                    show_on_map = False,
                #                    list_layout = render_site_needs,
                #                    )
                # Open (status 0/1) Requests in this Location
                reqs_widget = dict(label = "Requests",
                                   label_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "location",
                                   default = default,
                                   filter = FS("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   layer = "Requests",
                                   # provided by Catalogue Layer
                                   #marker = "request",
                                   list_layout = s3db.req_req_list_layout,
                                   )
                # Non-cancelled Donations in this Location
                commits_widget = dict(label = "Donations",
                                      label_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "location",
                                      default = default,
                                      filter = FS("cancel") == False,
                                      icon = "icon-truck",
                                      show_on_map = False,
                                      #layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_commit_list_layout,
                                      )
                #resources_widget = dict(label = "Resources",
                #                        label_create = "Create Resource",
                #                        type = "datalist",
                #                        tablename = "org_resource",
                #                        context = "location",
                #                        default = default,
                #                        #filter = FS("req_status").belongs([0, 1]),
                #                        icon = "icon-wrench",
                #                        layer = "Resources",
                #                        # provided by Catalogue Layer
                #                        #marker = "resource",
                #                        list_layout = s3db.org_resource_list_layout,
                #                        )
                # Active (non-obsolete) Sites in this Location
                sites_widget = dict(label = "Sites",
                                    label_create = "Add New Site",
                                    type = "datalist",
                                    tablename = "org_facility",
                                    context = "location",
                                    default = default,
                                    filter = FS("obsolete") == False,
                                    icon = "icon-home",
                                    layer = "Facilities",
                                    # provided by Catalogue Layer
                                    #marker = "office",
                                    list_layout = render_sites,
                                    )
                # Build the icon, if it doesn't already exist
                # (SVG rendered once from the Location's WKT, then cached)
                filename = "%s.svg" % record_id
                import os
                filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
                if not os.path.exists(filepath):
                    gtable = db.gis_location
                    loc = db(gtable.id == record_id).select(gtable.wkt,
                                                            limitby=(0, 1)
                                                            ).first()
                    if loc and loc.wkt:
                        from s3.s3codecs.svg import S3SVG
                        S3SVG.write_file(filename, loc.wkt)
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class="icon icon-edit"),
                                 _href=URL(c="gis", f="location",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["gis_location"].title_update,
                                 )
                else:
                    edit_btn = ""
                name = location.name
                s3db.configure("gis_location",
                               list_fields = list_fields,
                               profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
                                                            name),
                               profile_header = DIV(edit_btn,
                                                    A(IMG(_class="media-object",
                                                          _src=URL(c="static",
                                                                   f="cache",
                                                                   args=["svg", filename],
                                                                   ),
                                                          ),
                                                      _class="pull-left",
                                                      #_href=location_url,
                                                      ),
                                                    H2(name),
                                                    _class="profile-header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  commits_widget,
                                                  #resources_widget,
                                                  sites_widget,
                                                  #locations_widget,
                                                  ],
                               )
        return True
    s3.prep = custom_prep
    return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
    """
        Configure hrm_human_resource representations & list fields
        for Profile widgets and 'more' popups
    """
    s3db = current.s3db
    htable = s3db.hrm_human_resource
    # Show sites by name & locations with a pipe-separated hierarchy
    htable.site_id.represent = S3Represent(lookup="org_site")
    s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
    #htable.modified_by.represent = s3_auth_user_represent_name
    htable.modified_on.represent = datetime_represent
    s3db.configure("hrm_human_resource",
                   list_fields = ["person_id",
                                  "person_id$pe_id",
                                  "organisation_id",
                                  "site_id$location_id",
                                  "site_id$location_id$addr_street",
                                  "job_title_id",
                                  "email.value",
                                  "phone.value",
                                  #"modified_by",
                                  "modified_on",
                                  ],
                   )
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
    """
        Customise hrm_human_resource controller
        - used for 'more' popups
    """
    s3 = current.response.s3
    standard_prep = s3.prep
    def custom_prep(r):
        # Run the standard prep first; abort if it vetoes the request
        if callable(standard_prep):
            if not standard_prep(r):
                return False
        if r.method == "datalist":
            customise_hrm_human_resource_fields()
            current.s3db.configure("hrm_human_resource",
                                   # Don't include a Create form in 'More' popups
                                   listadd = False,
                                   list_layout = render_contacts,
                                   )
        return True
    s3.prep = custom_prep
    return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
    """
        Customise hrm_job_title controller
        - hide organisation_id (Job Titles are global in this template)
        - add Open/Edit/Delete action buttons with per-record restriction
    """
    s3 = current.response.s3
    table = current.s3db.hrm_job_title
    # Configure fields
    field = table.organisation_id
    field.readable = field.writable = False
    field.default = None
    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            db = current.db
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            s3_accessible_query = auth.s3_accessible_query

            def restrict_ids(method):
                # IDs of the records this user may apply the method to
                # (was duplicated verbatim for update & delete)
                query = s3_accessible_query(method, table)
                rows = db(query).select(table._id)
                restrict = []
                rappend = restrict.append
                for row in rows:
                    row_id = row.get("id", None)
                    if row_id:
                        rappend(str(row_id))
                return restrict

            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="hrm", f="job_title",
                                    args=["[id]", "read"]))
                       ]
            for method, label in (("update", T("Edit")),
                                  ("delete", T("Delete"))):
                if has_permission(method, table):
                    action = dict(label=str(label),
                                  _class="action-btn",
                                  url=URL(c="hrm", f="job_title",
                                          args=["[id]", method]),
                                  )
                    if ownership_required(method, table):
                        # Check which records can be acted upon
                        action["restrict"] = restrict_ids(method)
                    actions.append(action)
            s3.actions = actions
            if isinstance(output, dict):
                if "form" in output:
                    output["form"].add_class("hrm_job_title")
                elif "item" in output and hasattr(output["item"], "add_class"):
                    output["item"].add_class("hrm_job_title")
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        return output
    s3.postp = custom_postp
    return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_facility_fields():
    """
        Configure org_facility fields, CRUD strings, list fields & form
        for Profile widgets and 'more' popups
    """
    # Truncate comments fields
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)
    s3db = current.s3db
    tablename = "org_facility"
    table = s3db.org_facility
    table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent
    table.phone1.label = T("Phone")
    # Linkify URLs inside comments
    comments = table.comments
    comments.represent = lambda body: XML(s3_URLise(body))
    comments.comment = None
    # CRUD strings
    current.response.s3.crud_strings[tablename] = Storage(
        label_create = T("Add Site"),
        title_display = T("Site Details"),
        title_list = T("Sites"),
        title_update = T("Edit Site"),
        label_list_button = T("List Sites"),
        label_delete_button = T("Delete Site"),
        msg_record_created = T("Site Added"),
        msg_record_modified = T("Site Updated"),
        msg_record_deleted = T("Site Canceled"),
        msg_list_empty = T("No Sites registered"))
    crud_form = S3SQLCustomForm("name",
                                "code",
                                S3SQLInlineComponentMultiSelectWidget(
                                    "facility_type",
                                    label = T("Facility Type"),
                                    field = "facility_type_id",
                                    widget = "multiselect",
                                ),
                                "organisation_id",
                                "location_id",
                                "opening_times",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "human_resource_site",
                                #    label = T("Focal Point"),
                                #    field = ["human_resource_id"],
                                #    multiple = False,
                                #),
                                #"contact",
                                "phone1",
                                # This is too Ugly right now!
                                #S3SQLInlineComponent(
                                #    "needs",
                                #    label = T("Needs"),
                                #    multiple = False,
                                #),
                                S3SQLInlineComponent(
                                    "status",
                                    label = T("Status"),
                                    multiple = False,
                                ),
                                "comments",
                                )
    s3db.configure(tablename,
                   crud_form = crud_form,
                   list_fields = ["name",
                                  "code",
                                  "site_facility_type.facility_type_id",
                                  "organisation_id",
                                  "location_id",
                                  "location_id$addr_street",
                                  "location_id$level",
                                  "location_id$parent",
                                  "modified_by",
                                  "modified_on",
                                  "organisation_id$logo",
                                  "opening_times",
                                  "human_resource.person_id",
                                  #"contact",
                                  "phone1",
                                  "status.facility_status",
                                  "status.power_supply_type",
                                  "comments",
                                  ],
                   )
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
    """
        Customise org_facility controller
        - Site selection datalist, Profile page & datatable action buttons
    """
    s3 = current.response.s3
    s3db = current.s3db
    table = s3db.org_facility
    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False
        if r.interactive:
            customise_org_facility_fields()
            # Which levels of Hierarchy are we using?
            levels = current.gis.get_relevant_hierarchy_levels()
            # Filter from a Profile page?
            # If so, then default the fields we know
            get_vars = current.request.get_vars
            location_id = get_vars.get("~.(location)", None)
            organisation_id = get_vars.get("~.(organisation)", None)
            if organisation_id:
                org_field = table.organisation_id
                org_field.default = organisation_id
                org_field.readable = org_field.writable = False
            location_field = table.location_id
            if location_id:
                location_field.default = location_id
                location_field.readable = location_field.writable = False
            else:
                # Don't add new Locations here
                location_field.comment = None
                location_field.requires = IS_LOCATION_SELECTOR2(levels=levels)
                location_field.widget = S3LocationSelectorWidget2(levels=levels,
                                                                  show_address=True,
                                                                  show_map=True)
            # @ToDo: Proper button if we want this & amend functionality for Bootstrap)
            #s3.cancel = True
            if r.method == "datalist":
                # Site selection page
                # 2-column datalist, 6 rows per page
                #s3.dl_pagelength = 12
                #s3.dl_rowsize = 2
                from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
                filter_widgets = [
                    S3LocationFilter("location_id",
                                     levels = levels,
                                     hidden = True,
                                     ),
                    S3OptionsFilter(name = "type",
                                    label = T("Type"),
                                    field="site_facility_type.facility_type_id",
                                    hidden = True,
                                    ),
                    S3OptionsFilter(name = "status",
                                    label = T("Status"),
                                    field = "status.facility_status",
                                    hidden = True,
                                    ),
                    S3OptionsFilter(name = "power",
                                    label = T("Power Supply"),
                                    field = "status.power_supply_type",
                                    hidden = True,
                                    ),
                    ]
                #get_vars = current.request.get_vars
                #goods = get_vars.get("needs.goods", None)
                #vol = get_vars.get("needs.vol", None)
                #if goods:
                #    needs_fields = ["needs.goods_details"]
                #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Drop-off Goods")
                #elif vol:
                #    needs_fields = ["needs.vol_details"]
                #    s3.crud_strings["org_facility"].title_list = T("Sites where you can Volunteer your time")
                #else:
                #    yesno = {True: T("Yes"), False: T("No")}
                #    needs_fields = ["needs.goods_details", "needs.vol_details"]
                #    filter_widgets.insert(0, S3OptionsFilter("needs.goods",
                #                                             label = T("Drop-off Goods"),
                #                                             cols = 2,
                #                                             options = yesno,
                #                                             multiple = False,
                #                                             hidden = True,
                #                                             ))
                #    filter_widgets.insert(1, S3OptionsFilter("needs.vol",
                #                                             label = T("Volunteer Time"),
                #                                             cols = 2,
                #                                             options = yesno,
                #                                             multiple = False,
                #                                             hidden = True,
                #                                             ))
                filter_widgets.insert(0, S3TextFilter(["name",
                                                       "code",
                                                       "comments",
                                                       ], #+ needs_fields,
                                                      label = T("Search")))
                s3db.configure("org_facility",
                               # Don't include a Create form in 'More' popups
                               listadd = False,
                               list_layout = render_sites,
                               filter_widgets = filter_widgets,
                               )
            elif r.method == "profile":
                # Customise tables used by widgets
                customise_hrm_human_resource_fields()
                customise_site_needs_fields(profile=True)
                s3db.req_customise_req_fields()
                list_fields = ["name",
                               "id",
                               ]
                record = r.record
                record_id = record.id
                # @ToDo: Center on the Site
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "site",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  )
                contacts_widget = dict(label = "Contacts",
                                       label_create = "Create Contact",
                                       type = "datalist",
                                       tablename = "hrm_human_resource",
                                       context = "site",
                                       create_controller = "pr",
                                       create_function = "person",
                                       icon = "icon-contact",
                                       show_on_map = False, # Since they will show within Sites
                                       list_layout = render_contacts,
                                       )
                reqs_widget = dict(label = "Requests",
                                   label_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "site",
                                   filter = FS("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   show_on_map = False, # Since they will show within Sites
                                   list_layout = s3db.req_req_list_layout,
                                   )
                commits_widget = dict(label = "Donations",
                                      #label_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "site",
                                      filter = FS("cancel") == False,
                                      icon = "icon-truck",
                                      show_on_map = False,
                                      #layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_commit_list_layout,
                                      )
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class = "icon icon-edit"),
                                 _href=URL(c="org", f="facility",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["org_facility"].title_update,
                                 )
                else:
                    edit_btn = ""
                name = record.name
                code = record.code
                if code:
                    name_code = "%s - %s" % (name, code)
                else:
                    # Fix: fall back to the name, not the (empty) code
                    # NOTE(review): name_code is currently unused below
                    name_code = name
                location = table.location_id.represent(record.location_id)
                organisation_id = record.organisation_id
                db = current.db
                otable = db.org_organisation
                query = (otable.id == organisation_id)
                org = db(query).select(otable.name,
                                       otable.logo,
                                       limitby=(0, 1)).first()
                if org and org.logo:
                    logo = URL(c="default", f="download", args=[org.logo])
                else:
                    # @ToDo: Placeholder
                    logo = "#"
                # Add primary resource to map
                # Lookup Marker (type-dependent)
                ftable = s3db.org_facility
                ltable = s3db.org_site_facility_type
                # Fix: compare the id Field, not the Table object itself
                query = (ftable.id == record_id) & \
                        (ftable.site_id == ltable.site_id)
                facility_type = db(query).select(ltable.facility_type_id,
                                                 limitby = (0, 1)
                                                 ).first()
                # Lookup Marker
                if facility_type:
                    # Fix: the Row only contains facility_type_id (no "id")
                    layer_filter = "facility_type.facility_type_id=%s" % \
                        facility_type.facility_type_id
                else:
                    layer_filter = ""
                marker = current.gis.get_marker(controller = "org",
                                                function = "facility",
                                                filter = layer_filter)
                lat = None
                lon = None
                gtable = s3db.gis_location
                query = (r.id == ftable.id) & \
                        (ftable.location_id == gtable.id)
                lat_lon = db(query).select(gtable.lat,
                                           gtable.lon,
                                           limitby = (0,1)).first()
                if lat_lon:
                    lat = lat_lon["gis_location.lat"]
                    lon = lat_lon["gis_location.lon"]
                map_widget["lat"] = lat
                map_widget["lon"] = lon
                tablename = "org_facility"
                layer = dict(name = record.name,
                             id = "profile-header-%s-%s" % (tablename, record_id),
                             active = True,
                             tablename = r.tablename,
                             url = "/%s/org/facility.geojson?facility.id=%s" % \
                                (r.application, record_id),
                             marker = marker,
                             )
                s3db.configure(tablename,
                               list_fields = list_fields,
                               profile_title = "%s : %s" % (s3.crud_strings["org_facility"].title_list,
                                                            name),
                               profile_header = DIV(edit_btn,
                                                    IMG(_class="media-object",
                                                        _src=logo,
                                                        ),
                                                    H2(name),
                                                    record.code and P(record.code) or "",
                                                    P(I(_class="icon-sitemap"),
                                                      " ",
                                                      SPAN(org and org.name or current.messages.NONE),
                                                      " ",
                                                      _class="card_1_line",
                                                      ),
                                                    P(I(_class="icon-globe"),
                                                      " ",
                                                      SPAN(location),
                                                      " ",
                                                      _class="card_1_line",
                                                      ),
                                                    P(record.comments,
                                                      _class="s3-truncate"),
                                                    _class="profile-header",
                                                    ),
                               profile_layers = [layer],
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  commits_widget,
                                                  contacts_widget,
                                                  ],
                               )
        if r.interactive or r.representation == "aadata":
            # Configure fields
            #table.code.readable = table.code.writable = False
            #table.phone1.readable = table.phone1.writable = False
            table.phone2.readable = table.phone2.writable = False
            table.email.readable = table.email.writable = False
        elif r.representation == "geojson":
            # Don't represent facility_status, but just show integers
            s3db.org_site_status.facility_status.represent = None
        return True
    s3.prep = custom_prep
    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive:
            if isinstance(output, dict) and \
               current.auth.s3_has_permission("create", r.table):
                # Insert a Button to Create New in Modal
                output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
                                          _href=URL(c="org", f="facility",
                                                    args=["create.popup"],
                                                    vars={"refresh": "datalist"}),
                                          _class="btn btn-primary s3_modal",
                                          _role="button",
                                          _title=T("Add New Site"),
                                          )
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="org", f="facility",
                                    args=["[id]", "read"]))
                       ]
            db = current.db
            auth = current.auth
            has_permission = auth.s3_has_permission
            ownership_required = auth.permission.ownership_required
            s3_accessible_query = auth.s3_accessible_query
            if has_permission("update", table):
                action = dict(label=str(T("Edit")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "update"]),
                              )
                if ownership_required("update", table):
                    # Check which records can be updated
                    query = s3_accessible_query("update", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            if has_permission("delete", table):
                action = dict(label=str(T("Delete")),
                              _class="action-btn",
                              url=URL(c="org", f="facility",
                                      args=["[id]", "delete"]),
                              )
                if ownership_required("delete", table):
                    # Check which records can be deleted
                    query = s3_accessible_query("delete", table)
                    rows = db(query).select(table._id)
                    restrict = []
                    rappend = restrict.append
                    for row in rows:
                        row_id = row.get("id", None)
                        if row_id:
                            rappend(str(row_id))
                    action["restrict"] = restrict
                actions.append(action)
            s3.actions = actions
            if isinstance(output, dict):
                if "form" in output:
                    output["form"].add_class("org_facility")
                elif "item" in output and hasattr(output["item"], "add_class"):
                    output["item"].add_class("org_facility")
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)
        return output
    s3.postp = custom_postp
    # @ToDo: Don't just hide but prevent building
    #attr["rheader"] = None
    return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_needs_fields(profile=False):
    """
        Configure req_organisation_needs for datalists & popups

        @param profile: called from an Organisation Profile page,
                        where the Organisation name is redundant
    """
    # Truncate details field(s)
    from s3.s3utils import s3_trunk8
    s3_trunk8(lines=2)
    s3db = current.s3db
    table = s3db.req_organisation_needs
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent
    # Volunteer-time needs are not used in this template
    vol = table.vol
    vol.readable = vol.writable = False
    vol_details = table.vol_details
    vol_details.readable = vol_details.writable = False
    # Hide money_details unless used
    s3.jquery_ready.append(
'''$('#req_organisation_needs_money_details__row').hide()
$('#req_organisation_needs_money').change(function(){
 $('#req_organisation_needs_money_details__row').toggle($(this).prop('checked'))
}).change()''')
    list_fields = ["id",
                   "organisation_id",
                   # @ToDo: Are these better displayed elsewhere in Profile view?
                   "organisation_id$logo",
                   "organisation_id$phone",
                   "organisation_id$website",
                   "money",
                   "money_details",
                   #"vol",
                   #"vol_details",
                   "modified_on",
                   "modified_by",
                   ]
    if not profile:
        list_fields.append("organisation_id$name")
    s3db.configure("req_organisation_needs",
                   list_fields = list_fields,
                   )
# -----------------------------------------------------------------------------
def customise_req_organisation_needs_controller(**attr):
    """
        Customise req_organisation_needs controller
    """
    # Same field setup as the Organisation profile widgets use
    customise_org_needs_fields()
    return attr
settings.customise_req_organisation_needs_controller = customise_req_organisation_needs_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
    """
    Customise org_organisation controller
        - Profile Page
        - Requests
    """

    s3 = current.response.s3

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        if r.interactive or r.representation == "aadata":
            # Load normal Model
            s3db = current.s3db
            table = s3db.org_organisation

            list_fields = ["id",
                           "name",
                           "logo",
                           "phone",
                           "website",
                           "needs.money",
                           "needs.money_details",
                           #"needs.vol",
                           #"needs.vol_details",
                           ]

            if r.method == "profile":
                # Customise tables used by widgets
                customise_hrm_human_resource_fields()
                customise_org_facility_fields()
                customise_org_needs_fields(profile=True)
                s3db.org_customise_org_resource_fields("profile")

                contacts_widget = dict(label = "Contacts",
                                       label_create = "Create Contact",
                                       type = "datalist",
                                       tablename = "hrm_human_resource",
                                       context = "organisation",
                                       create_controller = "pr",
                                       create_function = "person",
                                       icon = "icon-contact",
                                       show_on_map = False, # Since they will show within Offices
                                       list_layout = render_contacts,
                                       )
                map_widget = dict(label = "Map",
                                  type = "map",
                                  context = "organisation",
                                  icon = "icon-map",
                                  height = 383,
                                  width = 568,
                                  )
                needs_widget = dict(label = "Needs",
                                    label_create = "Add New Need",
                                    type = "datalist",
                                    tablename = "req_organisation_needs",
                                    multiple = False,
                                    context = "organisation",
                                    icon = "icon-hand-up",
                                    show_on_map = False,
                                    list_layout = render_org_needs,
                                    )
                reqs_widget = dict(label = "Requests",
                                   label_create = "Add New Request",
                                   type = "datalist",
                                   tablename = "req_req",
                                   context = "organisation",
                                   filter = FS("req_status").belongs([0, 1]),
                                   icon = "icon-flag",
                                   layer = "Requests",
                                   # provided by Catalogue Layer
                                   #marker = "request",
                                   list_layout = s3db.req_req_list_layout,
                                   )
                #resources_widget = dict(label = "Resources",
                #                        label_create = "Create Resource",
                #                        type = "datalist",
                #                        tablename = "org_resource",
                #                        context = "organisation",
                #                        #filter = FS("req_status").belongs([0, 1]),
                #                        icon = "icon-wrench",
                #                        layer = "Resources",
                #                        # provided by Catalogue Layer
                #                        #marker = "resource",
                #                        list_layout = s3db.org_resource_list_layout,
                #                        )
                commits_widget = dict(label = "Donations",
                                      #label_create = "Add New Donation",
                                      type = "datalist",
                                      tablename = "req_commit",
                                      context = "organisation",
                                      filter = FS("cancel") == False,
                                      icon = "icon-truck",
                                      show_on_map = False,
                                      #layer = "Donations",
                                      # provided by Catalogue Layer
                                      #marker = "donation",
                                      list_layout = s3db.req_commit_list_layout,
                                      )
                sites_widget = dict(label = "Sites",
                                    label_create = "Add New Site",
                                    type = "datalist",
                                    tablename = "org_facility",
                                    context = "organisation",
                                    filter = FS("obsolete") == False,
                                    icon = "icon-home",
                                    layer = "Facilities",
                                    # provided by Catalogue Layer
                                    #marker = "office",
                                    list_layout = render_sites,
                                    )

                record = r.record
                record_id = record.id
                # Edit button in the profile header for authorised users only
                if current.auth.s3_has_permission("update", table, record_id=record_id):
                    edit_btn = A(I(_class = "icon icon-edit"),
                                 _href=URL(c="org", f="organisation",
                                           args=[record_id, "update.popup"],
                                           vars={"refresh": "datalist"}),
                                 _class="s3_modal",
                                 _title=s3.crud_strings["org_organisation"].title_update,
                                 )
                else:
                    edit_btn = ""
                s3db.configure("org_organisation",
                               profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
                                                            record.name),
                               profile_header = DIV(edit_btn,
                                                    IMG(_class="media-object",
                                                        _src=URL(c="default", f="download",
                                                                 args=[record.logo]),
                                                        ),
                                                    H2(record.name),
                                                    _class="profile-header",
                                                    ),
                               profile_widgets = [reqs_widget,
                                                  map_widget,
                                                  # @ToDo: Move to profile_header
                                                  #needs_widget,
                                                  #resources_widget,
                                                  commits_widget,
                                                  needs_widget,
                                                  contacts_widget,
                                                  sites_widget,
                                                  ]
                               )
            elif r.method == "datalist":
                # Stakeholder selection page
                # 2-column datalist, 6 rows per page
                s3.dl_pagelength = 12
                s3.dl_rowsize = 2

                from s3.s3filter import S3TextFilter, S3OptionsFilter
                filter_widgets = [
                    # no other filter widgets here yet?
                    ]

                # Needs page
                # Truncate details field(s)
                from s3.s3utils import s3_trunk8
                s3_trunk8(lines=2)

                get_vars = current.request.get_vars
                money = get_vars.get("needs.money", None)
                #vol = get_vars.get("needs.vol", None)
                if money:
                    needs_fields = ["needs.money_details"]
                    s3.crud_strings["org_organisation"].title_list = T("Organizations soliciting Money")
                #elif vol:
                #    needs_fields = ["needs.vol_details"]
                #    s3.crud_strings["org_organisation"].title_list = T("Organizations with remote Volunteer opportunities")
                else:
                    yesno = {True: T("Yes"), False: T("No")}
                    needs_fields = ["needs.money_details", "needs.vol_details"]
                    filter_widgets.insert(0, S3OptionsFilter("needs.money",
                                                             options = yesno,
                                                             multiple = False,
                                                             cols = 2,
                                                             hidden = True,
                                                             ))
                    #filter_widgets.insert(1, S3OptionsFilter("needs.vol",
                    #                                         options = yesno,
                    #                                         multiple = False,
                    #                                         cols = 2,
                    #                                         hidden = True,
                    #                                         ))

                # Text search goes first in the filter form
                filter_widgets.insert(0, S3TextFilter(["name",
                                                       "acronym",
                                                       "website",
                                                       "comments",
                                                       ] + needs_fields,
                                                      label = T("Search")))

                ntable = s3db.req_organisation_needs
                s3db.configure("org_organisation",
                               filter_widgets = filter_widgets
                               )

            # Represent used in rendering
            current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent

            # Hide fields
            field = s3db.org_organisation_organisation_type.organisation_type_id
            field.readable = field.writable = False
            table.region_id.readable = table.region_id.writable = False
            table.country.readable = table.country.writable = False
            table.year.readable = table.year.writable = False

            # Return to List view after create/update/delete (unless done via Modal)
            url_next = URL(c="org", f="organisation", args="datalist")
            s3db.configure("org_organisation",
                           create_next = url_next,
                           delete_next = url_next,
                           update_next = url_next,
                           # We want the Create form to be in a modal, not inline, for consistency
                           listadd = False,
                           list_fields = list_fields,
                           list_layout = render_organisations,
                           )

        return True
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        if r.interactive and \
           isinstance(output, dict) and \
           current.auth.s3_has_permission("create", r.table):
            # Insert a Button to Create New in Modal
            output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
                                      _href=URL(c="org", f="organisation",
                                                args=["create.popup"],
                                                vars={"refresh": "datalist"}),
                                      _class="btn btn-primary s3_modal",
                                      _role="button",
                                      _title=T("Create Organization"),
                                      )

        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        return output
    s3.postp = custom_postp

    return attr

settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_site_needs_fields(profile=False):
    """
    Configure the req_site_needs table & its list columns.

    @param profile: True when configuring for a profile widget
                    (omits the site name column)
    """
    s3db = current.s3db

    table = s3db.req_site_needs
    table.modified_by.represent = s3_auth_user_represent_name
    table.modified_on.represent = datetime_represent

    list_fields = ["id",
                   "organisation_id$id",
                   # @ToDo: Are these better displayed elsewhere in Profile view?
                   "organisation_id$name",
                   "organisation_id$logo",
                   "organisation_id$website",
                   "location_id$L1",
                   "location_id$L2",
                   "location_id$L3",
                   "location_id$L4",
                   "location_id$addr_street",
                   "phone1",
                   #"goods",
                   #"goods_details",
                   #"vol",
                   #"vol_details",
                   "modified_on",
                   "modified_by",
                   ]
    if not profile:
        list_fields.append("site_id$name")

    s3db.configure("req_site_needs",
                   list_fields = list_fields,
                   )
# NOTE(review): every other customiser in this file is registered on
# `settings` (or left unregistered when it is just a helper); `s3` is not
# visibly bound at module level in this chunk, so this line looks like it
# would raise NameError at import time - confirm against the full file.
s3.customise_site_needs_fields = customise_site_needs_fields
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
    """
    Customise pr_person controller
    - Contacts directory with inline HR / contact / photo components
    """

    s3db = current.s3db
    request = current.request
    s3 = current.response.s3

    tablename = "pr_person"
    table = s3db.pr_person

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            if not result:
                return False

        if r.method == "validate":
            # Can't validate image without the file
            image_field = s3db.pr_image.image
            image_field.requires = None

        if r.interactive or r.representation == "aadata":
            if request.controller != "default":
                # CRUD Strings
                ADD_CONTACT = T("Create Contact")
                s3.crud_strings[tablename] = Storage(
                    label_create = T("Create Contact"),
                    title_display = T("Contact Details"),
                    title_list = T("Contact Directory"),
                    title_update = T("Edit Contact Details"),
                    label_list_button = T("List Contacts"),
                    label_delete_button = T("Delete Contact"),
                    msg_record_created = T("Contact added"),
                    msg_record_modified = T("Contact details updated"),
                    msg_record_deleted = T("Contact deleted"),
                    msg_list_empty = T("No Contacts currently registered"))

            MOBILE = settings.get_ui_label_mobile_phone()
            EMAIL = T("Email")

            htable = s3db.hrm_human_resource
            htable.organisation_id.widget = None
            site_field = htable.site_id
            represent = S3Represent(lookup="org_site")
            site_field.represent = represent
            site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
                                            represent,
                                            orderby = "org_site.name")
            from s3layouts import S3AddResourceLink
            site_field.comment = S3AddResourceLink(c="org", f="facility",
                                                   vars={"child": "site_id"},
                                                   label=T("Add New Site"),
                                                   title=T("Site"),
                                                   tooltip=T("If you don't see the Site in the list, you can add a new one by clicking link 'Add New Site'."))

            # ImageCrop widget doesn't currently work within an Inline Form
            s3db.pr_image.image.widget = None

            hr_fields = ["organisation_id",
                         "job_title_id",
                         "site_id",
                         "site_contact",
                         ]
            if r.method in ("create", "update"):
                # Context from a Profile page?"
                organisation_id = request.get_vars.get("(organisation)", None)
                if organisation_id:
                    field = s3db.hrm_human_resource.organisation_id
                    field.default = organisation_id
                    field.readable = field.writable = False
                    hr_fields.remove("organisation_id")

            s3_sql_custom_fields = [
                    "first_name",
                    #"middle_name",
                    "last_name",
                    S3SQLInlineComponent(
                        "human_resource",
                        name = "human_resource",
                        label = "",
                        multiple = False,
                        fields = hr_fields,
                    ),
                    S3SQLInlineComponent(
                        "image",
                        name = "image",
                        label = T("Photo"),
                        multiple = False,
                        fields = [("", "image")],
                        filterby = dict(field = "profile",
                                        options = [True]
                                        )
                    ),
                ]

            list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
                           "first_name",
                           #"middle_name",
                           "last_name",
                           (T("Job Title"), "human_resource.job_title_id"),
                           (T("Site"), "human_resource.site_id"),
                           (T("Site Contact"), "human_resource.site_contact"),
                           ]

            # Don't include Email/Phone for unauthenticated users
            if current.auth.is_logged_in():
                list_fields += [(MOBILE, "phone.value"),
                                (EMAIL, "email.value"),
                                ]
                s3_sql_custom_fields.insert(3,
                                            S3SQLInlineComponent(
                                                "contact",
                                                name = "phone",
                                                label = MOBILE,
                                                multiple = False,
                                                fields = [("", "value")],
                                                filterby = dict(field = "contact_method",
                                                                options = "SMS")),
                                            )
                s3_sql_custom_fields.insert(3,
                                            S3SQLInlineComponent(
                                                "contact",
                                                name = "email",
                                                label = EMAIL,
                                                multiple = False,
                                                fields = [("", "value")],
                                                filterby = dict(field = "contact_method",
                                                                options = "EMAIL")),
                                            )

            crud_form = S3SQLCustomForm(*s3_sql_custom_fields)

            if r.id and request.controller == "default":
                url_next = URL(c="default", f="person", args=[r.id, "read"])
            else:
                # Return to List view after create/update/delete (unless done via Modal)
                url_next = URL(c="pr", f="person")

            s3db.configure(tablename,
                           create_next = url_next,
                           crud_form = crud_form,
                           delete_next = url_next,
                           list_fields = list_fields,
                           # Don't include a Create form in 'More' popups
                           listadd = False if r.method=="datalist" else True,
                           list_layout = render_contacts,
                           update_next = url_next,
                           )

            # Move fields to their desired Locations
            # Disabled as breaks submission of inline_component
            #i18n = []
            #iappend = i18n.append
            #iappend('''i18n.office="%s"''' % T("Office"))
            #iappend('''i18n.organisation="%s"''' % T("Organization"))
            #iappend('''i18n.job_title="%s"''' % T("Job Title"))
            #i18n = '''\n'''.join(i18n)
            #s3.js_global.append(i18n)
            #s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)

        return True
    s3.prep = custom_prep

    # Custom postp
    standard_postp = s3.postp
    def custom_postp(r, output):
        # Call standard postp
        if callable(standard_postp):
            output = standard_postp(r, output)

        if r.interactive and isinstance(output, dict):
            output["rheader"] = ""
            actions = [dict(label=str(T("Open")),
                            _class="action-btn",
                            url=URL(c="pr", f="person",
                                    args=["[id]", "read"]))
                       ]
            # All users just get "Open"
            #db = current.db
            #auth = current.auth
            #has_permission = auth.s3_has_permission
            #ownership_required = auth.permission.ownership_required
            #s3_accessible_query = auth.s3_accessible_query
            #if has_permission("update", table):
            #    action = dict(label=str(T("Edit")),
            #                  _class="action-btn",
            #                  url=URL(c="pr", f="person",
            #                          args=["[id]", "update"]),
            #                  )
            #    if ownership_required("update", table):
            #        # Check which records can be updated
            #        query = s3_accessible_query("update", table)
            #        rows = db(query).select(table._id)
            #        restrict = []
            #        rappend = restrict.append
            #        for row in rows:
            #            row_id = row.get("id", None)
            #            if row_id:
            #                rappend(str(row_id))
            #        action["restrict"] = restrict
            #    actions.append(action)
            #if has_permission("delete", table):
            #    action = dict(label=str(T("Delete")),
            #                  _class="action-btn",
            #                  url=URL(c="pr", f="person",
            #                          args=["[id]", "delete"]),
            #                  )
            #    if ownership_required("delete", table):
            #        # Check which records can be deleted
            #        query = s3_accessible_query("delete", table)
            #        rows = db(query).select(table._id)
            #        restrict = []
            #        rappend = restrict.append
            #        for row in rows:
            #            row_id = row.get("id", None)
            #            if row_id:
            #                rappend(str(row_id))
            #        action["restrict"] = restrict
            #    actions.append(action)
            s3.actions = actions

            if "form" in output:
                output["form"].add_class("pr_person")
            elif "item" in output and hasattr(output["item"], "add_class"):
                output["item"].add_class("pr_person")

        return output
    s3.postp = custom_postp

    return attr

settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
    """
    Customise doc_document controller
    - require a name for added docs & simplify the form
    """

    s3 = current.response.s3
    s3db = current.s3db
    tablename = "doc_document"
    table = s3db.doc_document

    # Custom PreP
    standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        if callable(standard_prep):
            result = standard_prep(r)
            # FIX: the result was previously ignored - every other
            # custom_prep in this file aborts when standard prep fails
            if not result:
                return False

        # Filter Out Docs from Newsfeed
        # (DAL query: "!= None" is deliberate, do not change to "is not None")
        s3.filter = (table.name != None)

        if r.interactive:
            s3.crud_strings[tablename] = Storage(
                label_create = T("Add Document"),
                title_display = T("Document"),
                title_list = T("Documents"),
                title_update = T("Edit Document"),
                label_list_button = T("List New Documents"),
                label_delete_button = T("Remove Documents"),
                msg_record_created = T("Documents added"),
                msg_record_modified = T("Documents updated"),
                msg_record_deleted = T("Documents removed"),
                msg_list_empty = T("No Documents currently recorded"))

            # Force added docs to have a name
            table.name.requires = IS_NOT_EMPTY()

            list_fields = ["name",
                           "file",
                           "url",
                           "organisation_id",
                           "comments",
                           ]
            crud_form = S3SQLCustomForm(*list_fields)
            s3db.configure(tablename,
                           list_fields = list_fields,
                           crud_form = crud_form,
                           )

        return True
    s3.prep = custom_prep

    return attr

settings.customise_doc_document_controller = customise_doc_document_controller
# -----------------------------------------------------------------------------
# Requests (req) module settings
settings.req.req_type = ["Other"]
settings.req.requester_label = "Contact"
# Uncomment if the User Account logging the Request is NOT normally the Requester
settings.req.requester_is_author = False
# Uncomment to have Donations include a 'Value' field
settings.req.commit_value = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
#settings.req.comittter_is_author = False
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
settings.req.requester_to_site = True
def customise_req_req_controller(**attr):
    """
    Customise req_req controller
    - datalists filtered to open Requests
    - Profile page with Donations & Sites widgets
    """

    s3 = current.response.s3

    # Custom PreP
    #standard_prep = s3.prep
    def custom_prep(r):
        # Call standard prep
        #if callable(standard_prep):
        #    result = standard_prep(r)

        s3db = current.s3db
        if r.component_name == "commit":
            s3db.req_customise_commit_fields()
        else:
            s3db.req_customise_req_fields()

        if r.method in ("datalist", "datalist.dl"):
            # Only show open Requests (status 0 or 1)
            s3.filter = (r.table.req_status.belongs([0, 1]))
        elif r.method == "profile":
            # Customise tables used by widgets
            s3db.req_customise_commit_fields()
            customise_org_facility_fields()

            record = r.record
            record_id = record.id
            commits_widget = dict(label = "Donations",
                                  label_create = "Add New Donation",
                                  type = "datalist",
                                  tablename = "req_commit",
                                  context = "request",
                                  default = "req_id=%s" % record_id,
                                  filter = FS("cancel") == False,
                                  icon = "icon-truck",
                                  show_on_map = False,
                                  #layer = "Donations",
                                  # provided by Catalogue Layer
                                  #marker = "donation",
                                  list_layout = s3db.req_commit_list_layout,
                                  )
            filter = (FS("obsolete") == False)
            sites_widget = dict(label = "Sites",
                                #label_create = "Add New Site",
                                type = "datalist",
                                tablename = "org_facility",
                                multiple = False,
                                context = "request",
                                filter = filter,
                                icon = "icon-home",
                                layer = "Facilities",
                                # provided by Catalogue Layer
                                #marker = "office",
                                list_layout = render_sites,
                                )
            if current.auth.s3_has_permission("update", r.table, record_id=record_id):
                edit_btn = A(I(_class = "icon icon-edit"),
                             _href=URL(c="req", f="req",
                                       args=[record_id, "update.popup"],
                                       vars={"refresh": "datalist"}),
                             _class="s3_modal",
                             _title=s3.crud_strings["req_req"].title_update,
                             )
            else:
                edit_btn = ""

            # NOTE(review): assumes record.site_id always matches an org_site
            # row - `site` would be None otherwise; confirm upstream validation
            db = current.db
            stable = db.org_site
            query = (stable.site_id == record.site_id)
            site = db(query).select(stable.name,
                                    stable.location_id,
                                    stable.organisation_id,
                                    limitby=(0, 1)
                                    ).first()
            location = s3db.gis_LocationRepresent(sep=" | ")(site.location_id)
            otable = db.org_organisation
            org = db(otable.id == site.organisation_id).select(otable.name,
                                                               otable.logo,
                                                               limitby=(0, 1)
                                                               ).first()
            if org and org.logo:
                logo = URL(c="default", f="download", args=[org.logo])
            else:
                # @ToDo: Placeholder
                logo = "#"

            s3db.configure("req_req",
                           profile_title = s3.crud_strings["req_req"].title_list,
                           profile_header = DIV(edit_btn,
                                                A(IMG(_class="media-object",
                                                      _src=logo,
                                                      ),
                                                  _class="pull-left",
                                                  #_href=org_url,
                                                  ),
                                                H2(site.name),
                                                P(I(_class="icon-sitemap"),
                                                  " ",
                                                  SPAN(org and org.name or current.messages.NONE),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(I(_class="icon-globe"),
                                                  " ",
                                                  SPAN(location),
                                                  " ",
                                                  _class="card_1_line",
                                                  ),
                                                P(record.purpose,
                                                  _class="s3-truncate"),
                                                _class="profile-header",
                                                ),
                           profile_widgets = [commits_widget,
                                              sites_widget,
                                              ],
                           )

        return True
    s3.prep = custom_prep

    # Disable postp
    s3.postp = None

    return attr

settings.customise_req_req_controller = customise_req_req_controller
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
    """
    Customise req_commit controller
    - hide cancelled Donations in datalists
    """

    s3 = current.response.s3

    def custom_prep(r):
        # Standard prep is deliberately not called here
        current.s3db.req_customise_commit_fields()

        if r.method in ("datalist", "datalist.dl"):
            # DAL query: exclude cancelled commitments
            s3.filter = (r.table.cancel != True)

        return True
    s3.prep = custom_prep

    # Disable postp
    s3.postp = None

    return attr

settings.customise_req_commit_controller = customise_req_commit_controller
# =============================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
    # Core modules which shouldn't be disabled
    ("default", Storage(
        name_nice = "Home",
        restricted = False, # Use ACLs to control access to this module
        access = None,      # All Users (inc Anonymous) can see this module in the default menu & access the controller
        module_type = None  # This item is not shown in the menu
    )),
    ("admin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("appadmin", Storage(
        name_nice = "Administration",
        #description = "Site Administration",
        restricted = True,
        module_type = None  # No Menu
    )),
    ("errors", Storage(
        name_nice = "Ticket Viewer",
        #description = "Needed for Breadcrumbs",
        restricted = False,
        module_type = None  # No Menu
    )),
    ("sync", Storage(
        name_nice = "Synchronization",
        #description = "Synchronization",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu & access the controller
        module_type = None  # This item is handled separately for the menu
    )),
    ("translate", Storage(
        name_nice = "Translation Functionality",
        #description = "Selective translation of strings based on module.",
        module_type = None,
    )),
    ("gis", Storage(
        name_nice = "Map",
        #description = "Situation Awareness & Geospatial Analysis",
        restricted = True,
        module_type = 1,    # 1st item in the menu
    )),
    ("pr", Storage(
        name_nice = "Persons",
        #description = "Central point to record details on People",
        restricted = True,
        access = "|1|",     # Only Administrators can see this module in the default menu (access to controller is possible to all still)
        module_type = None
    )),
    ("org", Storage(
        name_nice = "Organizations",
        #description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
        restricted = True,
        module_type = None
    )),
    # All modules below here should be possible to disable safely
    ("hrm", Storage(
        name_nice = "Contacts",
        #description = "Human Resources Management",
        restricted = True,
        module_type = None,
    )),
    ("cms", Storage(
        name_nice = "Content Management",
        restricted = True,
        module_type = None,
    )),
    ("doc", Storage(
        name_nice = "Documents",
        #description = "A library of digital resources, such as photos, documents and reports",
        restricted = True,
        module_type = None,
    )),
    ("msg", Storage(
        name_nice = "Messaging",
        #description = "Sends & Receives Alerts via Email & SMS",
        restricted = True,
        # The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
        module_type = None,
    )),
    #("event", Storage(
    #    name_nice = "Disasters",
    #    #description = "Events",
    #    restricted = True,
    #    module_type = None
    #)),
    ("req", Storage(
        name_nice = "Requests",
        #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
        restricted = True,
        module_type = None,
    )),
    #("project", Storage(
    #    name_nice = "Projects",
    #    restricted = True,
    #    module_type = None
    #)),
    ("stats", Storage(
        name_nice = "Statistics",
        restricted = True,
        module_type = None
    )),
    #("vulnerability", Storage(
    #    name_nice = "Vulnerability",
    #    restricted = True,
    #    module_type = None
    #)),
    #("transport", Storage(
    #    name_nice = "Transport",
    #    restricted = True,
    #    module_type = None
    #)),
    #("hms", Storage(
    #    name_nice = "Hospitals",
    #    restricted = True,
    #    module_type = None
    #)),
    #("cr", Storage(
    #    name_nice = "Shelters",
    #    restricted = True,
    #    module_type = None
    #)),
    ("supply", Storage(
        name_nice = "Supply Chain Management",
        restricted = True,
        module_type = None
    )),
])
|
devinbalkind/eden
|
private/templates/Philippines/config.py
|
Python
|
mit
| 128,192
|
[
"Amber"
] |
b00c588d303175fa529f95d9d3a0edc02364f623aa1950b2976a900d74e0c062
|
"""
Integration with native distribution package managers.
@since: 0.28
"""
# Copyright (C) 2009, Thomas Leonard
# See the README file for details, or visit http://0install.net.
from zeroinstall import _, logger
import os, platform, re, subprocess, sys
from zeroinstall.injector import namespaces, model
from zeroinstall.support import basedir, portable_rename, intern
from zeroinstall.support.tasks import get_loop
_dotted_ints = '[0-9]+(?:\.[0-9]+)*'
# This matches a version number that would be a valid Zero Install version without modification
_zeroinstall_regexp = '(?:%s)(?:-(?:pre|rc|post|)(?:%s))*' % (_dotted_ints, _dotted_ints)
# This matches the interesting bits of distribution version numbers
# (first matching group is for Java-style 6b17 or 7u9 syntax, or "major")
_version_regexp = '(?:[a-z])?({ints}\.?[bu])?({zero})(-r{ints})?'.format(zero = _zeroinstall_regexp, ints = _dotted_ints)
_PYTHON_URI = 'http://repo.roscidus.com/python/python'
def _set_quick_test(impl, path):
"""Set impl.quick_test_file and impl.quick_test_mtime from path."""
impl.quick_test_file = path
impl.quick_test_mtime = int(os.stat(path).st_mtime)
# We try to do updates atomically without locking, but we don't worry too much about
# duplicate entries or being a little out of sync with the on-disk copy.
class Cache(object):
    def __init__(self, cache_leaf, source, format):
        """Maintain a cache file (e.g. ~/.cache/0install.net/injector/$name).
        If the size or mtime of $source has changed, or the cache's
        format version is different, reset the cache first.
        @type cache_leaf: str
        @type source: str
        @type format: int"""
        self.cache_leaf = cache_leaf
        self.source = source
        self.format = format
        self.cache_dir = basedir.save_cache_path(namespaces.config_site,
                                                 namespaces.config_prog)
        self.cached_for = {}  # Attributes of source when cache was created
        try:
            self._load_cache()
        except Exception as ex:
            # Any load problem (missing file, wrong format, stale source)
            # just resets the cache
            logger.info(_("Failed to load cache (%s). Flushing..."), ex)
            self.flush()

    def flush(self):
        # Wipe the cache: write a fresh header-only file (atomically, via a
        # temp file + rename), then reload it.
        try:
            info = os.stat(self.source)
            mtime = int(info.st_mtime)
            size = info.st_size
        except Exception as ex:
            logger.warning("Failed to stat %s: %s", self.source, ex)
            mtime = size = 0
        self.cache = {}
        import tempfile
        tmp = tempfile.NamedTemporaryFile(mode = 'wt', dir = self.cache_dir, delete = False)
        # Header format: key=value lines, terminated by a blank line
        tmp.write("mtime=%d\nsize=%d\nformat=%d\n\n" % (mtime, size, self.format))
        tmp.close()
        portable_rename(tmp.name, os.path.join(self.cache_dir, self.cache_leaf))
        self._load_cache()

    # Populate self.cache from our saved cache file.
    # Throws an exception if the cache doesn't exist or has the wrong format.
    def _load_cache(self):
        self.cache = cache = {}
        with open(os.path.join(self.cache_dir, self.cache_leaf)) as stream:
            # First section: header (mtime/size/format), up to the blank line
            for line in stream:
                line = line.strip()
                if not line:
                    break
                key, value = line.split('=', 1)
                if key in ('mtime', 'size', 'format'):
                    self.cached_for[key] = int(value)
            self._check_valid()
            # Second section: the cached key=value entries
            for line in stream:
                key, value = line.split('=', 1)
                cache[key] = value[:-1]  # strip trailing newline

    # Check the source file hasn't changed since we created the cache
    def _check_valid(self):
        info = os.stat(self.source)
        if self.cached_for['mtime'] != int(info.st_mtime):
            raise Exception("Modification time of %s has changed" % self.source)
        if self.cached_for['size'] != info.st_size:
            raise Exception("Size of %s has changed" % self.source)
        if self.cached_for.get('format', None) != self.format:
            raise Exception("Format of cache has changed")

    def get(self, key):
        """@type key: str
        @rtype: str"""
        try:
            self._check_valid()
        except Exception as ex:
            # Source changed underneath us: reset and report a miss
            logger.info(_("Cache needs to be refreshed: %s"), ex)
            self.flush()
            return None
        else:
            return self.cache.get(key, None)

    def put(self, key, value):
        """@type key: str
        @type value: str"""
        cache_path = os.path.join(self.cache_dir, self.cache_leaf)
        self.cache[key] = value
        try:
            # Append-only update; duplicates are tolerated (see note above)
            with open(cache_path, 'a') as stream:
                stream.write('%s=%s\n' % (key, value))
        except Exception as ex:
            logger.warning("Failed to write to cache %s: %s=%s: %s", cache_path, key, value, ex)
def try_cleanup_distro_version(version):
    """Try to turn a distribution version string into one readable by Zero Install.
    We do this by stripping off anything we can't parse.
    @type version: str
    @return: the part we understood, or None if we couldn't parse anything
    @rtype: str"""
    if ':' in version:
        # Drop the Debian-style epoch prefix
        version = version.split(':')[1]
    version = version.replace('_', '-')

    suffix = ''
    if '~' in version:
        # A tilde part sorts before the base version; map it to "-pre"
        version, tilde_part = version.split('~', 1)
        if tilde_part.startswith('pre'):
            tilde_part = tilde_part[3:]
        suffix = '-pre' + (try_cleanup_distro_version(tilde_part) or '')

    match = re.match(_version_regexp, version)
    if not match:
        return None

    major, base, revision = match.groups()
    if major is not None:
        # Java-style "6b17" / "7u9" -> "6.17" / "7.9"
        base = major[:-1].rstrip('.') + '.' + base
    if revision is not None:
        # "-rN" -> "-N"
        base = '%s-%s' % (base, revision[2:])
    return base + suffix
class Distribution(object):
"""Represents a distribution with which we can integrate.
Sub-classes should specialise this to integrate with the package managers of
particular distributions. This base class ignores the native package manager.
@since: 0.28
@ivar name: the default value for Implementation.distro_name for our implementations
@type name: str
@ivar system_paths: list of paths to search for binaries (we MUST NOT find 0install launchers, so only include directories where system packages install binaries - e.g. /usr/bin but not /usr/local/bin)
@type system_paths: [str]
"""
name = "fallback"
_packagekit = None
system_paths = ['/usr/bin', '/bin', '/usr/sbin', '/sbin']
    def get_package_info(self, package, factory):
        """Get information about the given package.
        Add zero or more implementations using the factory (typically at most two
        will be added; the currently installed version and the latest available).
        @param package: package name (e.g. "gimp")
        @type package: str
        @param factory: function for creating new DistributionImplementation objects from IDs
        @type factory: str -> L{model.DistributionImplementation}"""
        # Base class: no native package manager, so nothing to add.
        return
    def get_score(self, distribution):
        """Indicate how closely the host distribution matches this one.
        The <package-implementation> with the highest score is passed
        to L{Distribution.get_package_info}. If several elements get
        the same score, get_package_info is called for all of them.
        @param distribution: a distribution name
        @type distribution: str
        @return: an integer, or -1 if there is no match at all
        @rtype: int"""
        # Base class matches any distribution with a neutral score.
        return 0
    def get_feed(self, master_feed_url, package_impls):
        """Generate a feed containing information about distribution packages.
        This should immediately return a feed containing an implementation for the
        package if it's already installed. Information about versions that could be
        installed using the distribution's package manager can be added asynchronously
        later (see L{fetch_candidates}).
        @rtype: L{model.ZeroInstallFeed}"""
        feed = model.ZeroInstallFeed(None)
        feed.url = 'distribution:' + master_feed_url

        for item, item_attrs, _depends in package_impls:
            package = item_attrs.get('package', None)
            if package is None:
                raise model.InvalidInterface(_("Missing 'package' attribute on %s") % item)

            new_impls = []

            # factory is handed to get_package_info; it registers one
            # implementation per distribution candidate
            def factory(id, only_if_missing = False, installed = True):
                assert id.startswith('package:')
                if id in feed.implementations:
                    if only_if_missing:
                        return None
                    logger.warning(_("Duplicate ID '%s' for DistributionImplementation"), id)

                impl = model.DistributionImplementation(feed, id, self, item)
                feed.implementations[id] = impl
                new_impls.append(impl)

                impl.installed = installed
                impl.metadata = item_attrs

                if impl.main is None:
                    item_main = item_attrs.get('main', None)
                    if item_main:
                        impl.main = item_main
                impl.upstream_stability = model.packaged

                return impl

            self.get_package_info(package, factory)

            # Post-process everything the factory created
            for impl in new_impls:
                self.fixup(package, impl)
                if impl.installed:
                    self.installed_fixup(impl)

        if master_feed_url == _PYTHON_URI and os.name != "nt":
            # Hack: we can support Python on platforms with unsupported package managers
            # by adding the implementation of Python running us now to the list.
            python_version = '.'.join([str(v) for v in sys.version_info if isinstance(v, int)])
            impl_id = 'package:host:python:' + python_version
            assert impl_id not in feed.implementations
            impl = model.DistributionImplementation(feed, impl_id, self, distro_name = 'host')
            impl.installed = True
            impl.version = model.parse_version(python_version)
            impl.main = sys.executable or '/usr/bin/python'
            impl.upstream_stability = model.packaged
            # host_machine: presumably a module-level constant defined
            # elsewhere in this file - not visible in this chunk
            impl.machine = host_machine # (hopefully)
            _set_quick_test(impl, sys.executable)
            feed.implementations[impl_id] = impl
        elif master_feed_url == 'http://repo.roscidus.com/python/python-gobject' and os.name != "nt":
            gobject = get_loop().gobject
            if gobject:
                # Likewise, we know that there is a native python-gobject available for our Python
                impl_id = 'package:host:python-gobject:' + '.'.join(str(x) for x in gobject.pygobject_version)
                assert impl_id not in feed.implementations
                impl = model.DistributionImplementation(feed, impl_id, self, distro_name = 'host')
                impl.installed = True
                impl.version = [list(gobject.pygobject_version)]
                impl.upstream_stability = model.packaged
                impl.machine = host_machine # (hopefully)
                if gobject.__file__.startswith('<'):
                    _set_quick_test(impl, gobject.__path__) # Python 3
                else:
                    _set_quick_test(impl, gobject.__file__) # Python 2
                feed.implementations[impl_id] = impl

        return feed
def fetch_candidates(self, package_impls):
    """Collect information about versions we could install using
    the distribution's package manager. On success, the distribution
    feed in iface_cache is updated.
    @return: a L{tasks.Blocker} if the task is in progress, or None if not"""
    pk = self.packagekit
    if not pk.available:
        return None
    names = []
    for item, _item_attrs, _depends in package_impls:
        names.append(item.getAttribute("package"))
    return pk.fetch_candidates(names)
@property
def packagekit(self):
    """For use by subclasses.
    Lazily creates and caches the PackageKit proxy on first access.
    @rtype: L{packagekit.PackageKit}"""
    if self._packagekit:
        return self._packagekit
    from zeroinstall.injector import packagekit
    self._packagekit = packagekit.PackageKit()
    return self._packagekit
def fixup(self, package, impl):
    """Some packages require special handling (e.g. Java). This is called for each
    package that was added by L{get_package_info} after it returns. The default
    method does nothing.
    @param package: the name of the package
    @type package: str
    @param impl: the constructed implementation
    @type impl: L{DistributionImplementation}"""
    pass
def installed_fixup(self, impl):
    """Called when an installed package is added (after L{fixup}), or when installation
    completes. This is useful to fix up the main value.
    The default implementation checks that main exists, and searches L{Distribution.system_paths} for
    it if not.
    @type impl: L{DistributionImplementation}
    @since: 1.11"""
    main = impl.main
    if not main:
        return
    if os.path.isabs(main) and os.path.exists(main):
        return  # main already points at an existing file; nothing to fix
    # Otherwise, look for the binary by name on the distribution's system paths.
    exe_name = os.path.basename(main)
    if os.name == "nt" and not exe_name.endswith('.exe'):
        exe_name += '.exe'
    for directory in self.system_paths:
        candidate = os.path.join(directory, exe_name)
        if os.path.isfile(candidate):
            logger.info("Found %s by searching system paths", candidate)
            impl.main = candidate
            return
    logger.info("Binary '%s' not found in any system path (checked %s)", exe_name, self.system_paths)
def get_score(self, distro_name):
    """Return 1 if this distribution matches the given name, 0 otherwise.
    @type distro_name: str
    @rtype: int"""
    return 1 if distro_name == self.name else 0
class WindowsDistribution(Distribution):
    # Detects runtimes installed through the Windows registry (Java, .NET).
    name = 'Windows'
    system_paths = []

    def get_package_info(self, package, factory):
        # Maps a small fixed set of well-known package names (openjdk-*, netfx*)
        # to registry probes; other names produce no implementations.
        def _is_64bit_windows():
            # True on a 64-bit OS (either a win64 Python, or a 32-bit Python
            # running under WOW64); raises if not running on Windows at all.
            p = sys.platform
            from win32process import IsWow64Process
            if p == 'win64' or (p == 'win32' and IsWow64Process()): return True
            elif p == 'win32': return False
            else: raise Exception(_("WindowsDistribution may only be used on the Windows platform"))

        def _read_hklm_reg(key_name, value_name):
            # Read value_name from HKLM\key_name in both the 32-bit and 64-bit
            # registry views. Returns (value32, value64); a missing key/value
            # yields '' for that slot (any registry error is swallowed).
            from win32api import RegOpenKeyEx, RegQueryValueEx, RegCloseKey
            from win32con import HKEY_LOCAL_MACHINE, KEY_READ
            KEY_WOW64_64KEY = 0x0100
            KEY_WOW64_32KEY = 0x0200
            if _is_64bit_windows():
                try:
                    key32 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ | KEY_WOW64_32KEY)
                    (value32, _) = RegQueryValueEx(key32, value_name)
                    RegCloseKey(key32)
                except:
                    value32 = ''
                try:
                    key64 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ | KEY_WOW64_64KEY)
                    (value64, _) = RegQueryValueEx(key64, value_name)
                    RegCloseKey(key64)
                except:
                    value64 = ''
            else:
                # 32-bit OS: only one registry view exists.
                try:
                    key32 = RegOpenKeyEx(HKEY_LOCAL_MACHINE, key_name, 0, KEY_READ)
                    (value32, _) = RegQueryValueEx(key32, value_name)
                    RegCloseKey(key32)
                except:
                    value32 = ''
                value64 = ''
            return (value32, value64)

        def find_java(part, win_version, zero_version):
            # Look up JavaHome for both architectures and add an installed
            # implementation for each JRE/JDK whose java.exe exists on disk.
            reg_path = r"SOFTWARE\JavaSoft\{part}\{win_version}".format(part = part, win_version = win_version)
            (java32_home, java64_home) = _read_hklm_reg(reg_path, "JavaHome")
            for (home, arch) in [(java32_home, 'i486'), (java64_home, 'x86_64')]:
                if os.path.isfile(home + r"\bin\java.exe"):
                    impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
                    impl.machine = arch
                    impl.version = model.parse_version(zero_version)
                    impl.upstream_stability = model.packaged
                    impl.main = home + r"\bin\java.exe"
                    _set_quick_test(impl, impl.main)

        def find_netfx(win_version, zero_version):
            # .NET Framework: the 'Install' registry value is 1 when present.
            reg_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP\{win_version}".format(win_version = win_version)
            (netfx32_install, netfx64_install) = _read_hklm_reg(reg_path, "Install")
            for (install, arch) in [(netfx32_install, 'i486'), (netfx64_install, 'x86_64')]:
                impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
                impl.installed = (install == 1)
                impl.machine = arch
                impl.version = model.parse_version(zero_version)
                impl.upstream_stability = model.packaged
                impl.main = "" # .NET executables do not need a runner on Windows but they need one elsewhere

        def find_netfx_release(win_version, release_version, zero_version):
            # In-place .NET upgrades (e.g. 4.5 over 4.0) share a registry key;
            # they are distinguished by the numeric 'Release' value.
            # NOTE(review): 'release' comes from the registry and is compared with
            # >= against an int — assumes RegQueryValueEx returns an int (or '' on
            # failure, which is excluded first); confirm on a live system.
            reg_path = r"SOFTWARE\Microsoft\NET Framework Setup\NDP\{win_version}".format(win_version = win_version)
            (netfx32_install, netfx64_install) = _read_hklm_reg(reg_path, "Install")
            (netfx32_release, netfx64_release) = _read_hklm_reg(reg_path, "Release")
            for (install, release, arch) in [(netfx32_install, netfx32_release, 'i486'), (netfx64_install, netfx64_release, 'x86_64')]:
                impl = factory('package:windows:%s:%s:%s' % (package, zero_version, arch))
                impl.installed = (install == 1 and release != '' and release >= release_version)
                impl.machine = arch
                impl.version = model.parse_version(zero_version)
                impl.upstream_stability = model.packaged
                impl.main = "" # .NET executables do not need a runner on Windows but they need one elsewhere

        if package == 'openjdk-6-jre':
            find_java("Java Runtime Environment", "1.6", '6')
        elif package == 'openjdk-6-jdk':
            find_java("Java Development Kit", "1.6", '6')
        elif package == 'openjdk-7-jre':
            find_java("Java Runtime Environment", "1.7", '7')
        elif package == 'openjdk-7-jdk':
            find_java("Java Development Kit", "1.7", '7')
        elif package == 'netfx':
            find_netfx("v2.0.50727", '2.0')
            find_netfx("v3.0", '3.0')
            find_netfx("v3.5", '3.5')
            find_netfx("v4\\Full", '4.0')
            find_netfx_release("v4\\Full", 378389, '4.5')
            find_netfx("v5", '5.0')
        elif package == 'netfx-client':
            find_netfx("v4\\Client", '4.0')
            find_netfx_release("v4\\Client", 378389, '4.5')
class DarwinDistribution(Distribution):
    """Detects software installed on the macOS host itself (Java via
    /usr/libexec/java_home, and a few known programs by path).
    @since: 1.11"""
    name = 'Darwin'

    def get_package_info(self, package, factory):
        """@type package: str"""
        def java_home(version, arch):
            # Ask the system for the JAVA_HOME matching version/arch;
            # returns '' if java_home fails (--failfast).
            # NOTE(review): the devnull fd opened here is never closed —
            # a small fd leak per call; confirm and fix upstream.
            null = os.open(os.devnull, os.O_WRONLY)
            child = subprocess.Popen(["/usr/libexec/java_home", "--failfast", "--version", version, "--arch", arch],
                    stdout = subprocess.PIPE, stderr = null, universal_newlines = True)
            home = child.stdout.read().strip()
            child.stdout.close()
            child.wait()
            return home

        def find_java(part, jvm_version, zero_version):
            # Register an installed implementation for each architecture
            # whose java binary actually exists.
            for arch in ['i386', 'x86_64']:
                home = java_home(jvm_version, arch)
                if os.path.isfile(home + "/bin/java"):
                    impl = factory('package:darwin:%s:%s:%s' % (package, zero_version, arch))
                    impl.machine = arch
                    impl.version = model.parse_version(zero_version)
                    impl.upstream_stability = model.packaged
                    impl.main = home + "/bin/java"
                    _set_quick_test(impl, impl.main)

        if package == 'openjdk-6-jre':
            find_java("Java Runtime Environment", "1.6", '6')
        elif package == 'openjdk-6-jdk':
            find_java("Java Development Kit", "1.6", '6')
        elif package == 'openjdk-7-jre':
            find_java("Java Runtime Environment", "1.7", '7')
        elif package == 'openjdk-7-jdk':
            find_java("Java Development Kit", "1.7", '7')

        def get_output(args):
            # Run a command and capture its stdout as text.
            child = subprocess.Popen(args, stdout = subprocess.PIPE, universal_newlines = True)
            return child.communicate()[0]

        def get_version(program):
            # Heuristic: the version is the last word of the first line of
            # "<program> --version" output.
            stdout = get_output([program, "--version"])
            return stdout.strip().split('\n')[0].split()[-1] # the last word of the first line

        def find_program(file):
            # Register an implementation for an executable found at a fixed
            # path, using its self-reported version.
            if os.path.isfile(file) and os.access(file, os.X_OK):
                program_version = try_cleanup_distro_version(get_version(file))
                # only_if_missing=True: don't duplicate an impl another
                # distribution (e.g. MacPorts) already added under this id.
                impl = factory('package:darwin:%s:%s' % (package, program_version), True)
                if impl:
                    impl.installed = True
                    impl.version = model.parse_version(program_version)
                    impl.upstream_stability = model.packaged
                    impl.machine = host_machine # (hopefully)
                    impl.main = file
                    _set_quick_test(impl, impl.main)

        if package == 'gnupg':
            find_program("/usr/local/bin/gpg")
        elif package == 'gnupg2':
            find_program("/usr/local/bin/gpg2")
class CachedDistribution(Distribution):
    """For distributions where querying the package database is slow (e.g. requires running
    an external command), we cache the results.
    Subclasses provide C{cache_leaf} (cache file name) and C{generate_cache()}.
    @since: 0.39
    @deprecated: use Cache instead
    """

    def __init__(self, db_status_file):
        """@param db_status_file: update the cache when the timestamp of this file changes
        @type db_status_file: str"""
        # mtime/size of the database file are stored in the cache header so a
        # changed database invalidates the cache.
        self._status_details = os.stat(db_status_file)

        self.versions = {}  # package name -> [(version, arch), ...]
        self.cache_dir = basedir.save_cache_path(namespaces.config_site,
                                                 namespaces.config_prog)

        try:
            self._load_cache()
        except Exception as ex:
            # Any problem (missing, stale, corrupt) → regenerate from scratch.
            logger.info(_("Failed to load distribution database cache (%s). Regenerating..."), ex)
            try:
                self.generate_cache()
                self._load_cache()
            except Exception as ex:
                # Best-effort: run with an empty self.versions rather than fail.
                logger.warning(_("Failed to regenerate distribution database cache: %s"), ex)

    def _load_cache(self):
        """Load {cache_leaf} cache file into self.versions if it is available and up-to-date.
        Throws an exception if the cache should be (re)created."""
        with open(os.path.join(self.cache_dir, self.cache_leaf), 'rt') as stream:
            cache_version = None
            # Header: "name: value" lines terminated by a blank line.
            for line in stream:
                if line == '\n':
                    break
                name, value = line.split(': ')
                if name == 'mtime' and int(value) != int(self._status_details.st_mtime):
                    raise Exception(_("Modification time of package database file has changed"))
                if name == 'size' and int(value) != self._status_details.st_size:
                    raise Exception(_("Size of package database file has changed"))
                if name == 'version':
                    cache_version = int(value)
            else:
                # for/else: we never hit the blank line, so the header is truncated.
                raise Exception(_('Invalid cache format (bad header)'))

            if cache_version is None:
                raise Exception(_('Old cache format'))

            # Body: one "package\tversion\tarch" entry per line.
            versions = self.versions
            for line in stream:
                package, version, zi_arch = line[:-1].split('\t')
                versionarch = (version, intern(zi_arch))
                if package not in versions:
                    versions[package] = [versionarch]
                else:
                    versions[package].append(versionarch)

    def _write_cache(self, cache):
        """Atomically write the cache entries (with a validity header) to
        {cache_leaf}, via a temporary file renamed into place.
        @type cache: [str]"""
        #cache.sort() # Might be useful later; currently we don't care
        import tempfile
        fd, tmpname = tempfile.mkstemp(prefix = 'zeroinstall-cache-tmp',
                                       dir = self.cache_dir)
        try:
            stream = os.fdopen(fd, 'wt')
            stream.write('version: 2\n')
            stream.write('mtime: %d\n' % int(self._status_details.st_mtime))
            stream.write('size: %d\n' % self._status_details.st_size)
            stream.write('\n')
            for line in cache:
                stream.write(line + '\n')
            stream.close()

            portable_rename(tmpname,
                            os.path.join(self.cache_dir,
                                         self.cache_leaf))
        except:
            # Don't leave a half-written temp file behind.
            os.unlink(tmpname)
            raise
# Maps machine type names used in packages to their Zero Install versions
# (updates to this might require changing the reverse Java mapping)
_canonical_machine = {
    # Architecture-independent packages map to the wildcard.
    'all' : '*',
    'any' : '*',
    'noarch' : '*',
    '(none)' : '*',
    'x86_64': 'x86_64',
    'amd64': 'x86_64',
    'i386': 'i386',
    'i486': 'i486',
    'i586': 'i586',
    'i686': 'i686',
    'ppc64': 'ppc64',
    'ppc': 'ppc',
}
def arch_canonicalize_machine(machine_):
    """Normalize a raw machine name (e.g. from platform.uname()) to the
    form Zero Install uses ('i386', 'x86_64', 'ppc', ...).
    Unrecognized names are returned lower-cased and otherwise unchanged.
    @type machine_: str
    @rtype: str"""
    machine = machine_.lower()
    if machine == 'x86':
        machine = 'i386'
    elif machine == 'amd64':
        machine = 'x86_64'
    elif machine == 'power macintosh':
        # Bug fix: this previously compared against the mixed-case
        # 'Power Macintosh', which can never match after lower() above,
        # so PowerPC Macs were never canonicalized to 'ppc'.
        machine = 'ppc'
    elif machine == 'i86pc':
        machine = 'i686'
    return machine

host_machine = arch_canonicalize_machine(platform.uname()[4])
def canonical_machine(package_machine):
    """Convert a package's architecture string into a Zero Install machine
    type, falling back to the host machine for anything unrecognized.
    @type package_machine: str
    @rtype: str"""
    try:
        return _canonical_machine[package_machine.lower()]
    except KeyError:
        # Safe default if we can't understand the arch
        return host_machine.lower()
class DebianDistribution(Distribution):
    """A dpkg-based distribution.

    Installed versions come from dpkg-query (cached); uninstalled candidates
    come from PackageKit when available, otherwise from apt-cache."""

    name = 'Debian'
    cache_leaf = 'dpkg-status.cache'

    def __init__(self, dpkg_status):
        """@param dpkg_status: path of the dpkg status file (its mtime/size invalidate the cache)
        @type dpkg_status: str"""
        self.dpkg_cache = Cache('dpkg-status.cache', dpkg_status, 2)
        # package name -> {'version': ..., 'arch': ..., 'size': ...} or None,
        # filled by fetch_candidates() when PackageKit is unavailable.
        self.apt_cache = {}

    def _query_installed_package(self, package):
        """Ask dpkg-query for the installed version of 'package'.
        @type package: str
        @return: 'version<TAB>machine', or '-' if not installed
        @rtype: str"""
        null = os.open(os.devnull, os.O_WRONLY)
        try:
            child = subprocess.Popen(["dpkg-query", "-W", "--showformat=${Version}\t${Architecture}\t${Status}\n", "--", package],
                    stdout = subprocess.PIPE, stderr = null,
                    universal_newlines = True)	# Needed for Python 3
        finally:
            # Fix: close the devnull fd even if Popen raises (it was
            # previously leaked on failure).
            os.close(null)
        stdout, stderr = child.communicate()
        child.wait()
        for line in stdout.split('\n'):
            if not line: continue
            version, debarch, status = line.split('\t', 2)
            if not status.endswith(' installed'): continue
            clean_version = try_cleanup_distro_version(version)
            # Multi-arch values can look like 'linux-any'; keep the last part.
            if debarch.find("-") != -1:
                debarch = debarch.split("-")[-1]
            if clean_version:
                return '%s\t%s' % (clean_version, canonical_machine(debarch.strip()))
            else:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
        return '-'

    def get_package_info(self, package, factory):
        """Add installed and candidate implementations for 'package'.
        @type package: str"""
        # Add any already-installed package...
        installed_cached_info = self._get_dpkg_info(package)

        if installed_cached_info != '-':
            installed_version, machine = installed_cached_info.split('\t')
            impl = factory('package:deb:%s:%s:%s' % (package, installed_version, machine))
            impl.version = model.parse_version(installed_version)
            if machine != '*':
                impl.machine = machine
        else:
            installed_version = None

        # Add any uninstalled candidates (note: only one of these two methods will add anything)

        # From PackageKit...
        self.packagekit.get_candidates(package, factory, 'package:deb')

        # From apt-cache...
        cached = self.apt_cache.get(package, None)
        if cached:
            candidate_version = cached['version']
            candidate_arch = cached['arch']
            if candidate_version and candidate_version != installed_version:
                impl = factory('package:deb:%s:%s:%s' % (package, candidate_version, candidate_arch), installed = False)
                impl.version = model.parse_version(candidate_version)
                if candidate_arch != '*':
                    impl.machine = candidate_arch
                impl.download_sources.append(model.DistributionSource(package, cached['size'], needs_confirmation = False))

    def fixup(self, package, impl):
        """@type package: str
        @type impl: L{zeroinstall.injector.model.DistributionImplementation}"""
        if impl.id.startswith('package:deb:openjdk-6-jre:') or \
           impl.id.startswith('package:deb:openjdk-7-jre:'):
            # Debian marks all Java versions as pre-releases
            # See: http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=685276
            impl.version = model.parse_version(impl.get_version().replace('-pre', '.'))

    def installed_fixup(self, impl):
        """Point Java implementations at the real JVM binary under /usr/lib/jvm.
        @type impl: L{zeroinstall.injector.model.DistributionImplementation}"""
        # Hack: If we added any Java implementations, find the corresponding JAVA_HOME...
        if impl.id.startswith('package:deb:openjdk-6-jre:'):
            java_version = '6-openjdk'
        elif impl.id.startswith('package:deb:openjdk-7-jre:'):
            java_version = '7-openjdk'
        else:
            return Distribution.installed_fixup(self, impl)	# super

        # Debian calls x86_64 'amd64' in its JVM paths.
        if impl.machine == 'x86_64':
            java_arch = 'amd64'
        else:
            java_arch = impl.machine

        java_bin = '/usr/lib/jvm/java-%s-%s/jre/bin/java' % (java_version, java_arch)
        if not os.path.exists(java_bin):
            # Try without the arch...
            java_bin = '/usr/lib/jvm/java-%s/jre/bin/java' % java_version
            if not os.path.exists(java_bin):
                logger.info("Java binary not found (%s)", java_bin)
                if impl.main is None:
                    java_bin = '/usr/bin/java'
                else:
                    return

        impl.main = java_bin

    def _get_dpkg_info(self, package):
        """Return the cached dpkg info for 'package', querying dpkg on a miss.
        @type package: str
        @rtype: str"""
        installed_cached_info = self.dpkg_cache.get(package)
        if installed_cached_info is None:	# fix: identity check, not '== None'
            installed_cached_info = self._query_installed_package(package)
            self.dpkg_cache.put(package, installed_cached_info)
        return installed_cached_info

    def fetch_candidates(self, package_impls):
        """Collect candidate versions via PackageKit, or apt-cache as a fallback
        (results land in self.apt_cache).
        @type package_impls: [(Element, dict, list)]
        @rtype: L{zeroinstall.support.tasks.Blocker} or None"""
        package_names = [item.getAttribute("package") for item, item_attrs, depends in package_impls]

        if self.packagekit.available:
            return self.packagekit.fetch_candidates(package_names)

        # No PackageKit. Use apt-cache directly.
        for package in package_names:
            # Check to see whether we could get a newer version using apt-get
            try:
                null = os.open(os.devnull, os.O_WRONLY)
                try:
                    child = subprocess.Popen(['apt-cache', 'show', '--no-all-versions', '--', package], stdout = subprocess.PIPE, stderr = null, universal_newlines = True)
                finally:
                    # Fix: close the devnull fd even if Popen raises.
                    os.close(null)

                arch = version = size = None
                for line in child.stdout:
                    line = line.strip()
                    if line.startswith('Version: '):
                        version = line[9:]
                        version = try_cleanup_distro_version(version)
                    elif line.startswith('Architecture: '):
                        arch = canonical_machine(line[14:].strip())
                    elif line.startswith('Size: '):
                        size = int(line[6:].strip())
                if version and arch:
                    cached = {'version': version, 'arch': arch, 'size': size}
                else:
                    cached = None
                child.stdout.close()
                child.wait()
            except Exception as ex:
                logger.warning("'apt-cache show %s' failed: %s", package, ex)
                cached = None
            # (multi-arch support? can there be multiple candidates?)
            self.apt_cache[package] = cached
class RPMDistribution(CachedDistribution):
    """An RPM-based distribution."""

    name = 'RPM'
    cache_leaf = 'rpm-status.cache'

    def generate_cache(self):
        # Rebuild the cache by listing every installed RPM as
        # "package\tversion\tarch" lines.
        cache = []

        child = subprocess.Popen(["rpm", "-qa", "--qf=%{NAME}\t%{VERSION}-%{RELEASE}\t%{ARCH}\n"],
                stdout = subprocess.PIPE, universal_newlines = True)
        for line in child.stdout:
            package, version, rpmarch = line.split('\t', 2)
            if package == 'gpg-pubkey':
                # Not a real package: rpm stores imported GPG keys this way.
                continue
            zi_arch = canonical_machine(rpmarch.strip())
            clean_version = try_cleanup_distro_version(version)
            if clean_version:
                cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
            else:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})

        self._write_cache(cache)
        child.stdout.close()
        child.wait()

    def get_package_info(self, package, factory):
        """Add installed versions from the cache, then PackageKit candidates.
        @type package: str"""
        # Add installed versions...
        versions = self.versions.get(package, [])

        for version, machine in versions:
            impl = factory('package:rpm:%s:%s:%s' % (package, version, machine))
            impl.version = model.parse_version(version)
            if machine != '*':
                impl.machine = machine

        # Add any uninstalled candidates found by PackageKit
        self.packagekit.get_candidates(package, factory, 'package:rpm')

    def installed_fixup(self, impl):
        """Point Java implementations at the real JVM binary under /usr/lib/jvm.
        @type impl: L{zeroinstall.injector.model.DistributionImplementation}"""
        # OpenSUSE uses _, Fedora uses .
        impl_id = impl.id.replace('_', '.')

        # Hack: If we added any Java implementations, find the corresponding JAVA_HOME...

        if impl_id.startswith('package:rpm:java-1.6.0-openjdk:'):
            java_version = '1.6.0-openjdk'
        elif impl_id.startswith('package:rpm:java-1.7.0-openjdk:'):
            java_version = '1.7.0-openjdk'
        else:
            return Distribution.installed_fixup(self, impl)	# super

        # On Fedora, unlike Debian, the arch is x86_64, not amd64

        java_bin = '/usr/lib/jvm/jre-%s.%s/bin/java' % (java_version, impl.machine)
        if not os.path.exists(java_bin):
            # Try without the arch...
            java_bin = '/usr/lib/jvm/jre-%s/bin/java' % java_version
            if not os.path.exists(java_bin):
                logger.info("Java binary not found (%s)", java_bin)
                if impl.main is None:
                    java_bin = '/usr/bin/java'
                else:
                    return

        impl.main = java_bin

    def fixup(self, package, impl):
        """@type package: str
        @type impl: L{zeroinstall.injector.model.DistributionImplementation}"""
        # OpenSUSE uses _, Fedora uses .
        package = package.replace('_', '.')

        if package in ('java-1.6.0-openjdk', 'java-1.7.0-openjdk',
                       'java-1.6.0-openjdk-devel', 'java-1.7.0-openjdk-devel'):
            # OpenSUSE uses 1.6 to mean 6
            if impl.version[0][0] == 1:
                del impl.version[0][0]
class SlackDistribution(Distribution):
    """A Slack-based distribution (installed packages are recorded as
    NAME-VERSION-ARCH-BUILD entries in a directory)."""

    name = 'Slack'

    def __init__(self, packages_dir):
        """@param packages_dir: directory listing installed packages (e.g. /var/log/packages)
        @type packages_dir: str"""
        self._packages_dir = packages_dir

    def get_package_info(self, package, factory):
        """Add installed versions from the packages directory, then any
        uninstalled candidates from PackageKit.
        @type package: str"""
        for entry in os.listdir(self._packages_dir):
            pkg_name, pkg_version, pkg_arch, pkg_build = entry.rsplit('-', 3)
            if pkg_name != package:
                continue
            zi_arch = canonical_machine(pkg_arch)
            clean_version = try_cleanup_distro_version("%s-%s" % (pkg_version, pkg_build))
            if not clean_version:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': pkg_version, 'package': pkg_name})
                continue
            impl = factory('package:slack:%s:%s:%s' % (package, clean_version, zi_arch))
            impl.version = model.parse_version(clean_version)
            if zi_arch != '*':
                impl.machine = zi_arch

        # Add any uninstalled candidates found by PackageKit
        self.packagekit.get_candidates(package, factory, 'package:slack')
class ArchDistribution(Distribution):
    """An Arch Linux distribution (reads pacman's local package database)."""

    name = 'Arch'

    def __init__(self, packages_dir):
        """@param packages_dir: pacman database directory (e.g. /var/lib/pacman)
        @type packages_dir: str"""
        self._packages_dir = os.path.join(packages_dir, "local")

    def get_package_info(self, package, factory):
        """Add installed versions (entries are NAME-VERSION-BUILD directories),
        then any uninstalled candidates from PackageKit.
        @type package: str"""
        for entry in os.listdir(self._packages_dir):
            name, version, build = entry.rsplit('-', 2)
            if name != package:
                continue
            # The architecture is the line following the '%ARCH%' marker in
            # the package's 'desc' metadata file.
            arch = None
            gotarch = False
            # (read in binary mode to avoid unicode errors in C locale)
            with open(os.path.join(self._packages_dir, entry, "desc"), 'rb') as stream:
                for line in stream:
                    if line == b"%ARCH%\n":
                        gotarch = True
                        continue
                    if gotarch:
                        arch = line.strip().decode('utf-8')
                        break
            if arch is None:
                # Robustness fix: previously a desc file without an %ARCH%
                # entry left 'arch' unbound and raised NameError; skip the
                # malformed entry instead.
                logger.warning(_("No %%ARCH%% entry in package description for '%s'"), entry)
                continue
            zi_arch = canonical_machine(arch)
            clean_version = try_cleanup_distro_version("%s-%s" % (version, build))
            if not clean_version:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': name})
                continue
            impl = factory('package:arch:%s:%s:%s' % (package, clean_version, zi_arch))
            impl.version = model.parse_version(clean_version)
            if zi_arch != '*':
                impl.machine = zi_arch
            impl.quick_test_file = os.path.join(self._packages_dir, entry, 'desc')

        # Add any uninstalled candidates found by PackageKit
        self.packagekit.get_candidates(package, factory, 'package:arch')
class GentooDistribution(Distribution):
    # Reads the portage database (/var/db/pkg): one directory per installed
    # package, laid out as <category>/<name>-<version>/.
    name = 'Gentoo'

    def __init__(self, pkgdir):
        """@param pkgdir: the portage package database directory
        @type pkgdir: str"""
        self._pkgdir = pkgdir

    def get_package_info(self, package, factory):
        """Add installed versions for 'package' (which must be 'category/leafname'),
        then any uninstalled candidates from PackageKit.
        @type package: str"""
        # Add installed versions...
        _version_start_reqexp = '-[0-9]'

        # Gentoo packages are always qualified with exactly one category.
        if package.count('/') != 1: return

        category, leafname = package.split('/')
        category_dir = os.path.join(self._pkgdir, category)
        match_prefix = leafname + '-'

        if not os.path.isdir(category_dir): return

        for filename in os.listdir(category_dir):
            # Require the version to start with a digit so that e.g.
            # 'foo-bar-1.0' is not matched when looking for 'foo'.
            # NOTE(review): filename[len(match_prefix)] raises IndexError if a
            # directory entry equals match_prefix exactly — confirm this can't
            # happen in a real portage tree.
            if filename.startswith(match_prefix) and filename[len(match_prefix)].isdigit():
                with open(os.path.join(category_dir, filename, 'PF'), 'rt') as stream:
                    name = stream.readline().strip()

                match = re.search(_version_start_reqexp, name)
                if match is None:
                    logger.warning(_('Cannot parse version from Gentoo package named "%(name)s"'), {'name': name})
                    continue
                else:
                    version = try_cleanup_distro_version(name[match.start() + 1:])

                if category == 'app-emulation' and name.startswith('emul-'):
                    # Emulation packages encode the target arch in their name.
                    __, __, machine, __ = name.split('-', 3)
                else:
                    # Otherwise take the arch from the first CHOST component.
                    with open(os.path.join(category_dir, filename, 'CHOST'), 'rt') as stream:
                        machine, __ = stream.readline().split('-', 1)
                machine = arch_canonicalize_machine(machine)

                impl = factory('package:gentoo:%s:%s:%s' % \
                        (package, version, machine))
                impl.version = model.parse_version(version)
                impl.machine = machine

        # Add any uninstalled candidates found by PackageKit
        self.packagekit.get_candidates(package, factory, 'package:gentoo')
class PortsDistribution(Distribution):
    """A BSD Ports-style distribution (one directory per installed package,
    named NAME-VERSION)."""

    name = 'Ports'
    system_paths = ['/usr/local/bin']

    def __init__(self, pkgdir):
        """@param pkgdir: the package database directory (e.g. /var/db/pkg)
        @type pkgdir: str"""
        self._pkgdir = pkgdir

    def get_package_info(self, package, factory):
        """Add installed versions found in the package database.
        @type package: str"""
        # Directory names look like NAME-VERSION; VERSION has no '-'.
        name_version = re.compile('^(.+)-([^-]+)$')

        for pkgname in os.listdir(self._pkgdir):
            if not os.path.isdir(os.path.join(self._pkgdir, pkgname)):
                continue
            match = name_version.search(pkgname)
            if match is None:
                logger.warning(_('Cannot parse version from Ports package named "%(pkgname)s"'), {'pkgname': pkgname})
                continue
            if match.group(1) != package:
                continue
            version = try_cleanup_distro_version(match.group(2))
            machine = host_machine

            impl = factory('package:ports:%s:%s:%s' % (package, version, machine))
            impl.version = model.parse_version(version)
            impl.machine = machine
class MacPortsDistribution(CachedDistribution):
    """A MacPorts-based distribution. Also delegates to L{DarwinDistribution}
    so host (non-ports) packages such as Java are still found."""

    system_paths = ['/opt/local/bin']
    name = 'MacPorts'
    cache_leaf = 'macports-status.cache'

    def __init__(self, db_status_file):
        """@param db_status_file: the MacPorts registry file (invalidates the cache on change)
        @type db_status_file: str"""
        super(MacPortsDistribution, self).__init__(db_status_file)
        self.darwin = DarwinDistribution()

    def generate_cache(self):
        """Rebuild the cache by parsing 'port -v installed' output."""
        cache = []

        child = subprocess.Popen(["port", "-v", "installed"],
                stdout = subprocess.PIPE, universal_newlines = True)
        for line in child.stdout:
            # Package lines are indented; skip headers.
            if not line.startswith(" "):
                continue
            if line.strip().count(" ") > 1:
                package, version, extra = line.split(None, 2)
            else:
                package, version = line.split()
                extra = ""
            # Only the active variant of each port is relevant.
            if not extra.startswith("(active)"):
                continue
            version = version.lstrip('@')
            version = re.sub(r"\+.*", "", version)	# strip variants
            zi_arch = '*'
            clean_version = try_cleanup_distro_version(version)
            if clean_version:
                # Bug fix: this previously used re.match() with a pattern
                # starting with a space, but 'extra' is guaranteed to start
                # with "(active)" here, so the anchored match could never
                # succeed and the platform/arch fields were always ignored.
                # re.search() finds the fields anywhere in 'extra'.
                match = re.search(r"platform='([^' ]*)( \d+)?' archs='([^']*)'", extra)
                if match:
                    # Avoid shadowing the module-level 'platform' import.
                    port_platform, major, archs = match.groups()
                    for arch in archs.split():
                        zi_arch = canonical_machine(arch)
                        cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
                else:
                    cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
            else:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})
        self._write_cache(cache)
        child.stdout.close()
        child.wait()

    def get_package_info(self, package, factory):
        """@type package: str"""
        # Host (Darwin) packages first, e.g. system Java.
        self.darwin.get_package_info(package, factory)

        # Add installed versions...
        versions = self.versions.get(package, [])

        for version, machine in versions:
            impl = factory('package:macports:%s:%s:%s' % (package, version, machine))
            impl.version = model.parse_version(version)
            if machine != '*':
                impl.machine = machine

    def get_score(self, distro_name):
        # We support both sources of packages.
        # In theory, we should route 'Darwin' package names to DarwinDistribution, and
        # Mac Ports names to MacPortsDistribution. But since we only use Darwin for Java,
        # having one object handle both is OK.
        return int(distro_name in ('Darwin', 'MacPorts'))
class CygwinDistribution(CachedDistribution):
    """A Cygwin-based distribution."""

    name = 'Cygwin'
    cache_leaf = 'cygcheck-status.cache'

    def generate_cache(self):
        # Rebuild the cache from 'cygcheck -c -d' ("package version" lines).
        # Cygwin packages carry no architecture, so everything is '*'.
        cache = []

        zi_arch = '*'
        for line in os.popen("cygcheck -c -d"):
            # Skip the banner, blank lines and the column-header row.
            if line == "Cygwin Package Information\r\n":
                continue
            if line == "\n":
                continue
            package, version = line.split()
            if package == "Package" and version == "Version":
                continue
            clean_version = try_cleanup_distro_version(version)
            if clean_version:
                cache.append('%s\t%s\t%s' % (package, clean_version, zi_arch))
            else:
                logger.warning(_("Can't parse distribution version '%(version)s' for package '%(package)s'"), {'version': version, 'package': package})

        self._write_cache(cache)

    def get_package_info(self, package, factory):
        # Add installed versions from the cache.
        versions = self.versions.get(package, [])

        for version, machine in versions:
            impl = factory('package:cygwin:%s:%s:%s' % (package, version, machine))
            impl.version = model.parse_version(version)
            if machine != '*':
                impl.machine = machine
# Singleton cache for get_host_distribution().
_host_distribution = None

def get_host_distribution():
    """Get a Distribution suitable for the host operating system.
    Calling this twice will return the same object.
    @rtype: L{Distribution}"""
    global _host_distribution
    if not _host_distribution:
        # Well-known package database locations; the first one that exists
        # (in the order tested below) decides which Distribution we use.
        dpkg_db_status = '/var/lib/dpkg/status'
        rpm_db_packages = '/var/lib/rpm/Packages'
        _slack_db = '/var/log/packages'
        _arch_db = '/var/lib/pacman'
        _pkg_db = '/var/db/pkg'		# Gentoo (Linux) or Ports (FreeBSD)
        _macports_db = '/opt/local/var/macports/registry/registry.db'
        _cygwin_log = '/var/log/setup.log'

        if sys.prefix == "/sw":
            # Fink: its dpkg/rpm databases live under the prefix.
            dpkg_db_status = os.path.join(sys.prefix, dpkg_db_status)
            rpm_db_packages = os.path.join(sys.prefix, rpm_db_packages)

        # NOTE: the order of these checks is significant — e.g. MacPorts is
        # tested before dpkg/rpm, and the generic Darwin/empty fallbacks come
        # last.
        if os.name == "nt":
            _host_distribution = WindowsDistribution()
        elif os.path.isdir(_pkg_db):
            # /var/db/pkg exists on both Gentoo and FreeBSD; disambiguate by OS.
            if sys.platform.startswith("linux"):
                _host_distribution = GentooDistribution(_pkg_db)
            elif sys.platform.startswith("freebsd"):
                _host_distribution = PortsDistribution(_pkg_db)
        elif os.path.isfile(_macports_db):
            _host_distribution = MacPortsDistribution(_macports_db)
        elif os.path.isfile(_cygwin_log) and sys.platform == "cygwin":
            _host_distribution = CygwinDistribution(_cygwin_log)
        elif os.access(dpkg_db_status, os.R_OK) \
                and os.path.getsize(dpkg_db_status) > 0:
            _host_distribution = DebianDistribution(dpkg_db_status)
        elif os.path.isfile(rpm_db_packages):
            _host_distribution = RPMDistribution(rpm_db_packages)
        elif os.path.isdir(_slack_db):
            _host_distribution = SlackDistribution(_slack_db)
        elif os.path.isdir(_arch_db):
            _host_distribution = ArchDistribution(_arch_db)
        elif sys.platform == "darwin":
            _host_distribution = DarwinDistribution()
        else:
            # Unknown system: a plain Distribution with no native packages.
            _host_distribution = Distribution()

    return _host_distribution
|
afb/0install
|
zeroinstall/injector/distro.py
|
Python
|
lgpl-2.1
| 40,463
|
[
"VisIt"
] |
fd66d2125e00156fa4d5c1d3512ca08e59289869d9f74786131312c22b4116dc
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RAffyexpress(RPackage):
    """The purpose of this package is to provide a comprehensive and
    easy-to-use tool for quality assessment and to identify differentially
    expressed genes in the Affymetrix gene expression data."""

    homepage = "https://www.bioconductor.org/packages/AffyExpress/"
    url = "https://git.bioconductor.org/packages/AffyExpress"

    # Pinned to the exact Bioconductor release commit for reproducibility.
    version('1.42.0', git='https://git.bioconductor.org/packages/AffyExpress', commit='f5c5cf6173f4419e25f4aeff5e6b705a40abc371')

    # This Bioconductor release requires the matching R 3.4.x series.
    depends_on('r@3.4.0:3.4.9', when='@1.42.0')
    depends_on('r-affy', type=('build', 'run'))
    depends_on('r-limma', type=('build', 'run'))
|
EmreAtes/spack
|
var/spack/repos/builtin/packages/r-affyexpress/package.py
|
Python
|
lgpl-2.1
| 1,907
|
[
"Bioconductor"
] |
dbdcaef98499e222ccdbefedc591175b8614fde90905780ba88d9b45c34f5a36
|
#!/usr/bin/python
# marina von steinkirch @2014
# steinkirch at gmail
''' This program implements the EM algorithm for a two-class
Gaussian mixture model '''
import numpy as np
def load_data(datafile_name):
    """Load whitespace-separated numeric data from a file and return it
    transposed, as a numpy matrix (one row of the file per matrix row
    after transposition)."""
    raw = np.loadtxt(datafile_name)
    return np.mat(raw).T
def perform_em(X, mu, theta, epsilon):
    """Run EM for a two-class, unit-variance Gaussian mixture.

    X       -- (n, 1) data matrix
    mu      -- initial component means (numpy array — assumed, as in main())
    theta   -- initial mixing weights (numpy array)
    epsilon -- convergence threshold on the mean parameter change
    Returns the converged (mu, theta).

    Bug fix: m_step() updates its 'mu'/'theta' arguments IN PLACE, so the
    old convergence test compared the new parameters with themselves
    (mu_n was the same object as mu) and 'stop' was always 0, ending the
    loop after a single iteration regardless of epsilon. Passing copies
    to m_step() makes the between-iteration change measurable, and also
    stops the caller's arrays from being clobbered.
    """
    stop = 1.0
    n = np.shape(X)[0]
    P = np.mat(np.zeros((n, len(mu))))

    while epsilon - stop < 0:
        aux_gauss = np.mat(np.zeros((n, len(mu))))
        P = e_step(aux_gauss, P, X, n, mu, theta)
        mu_n, theta_n = m_step(mu.copy(), theta.copy(), P, X, n)
        stop = sum(abs(mu_n - mu) + abs(theta_n - theta)) / 2.0
        mu, theta = mu_n, theta_n

    return mu, theta
def e_step(aux_gauss, P, X, n, mu, theta):
    """E-step: fill P with posterior component responsibilities.

    aux_gauss[i, j] holds the unnormalized unit-variance Gaussian likelihood
    of sample i under component j; P[:, j] becomes the posterior probability
    of component j for each sample.

    Generalized: the denominator now sums over all components instead of
    hard-coding exactly two, so any len(mu) works. Behaviour is unchanged
    for the original two-component case.
    """
    for i in range(n):
        aux_gauss[i, :] = np.exp(-1.0 * np.square((X[i, :] - mu)) / 2.0)
    # Total weighted likelihood (evidence) of each sample, over all components.
    evidence = np.multiply(aux_gauss, theta).sum(axis=1)
    for j in range(len(mu)):
        P[:, j] = np.divide(aux_gauss[:, j] * theta[j], evidence)
    return P
def m_step(mu, theta, P, X, n):
    """M-step: re-estimate mixing weights and means from responsibilities P.

    NOTE: 'mu' and 'theta' are updated IN PLACE as well as returned.
    """
    for k in range(len(mu)):
        resp_total = sum(P[:, k])
        theta[k] = resp_total / n
        mu[k] = sum(np.multiply(X, P[:, k])) / resp_total
    return mu, theta
def main():
X = load_data('hw5.data')
#print(X)
epsilon = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8, 1e-9] # we want to test convergence
mu = np.array([1.0, 2.0])
theta = np.array([0.33, 0.67])
for e in epsilon:
mu_n, theta_n = perform_em(X, mu, theta, e)
print '\nFor epsilon = ', e
print 'mu = ', mu_n
print 'theta = ', theta_n
if __name__ == '__main__':
main()
|
bt3gl/Advanced-Machine-Learning
|
expectation_maximization/em_main.py
|
Python
|
mit
| 1,672
|
[
"Gaussian"
] |
ab1defa1e83226ff92ae2f91078f07bac5b96d1673f58698d79be30f939d7c9a
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit build' tool.
'''
from __future__ import print_function
import collections
import codecs
import filecmp
import getopt
import gzip
import os
import shutil
import sys
import six
from grit import grd_reader
from grit import shortcuts
from grit import util
from grit.format import minifier
from grit.node import brotli_util
from grit.node import include
from grit.node import message
from grit.node import structure
from grit.tool import interface
# It would be cleaner to have each module register itself, but that would
# require importing all of them on every run of GRIT.
'''Map from <output> node types to modules under grit.format.'''
_format_modules = {
  'android': 'android_xml',
  'c_format': 'c_format',
  'chrome_messages_json': 'chrome_messages_json',
  'chrome_messages_json_gzip': 'chrome_messages_json',
  'data_package': 'data_pack',
  'policy_templates': 'policy_templates_json',
  'rc_all': 'rc',
  'rc_header': 'rc_header',
  'rc_nontranslateable': 'rc',
  'rc_translateable': 'rc',
  'resource_file_map_source': 'resource_map',
  'resource_map_header': 'resource_map',
  'resource_map_source': 'resource_map',
}
def GetFormatter(type):
  """Returns the formatter callable for the given <output> node type.

  Imports the grit.format submodule registered for |type| in
  _format_modules and returns its module-level Format function, falling
  back to the module's own GetFormatter dispatch when the module does not
  define a Format attribute.
  """
  module_name = 'grit.format.' + _format_modules[type]
  __import__(module_name)
  formatter_module = sys.modules[module_name]
  try:
    format_fn = formatter_module.Format
  except AttributeError:
    format_fn = formatter_module.GetFormatter(type)
  return format_fn
class RcBuilder(interface.Tool):
'''A tool that builds RC files and resource header files for compilation.
Usage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*
All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).
Options:
-a FILE Assert that the given file is an output. There can be
multiple "-a" flags listed for multiple outputs. If a "-a"
or "--assert-file-list" argument is present, then the list
of asserted files must match the output files or the tool
will fail. The use-case is for the build system to maintain
separate lists of output files and to catch errors if the
build system's list and the grit list are out-of-sync.
--assert-file-list Provide a file listing multiple asserted output files.
There is one file name per line. This acts like specifying
each file with "-a" on the command line, but without the
possibility of running into OS line-length limits for very
long lists.
-o OUTPUTDIR Specify what directory output paths are relative to.
Defaults to the current directory.
-p FILE Specify a file containing a pre-determined mapping from
resource names to resource ids which will be used to assign
resource ids to those resources. Resources not found in this
file will be assigned ids normally. The motivation is to run
your app's startup and have it dump the resources it loads,
and then pass these via this flag. This will pack startup
resources together, thus reducing paging while all other
resources are unperturbed. The file should have the format:
RESOURCE_ONE_NAME 123
RESOURCE_TWO_NAME 124
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
-f FIRSTIDSFILE Path to a python file that specifies the first id of
value to use for resources. A non-empty value here will
override the value specified in the <grit> node's
first_ids_file.
-w ALLOWLISTFILE Path to a file containing the string names of the
resources to include. Anything not listed is dropped.
-t PLATFORM Specifies the platform the build is targeting; defaults
to the value of sys.platform. The value provided via this
flag should match what sys.platform would report for your
target platform; see grit.node.base.EvaluateCondition.
--allowlist-support
Generate code to support extracting a resource allowlist
from executables.
--write-only-new flag
If flag is non-0, write output files to a temporary file
first, and copy it to the real output only if the new file
is different from the old file. This allows some build
systems to realize that dependent build steps might be
unnecessary, at the cost of comparing the output data at
grit time.
--depend-on-stamp
If specified along with --depfile and --depdir, the depfile
generated will depend on a stampfile instead of the first
output in the input .grd file.
--js-minifier A command to run the Javascript minifier. If not set then
Javascript won't be minified. The command should read the
original Javascript from standard input, and output the
minified Javascript to standard output. A non-zero exit
status will be taken as indicating failure.
--css-minifier A command to run the CSS minifier. If not set then CSS won't
be minified. The command should read the original CSS from
standard input, and output the minified CSS to standard
output. A non-zero exit status will be taken as indicating
failure.
--brotli The full path to the brotli executable generated by
third_party/brotli/BUILD.gn, required if any entries use
compress="brotli".
Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs). This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''
  def ShortDescription(self):
    """Returns the one-line description shown by 'grit help'."""
    return 'A tool that builds RC files for compilation.'
  def Run(self, opts, args):
    """Entry point for 'grit build'.

    Parses tool-specific flags, loads the .grd input, runs the gatherers
    and writes every configured output (plus the optional depfile).

    Args:
      opts: global grit options (input file path, verbosity, ...).
      args: tool-specific command-line arguments.

    Returns:
      0 on success, 2 on usage or output-assertion errors.
    """
    brotli_util.SetBrotliCommand(None)
    os.environ['cwd'] = os.getcwd()
    self.output_directory = '.'
    first_ids_file = None
    predetermined_ids_file = None
    allowlist_filenames = []
    assert_output_files = []
    target_platform = None
    depfile = None
    depdir = None
    allowlist_support = False
    write_only_new = False
    depend_on_stamp = False
    js_minifier = None
    css_minifier = None
    replace_ellipsis = True
    (own_opts, args) = getopt.getopt(
        args, 'a:p:o:D:E:f:w:t:',
        ('depdir=', 'depfile=', 'assert-file-list=', 'help',
         'output-all-resource-defines', 'no-output-all-resource-defines',
         'no-replace-ellipsis', 'depend-on-stamp', 'js-minifier=',
         'css-minifier=', 'write-only-new=', 'allowlist-support', 'brotli='))
    for (key, val) in own_opts:
      if key == '-a':
        assert_output_files.append(val)
      elif key == '--assert-file-list':
        # One asserted output file per line; equivalent to repeated -a flags.
        with open(val) as f:
          assert_output_files += f.read().splitlines()
      elif key == '-o':
        self.output_directory = val
      elif key == '-D':
        name, val = util.ParseDefine(val)
        self.defines[name] = val
      elif key == '-E':
        (env_name, env_value) = val.split('=', 1)
        os.environ[env_name] = env_value
      elif key == '-f':
        # TODO(joi@chromium.org): Remove this override once change
        # lands in WebKit.grd to specify the first_ids_file in the
        # .grd itself.
        first_ids_file = val
      elif key == '-w':
        allowlist_filenames.append(val)
      elif key == '--no-replace-ellipsis':
        replace_ellipsis = False
      elif key == '-p':
        predetermined_ids_file = val
      elif key == '-t':
        target_platform = val
      elif key == '--depdir':
        depdir = val
      elif key == '--depfile':
        depfile = val
      elif key == '--write-only-new':
        write_only_new = val != '0'
      elif key == '--depend-on-stamp':
        depend_on_stamp = True
      elif key == '--js-minifier':
        js_minifier = val
      elif key == '--css-minifier':
        css_minifier = val
      elif key == '--allowlist-support':
        allowlist_support = True
      elif key == '--brotli':
        brotli_util.SetBrotliCommand([os.path.abspath(val)])
      elif key == '--help':
        self.ShowUsage()
        sys.exit(0)
    if len(args):
      print('This tool takes no tool-specific arguments.')
      return 2
    self.SetOptions(opts)
    self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
                    (self.output_directory,
                     os.path.abspath(self.output_directory)))
    if allowlist_filenames:
      self.allowlist_names = set()
      for allowlist_filename in allowlist_filenames:
        self.VerboseOut('Using allowlist: %s\n' % allowlist_filename)
        allowlist_contents = util.ReadFile(allowlist_filename, 'utf-8')
        self.allowlist_names.update(allowlist_contents.strip().split('\n'))
    if js_minifier:
      minifier.SetJsMinifier(js_minifier)
    if css_minifier:
      minifier.SetCssMinifier(css_minifier)
    self.write_only_new = write_only_new
    self.res = grd_reader.Parse(opts.input,
                                debug=opts.extra_verbose,
                                first_ids_file=first_ids_file,
                                predetermined_ids_file=predetermined_ids_file,
                                defines=self.defines,
                                target_platform=target_platform)
    # Set an output context so that conditionals can use defines during the
    # gathering stage; we use a dummy language here since we are not outputting
    # a specific language.
    self.res.SetOutputLanguage('en')
    self.res.SetAllowlistSupportEnabled(allowlist_support)
    self.res.RunGatherers()
    # Replace ... with the single-character version. http://crbug.com/621772
    if replace_ellipsis:
      for node in self.res:
        if isinstance(node, message.MessageNode):
          node.SetReplaceEllipsis(True)
    self.Process()
    if assert_output_files:
      if not self.CheckAssertedOutputFiles(assert_output_files):
        return 2
    if depfile and depdir:
      self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)
    return 0
  def __init__(self, defines=None):
    """Initializes the builder.

    Args:
      defines: optional dict of C-preprocessor-like NAME -> value defines.
    """
    # Default file-creation function is codecs.open(). Only done to allow
    # overriding by unit test.
    self.fo_create = codecs.open
    # key/value pairs of C-preprocessor like defines that are used for
    # conditional output of resources
    self.defines = defines or {}
    # self.res is a fully-populated resource tree if Run()
    # has been called, otherwise None.
    self.res = None
    # The set of names that are allowlisted to actually be included in the
    # output.
    self.allowlist_names = None
    # Whether to compare outputs to their old contents before writing.
    self.write_only_new = False
@staticmethod
def AddAllowlistTags(start_node, allowlist_names):
# Walk the tree of nodes added attributes for the nodes that shouldn't
# be written into the target files (skip markers).
for node in start_node:
# Same trick data_pack.py uses to see what nodes actually result in
# real items.
if (isinstance(node, include.IncludeNode) or
isinstance(node, message.MessageNode) or
isinstance(node, structure.StructureNode)):
text_ids = node.GetTextualIds()
# Mark the item to be skipped if it wasn't in the allowlist.
if text_ids and text_ids[0] not in allowlist_names:
node.SetAllowlistMarkedAsSkip(True)
  @staticmethod
  def ProcessNode(node, output_node, outfile):
    '''Processes a node in-order, calling its formatter before and after
    recursing to its children.

    Args:
      node: grit.node.base.Node subclass
      output_node: grit.node.io.OutputNode
      outfile: open filehandle
    '''
    base_dir = util.dirname(output_node.GetOutputFilename())
    formatter = GetFormatter(output_node.GetType())
    formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
    # NB: Formatters may be generators or return lists. The writelines API
    # accepts iterables as a shortcut to calling write directly. That means
    # you can pass strings (iteration yields characters), but not bytes (as
    # iteration yields integers). Python 2 worked due to its quirks with
    # bytes/string implementation, but Python 3 fails. It's also a bit more
    # inefficient to call write once per character/byte. Handle all of this
    # ourselves by calling write directly on strings/bytes before falling back
    # to writelines.
    if isinstance(formatted, (six.string_types, six.binary_type)):
      outfile.write(formatted)
    else:
      outfile.writelines(formatted)
    # data_package outputs also get a sibling .info file with pack metadata.
    if output_node.GetType() == 'data_package':
      with open(output_node.GetOutputFilename() + '.info', 'w') as infofile:
        if node.info:
          # We terminate with a newline so that when these files are
          # concatenated later we consistently terminate with a newline so
          # consumers can account for terminating newlines.
          infofile.writelines(['\n'.join(node.info), '\n'])
@staticmethod
def _EncodingForOutputType(output_type):
# Microsoft's RC compiler can only deal with single-byte or double-byte
# files (no UTF-8), so we make all RC files UTF-16 to support all
# character sets.
if output_type in ('rc_header', 'resource_file_map_source',
'resource_map_header', 'resource_map_source'):
return 'cp1252'
if output_type in ('android', 'c_format', 'plist', 'plist_strings', 'doc',
'json', 'android_policy', 'chrome_messages_json',
'chrome_messages_json_gzip', 'policy_templates'):
return 'utf_8'
# TODO(gfeher) modify here to set utf-8 encoding for admx/adml
return 'utf_16'
  def Process(self):
    """Renders and writes every <output> declared in the .grd file.

    Each output is rendered into a temporary file first and only
    renamed/copied over the real output when appropriate, so unchanged
    outputs can avoid triggering dependent rebuilds.
    """
    for output in self.res.GetOutputFiles():
      output.output_filename = os.path.abspath(os.path.join(
        self.output_directory, output.GetOutputFilename()))
    # If there are allowlisted names, tag the tree once up front, this way
    # while looping through the actual output, it is just an attribute check.
    if self.allowlist_names:
      self.AddAllowlistTags(self.res, self.allowlist_names)
    for output in self.res.GetOutputFiles():
      self.VerboseOut('Creating %s...' % output.GetOutputFilename())
      # Set the context, for conditional inclusion of resources
      self.res.SetOutputLanguage(output.GetLanguage())
      self.res.SetOutputContext(output.GetContext())
      self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())
      self.res.SetDefines(self.defines)
      # Assign IDs only once to ensure that all outputs use the same IDs.
      if self.res.GetIdMap() is None:
        self.res.InitializeIds()
      # Make the output directory if it doesn't exist.
      self.MakeDirectoriesTo(output.GetOutputFilename())
      # Write the results to a temporary file and only overwrite the original
      # if the file changed. This avoids unnecessary rebuilds.
      out_filename = output.GetOutputFilename()
      tmp_filename = out_filename + '.tmp'
      tmpfile = self.fo_create(tmp_filename, 'wb')
      output_type = output.GetType()
      if output_type != 'data_package':
        encoding = self._EncodingForOutputType(output_type)
        tmpfile = util.WrapOutputStream(tmpfile, encoding)
      # Iterate in-order through entire resource tree, calling formatters on
      # the entry into a node and on exit out of it.
      with tmpfile:
        self.ProcessNode(self.res, output, tmpfile)
      # Gzipped JSON outputs are compressed from the rendered temp file.
      if output_type == 'chrome_messages_json_gzip':
        gz_filename = tmp_filename + '.gz'
        with open(tmp_filename, 'rb') as tmpfile, open(gz_filename, 'wb') as f:
          with gzip.GzipFile(filename='', mode='wb', fileobj=f, mtime=0) as fgz:
            shutil.copyfileobj(tmpfile, fgz)
        os.remove(tmp_filename)
        tmp_filename = gz_filename
      # Now copy from the temp file back to the real output, but on Windows,
      # only if the real output doesn't exist or the contents of the file
      # changed. This prevents identical headers from being written and .cc
      # files from recompiling (which is painful on Windows).
      if not os.path.exists(out_filename):
        os.rename(tmp_filename, out_filename)
      else:
        # CHROMIUM SPECIFIC CHANGE.
        # This clashes with gyp + vstudio, which expect the output timestamp
        # to change on a rebuild, even if nothing has changed, so only do
        # it when opted in.
        if not self.write_only_new:
          write_file = True
        else:
          files_match = filecmp.cmp(out_filename, tmp_filename)
          write_file = not files_match
        if write_file:
          shutil.copy2(tmp_filename, out_filename)
        os.remove(tmp_filename)
      self.VerboseOut(' done.\n')
    # Print warnings if there are any duplicate shortcuts.
    warnings = shortcuts.GenerateDuplicateShortcutsWarnings(
        self.res.UberClique(), self.res.GetTcProject())
    if warnings:
      print('\n'.join(warnings))
    # Print out any fallback warnings, and missing translation errors, and
    # exit with an error code if there are missing translations in a non-pseudo
    # and non-official build.
    warnings = (self.res.UberClique().MissingTranslationsReport().
                encode('ascii', 'replace'))
    if warnings:
      self.VerboseOut(warnings)
    if self.res.UberClique().HasMissingTranslations():
      print(self.res.UberClique().missing_translations_)
      sys.exit(-1)
  def CheckAssertedOutputFiles(self, assert_output_files):
    '''Checks that the asserted output files are specified in the given list.

    Returns true if the asserted files are present. If they are not, returns
    False and prints the failure.
    '''
    # Compare the absolute path names, sorted.
    asserted = sorted([os.path.abspath(i) for i in assert_output_files])
    actual = sorted([
        os.path.abspath(os.path.join(self.output_directory,
                                     i.GetOutputFilename()))
        for i in self.res.GetOutputFiles()])
    if asserted != actual:
      missing = list(set(asserted) - set(actual))
      extra = list(set(actual) - set(asserted))
      duplicates = [
          path for path, count in collections.Counter(actual).items()
          if count > 1
      ]
      error = '''Asserted file list does not match.
Missing output files:
%s
Extra output files:
%s
Duplicate actual output files:
%s
'''
      print(error %
            ('\n'.join(missing), '\n'.join(extra), '\n'.join(duplicates)))
      return False
    return True
def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):
'''Generate a depfile that contains the implicit dependencies of the input
grd. The depfile will be in the same format as a makefile, and will contain
references to files relative to |depdir|. It will be put in |depfile|.
For example, supposing we have three files in a directory src/
src/
blah.grd <- depends on input{1,2}.xtb
input1.xtb
input2.xtb
and we run
grit -i blah.grd -o ../out/gen \
--depdir ../out \
--depfile ../out/gen/blah.rd.d
from the directory src/ we will generate a depfile ../out/gen/blah.grd.d
that has the contents
gen/blah.h: ../src/input1.xtb ../src/input2.xtb
Where "gen/blah.h" is the first output (Ninja expects the .d file to list
the first output in cases where there is more than one). If the flag
--depend-on-stamp is specified, "gen/blah.rd.d.stamp" will be used that is
'touched' whenever a new depfile is generated.
Note that all paths in the depfile are relative to ../out, the depdir.
'''
depfile = os.path.abspath(depfile)
depdir = os.path.abspath(depdir)
infiles = self.res.GetInputFiles()
# We want to trigger a rebuild if the first ids change.
if first_ids_file is not None:
infiles.append(first_ids_file)
if (depend_on_stamp):
output_file = depfile + ".stamp"
# Touch the stamp file before generating the depfile.
with open(output_file, 'a'):
os.utime(output_file, None)
else:
# Get the first output file relative to the depdir.
outputs = self.res.GetOutputFiles()
output_file = os.path.join(self.output_directory,
outputs[0].GetOutputFilename())
output_file = os.path.relpath(output_file, depdir)
# The path prefix to prepend to dependencies in the depfile.
prefix = os.path.relpath(os.getcwd(), depdir)
deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])
depfile_contents = output_file + ': ' + deps_text
self.MakeDirectoriesTo(depfile)
outfile = self.fo_create(depfile, 'w', encoding='utf-8')
outfile.write(depfile_contents)
@staticmethod
def MakeDirectoriesTo(file):
'''Creates directories necessary to contain |file|.'''
dir = os.path.split(file)[0]
if not os.path.exists(dir):
os.makedirs(dir)
|
chromium/chromium
|
tools/grit/grit/tool/build.py
|
Python
|
bsd-3-clause
| 22,102
|
[
"xTB"
] |
be67c29e28b7479a538894c53597c0f60dfd46fe9b7a144b2b63fcc7b8640354
|
import webbrowser
import os
import socket
from urllib.parse import urlparse
from plugin import plugin, alias, require
# Absolute path of this plugin's directory; used to locate the bundled
# ../data/websites.csv lookup table.
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
@require(network=True)
@alias("visit website")
@plugin("website")
class OpenWebsite:
    """
    This plugin will visit a website using some parameters.
    The user can visit a simple website giving a complete link or
    inputting the name of the website like the examples:
    > visit website www.google.com
    > visit website github
    > visit website github username
    You can find a csv file with a list of saved websites at:
    Jarvis/jarviscli/data/websites.csv
    {Alternatively, you can also use only 'website'
    instead of 'visit website'}
    """

    def __call__(self, jarvis, link):
        # First token is the site name/URL; an optional second token is a
        # complement appended to saved links (e.g. a GitHub username).
        inputs = link.split(' ')
        self.main_link = inputs[0]
        self.complement = False
        if len(inputs) > 1:
            self.complement = inputs[1]
        if self.has_on_saved_links():
            webbrowser.open(self.main_link)
        elif self.verify_link():
            webbrowser.open(self.main_link)
        else:
            jarvis.say("Sorry, I can't open this link.")

    def has_on_saved_links(self):
        """Look up the requested name in the bundled websites.csv.

        On a match, rewrite self.main_link to the stored URL (with the
        optional complement appended) and return True.
        """
        csv_path = os.path.join(FILE_PATH, "../data/websites.csv")
        # Use a context manager so the file handle is closed on every path;
        # the previous code opened the file and never closed it.
        with open(csv_path, 'r') as websites_csv:
            for website in websites_csv:
                website = website.rstrip()  # remove newline
                information = website.split(',')
                if self.main_link == information[0]:
                    if self.complement:
                        if len(information) > 2:
                            self.main_link = \
                                information[1] + information[2] + self.complement
                        else:
                            self.main_link = information[1] + self.complement
                    else:
                        self.main_link = information[1]
                    return True
        return False

    def verify_link(self):
        """Return True if the (normalized) link's host resolves via DNS."""
        self.fix_link()
        domain = urlparse(self.main_link).netloc
        try:
            socket.getaddrinfo(domain, 80)
        except socket.gaierror:
            return False
        return True

    def fix_link(self):
        """Prefix the link with https:// when no scheme was given."""
        if not self.main_link.startswith('http'):
            self.main_link = "https://" + self.main_link
|
sukeesh/Jarvis
|
jarviscli/plugins/visit_website.py
|
Python
|
mit
| 2,297
|
[
"VisIt"
] |
9f60cc2e1b6046260cdcabc4b674cd31259d81a0f0a14e010549793c9bcf457b
|
from __future__ import with_statement
import hashlib
import os
import posixpath
import stat
import re
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
from fabric.context_managers import settings
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>')
class SFTP(object):
    """
    SFTP helper class, which is also a facade for ssh.SFTPClient.
    """
    def __init__(self, host_string):
        # Reuse the already-established fabric connection for this host to
        # open the SFTP channel.
        self.ftp = connections[host_string].open_sftp()

    # Recall that __getattr__ is the "fallback" attribute getter, and is thus
    # pretty safe to use for facade-like behavior as we're doing here.
    def __getattr__(self, attr):
        return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.lstat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
    def glob(self, path):
        """Expand a remote shell-style glob.

        Only the basename component of *path* may contain wildcards; the
        directory part is listed and filtered with fnmatch. Entries with a
        leading '.' are never matched. If nothing matches, the original
        *path* is returned as the sole element, mirroring shell behavior.
        """
        from fabric.state import win32
        dirpart, pattern = os.path.split(path)
        rlist = self.ftp.listdir(dirpart)
        names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
        ret = [path]
        if len(names):
            s = '/'
            ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
            # On POSIX hosts prefer posixpath.join for normalization.
            if not win32:
                ret = [posixpath.join(dirpart, name) for name in names]
        return ret
    def walk(self, top, topdown=True, onerror=None, followlinks=False):
        """Remote analogue of os.walk: yield (dirpath, dirnames, filenames)
        tuples for the tree rooted at *top*, top-down or bottom-up."""
        from os.path import join
        # We may not have read permission for top, in which case we can't get a
        # list of the files the directory contains. os.path.walk always
        # suppressed the exception then, rather than blow up for a minor reason
        # when (say) a thousand readable directories are still left to visit.
        # That logic is copied here.
        try:
            # Note that listdir and error are globals in this module due to
            # earlier import-*.
            names = self.ftp.listdir(top)
        except Exception, err:
            if onerror is not None:
                onerror(err)
            return
        # Partition the directory entries into subdirectories and files.
        dirs, nondirs = [], []
        for name in names:
            if self.isdir(join(top, name)):
                dirs.append(name)
            else:
                nondirs.append(name)
        if topdown:
            yield top, dirs, nondirs
        # Recurse into subdirectories, skipping symlinks unless requested.
        for name in dirs:
            path = join(top, name)
            if followlinks or not self.islink(path):
                for x in self.walk(path, topdown, onerror, followlinks):
                    yield x
        if not topdown:
            yield top, dirs, nondirs
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir %s' % path)
else:
self.ftp.mkdir(path)
    def get(self, remote_path, local_path, local_is_path, rremote=None):
        """Download *remote_path* into *local_path*.

        *local_path* is either a filesystem path (local_is_path=True),
        possibly containing %(host)s/%(basename)s/%(dirname)s/%(path)s
        format specifiers, or an open file-like object. Returns the
        (possibly interpolated) local path or the file object written to.
        """
        # rremote => relative remote path, so get(/var/log) would result in
        # this function being called with
        # remote_path=/var/log/apache2/access.log and
        # rremote=apache2/access.log
        rremote = rremote if rremote is not None else remote_path
        # Handle format string interpolation (e.g. %(dirname)s)
        path_vars = {
            'host': env.host_string.replace(':', '-'),
            'basename': os.path.basename(rremote),
            'dirname': os.path.dirname(rremote),
            'path': rremote
        }
        if local_is_path:
            # Naive fix to issue #711
            escaped_path = re.sub(r'(%[^()]*\w)', r'%\1', local_path)
            local_path = os.path.abspath(escaped_path % path_vars )
            # Ensure we give ssh.SFTPCLient a file by prepending and/or
            # creating local directories as appropriate.
            dirpath, filepath = os.path.split(local_path)
            if dirpath and not os.path.exists(dirpath):
                os.makedirs(dirpath)
            if os.path.isdir(local_path):
                local_path = os.path.join(local_path, path_vars['basename'])
        if output.running:
            print("[%s] download: %s <- %s" % (
                env.host_string,
                _format_local(local_path, local_is_path),
                remote_path
            ))
        # Warn about overwrites, but keep going
        if local_is_path and os.path.exists(local_path):
            msg = "Local file %s already exists and is being overwritten."
            warn(msg % local_path)
        # File-like objects: reset to file seek 0 (to ensure full overwrite)
        # and then use Paramiko's getfo() directly
        getter = self.ftp.get
        if not local_is_path:
            local_path.seek(0)
            getter = self.ftp.getfo
        getter(remote_path, local_path)
        # Return local_path object for posterity. (If mutated, caller will want
        # to know.)
        return local_path
    def get_dir(self, remote_path, local_path):
        """Recursively download the tree at *remote_path* into *local_path*;
        returns the list of local paths written."""
        # Decide what needs to be stripped from remote paths so they're all
        # relative to the given remote_path
        if os.path.basename(remote_path):
            strip = os.path.dirname(remote_path)
        else:
            strip = os.path.dirname(os.path.dirname(remote_path))
        # Store all paths gotten so we can return them when done
        result = []
        # Use our facsimile of os.walk to find all files within remote_path
        for context, dirs, files in self.walk(remote_path):
            # Normalize current directory to be relative
            # E.g. remote_path of /var/log and current dir of /var/log/apache2
            # would be turned into just 'apache2'
            lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
            # Prepend local path to that to arrive at the local mirrored
            # version of this directory. So if local_path was 'mylogs', we'd
            # end up with 'mylogs/apache2'
            lcontext = os.path.join(local_path, lcontext)
            # Download any files in current directory
            for f in files:
                # Construct full and relative remote paths to this file
                rpath = posixpath.join(context, f)
                rremote = posixpath.join(rcontext, f)
                # If local_path isn't using a format string that expands to
                # include its remote path, we need to add it here.
                if "%(path)s" not in local_path \
                    and "%(dirname)s" not in local_path:
                    lpath = os.path.join(lcontext, f)
                # Otherwise, just passthrough local_path to self.get()
                else:
                    lpath = local_path
                # Now we can make a call to self.get() with specific file paths
                # on both ends.
                result.append(self.get(rpath, lpath, True, rremote))
        return result
    def put(self, local_path, remote_path, use_sudo, mirror_local_mode, mode,
            local_is_path, temp_dir):
        """Upload *local_path* (path or file-like object) to *remote_path*.

        When *use_sudo* is set, the file is first uploaded to a unique name
        under *temp_dir* and then moved into place with sudo(mv). Optionally
        mirrors the local permission bits or applies an explicit *mode*.
        Returns the final remote path.
        """
        from fabric.api import sudo, hide
        pre = self.ftp.getcwd()
        pre = pre if pre else ''
        # Uploading into an existing directory keeps the local basename.
        if local_is_path and self.isdir(remote_path):
            basename = os.path.basename(local_path)
            remote_path = posixpath.join(remote_path, basename)
        if output.running:
            print("[%s] put: %s -> %s" % (
                env.host_string,
                _format_local(local_path, local_is_path),
                posixpath.join(pre, remote_path)
            ))
        # When using sudo, "bounce" the file through a guaranteed-unique file
        # path in the default remote CWD (which, typically, the login user will
        # have write permissions on) in order to sudo(mv) it later.
        if use_sudo:
            target_path = remote_path
            hasher = hashlib.sha1()
            hasher.update(env.host_string)
            hasher.update(target_path)
            remote_path = posixpath.join(temp_dir, hasher.hexdigest())
        # Read, ensuring we handle file-like objects correct re: seek pointer
        putter = self.ftp.put
        if not local_is_path:
            old_pointer = local_path.tell()
            local_path.seek(0)
            putter = self.ftp.putfo
        rattrs = putter(local_path, remote_path)
        if not local_is_path:
            local_path.seek(old_pointer)
        # Handle modes if necessary
        if (local_is_path and mirror_local_mode) or (mode is not None):
            lmode = os.stat(local_path).st_mode if mirror_local_mode else mode
            # Cast to octal integer in case of string
            if isinstance(lmode, basestring):
                lmode = int(lmode, 8)
            lmode = lmode & 07777
            rmode = rattrs.st_mode
            # Only bitshift if we actually got an rmode
            if rmode is not None:
                rmode = (rmode & 07777)
            if lmode != rmode:
                if use_sudo:
                    with hide('everything'):
                        sudo('chmod %o \"%s\"' % (lmode, remote_path))
                else:
                    self.ftp.chmod(remote_path, lmode)
        if use_sudo:
            # Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
            # (The target path has already been cwd-ified elsewhere.)
            with settings(hide('everything'), cwd=""):
                sudo("mv \"%s\" \"%s\"" % (remote_path, target_path))
            # Revert to original remote_path for return value's sake
            remote_path = target_path
        return remote_path
    def put_dir(self, local_path, remote_path, use_sudo, mirror_local_mode,
                mode, temp_dir):
        """Recursively upload the tree at *local_path* under *remote_path*;
        returns the list of remote paths written."""
        # Decide what to strip so remote paths are relative to local_path.
        if os.path.basename(local_path):
            strip = os.path.dirname(local_path)
        else:
            strip = os.path.dirname(os.path.dirname(local_path))
        remote_paths = []
        for context, dirs, files in os.walk(local_path):
            rcontext = context.replace(strip, '', 1)
            # normalize pathname separators with POSIX separator
            rcontext = rcontext.replace(os.sep, '/')
            rcontext = rcontext.lstrip('/')
            rcontext = posixpath.join(remote_path, rcontext)
            if not self.exists(rcontext):
                self.mkdir(rcontext, use_sudo)
            for d in dirs:
                n = posixpath.join(rcontext, d)
                if not self.exists(n):
                    self.mkdir(n, use_sudo)
            for f in files:
                local_path = os.path.join(context, f)
                n = posixpath.join(rcontext, f)
                p = self.put(local_path, n, use_sudo, mirror_local_mode, mode,
                             True, temp_dir)
                remote_paths.append(p)
        return remote_paths
|
noslenfa/tdjangorest
|
uw/lib/python2.7/site-packages/fabric/sftp.py
|
Python
|
apache-2.0
| 11,344
|
[
"VisIt"
] |
ecbc99c2e1b94c678958bf8e73f6f0214eda04b4cac487fbccce8069b5571136
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functional tests using WebTest."""
import datetime as dt
import httplib as http
import logging
import unittest
import markupsafe
import mock
import pytest
from nose.tools import * # noqa: F403
import re
from django.utils import timezone
from addons.wiki.utils import to_mongo_key
from framework.auth import exceptions as auth_exc
from framework.auth.core import Auth
from tests.base import OsfTestCase
from tests.base import fake
from osf_tests.factories import (
fake_email,
AuthUserFactory,
NodeFactory,
PreprintFactory,
PreprintProviderFactory,
PrivateLinkFactory,
ProjectFactory,
RegistrationFactory,
SubjectFactory,
UserFactory,
UnconfirmedUserFactory,
UnregUserFactory,
)
from osf.utils import permissions
from addons.wiki.models import WikiPage, WikiVersion
from addons.wiki.tests.factories import WikiFactory, WikiVersionFactory
from website import settings, language
from addons.osfstorage.models import OsfStorageFile
from website.util import web_url_for, api_url_for
from api_tests import utils as test_utils
logging.getLogger('website.project.model').setLevel(logging.ERROR)
def assert_in_html(member, container, **kwargs):
    """Looks for the specified member in markupsafe-escaped HTML output"""
    escaped_member = markupsafe.escape(member)
    return assert_in(escaped_member, container, **kwargs)
def assert_not_in_html(member, container, **kwargs):
    """Looks for the specified member in markupsafe-escaped HTML output"""
    escaped_member = markupsafe.escape(member)
    return assert_not_in(escaped_member, container, **kwargs)
class TestDisabledUser(OsfTestCase):
    """Profile pages of deactivated accounts should not be served."""

    def setUp(self):
        super(TestDisabledUser, self).setUp()
        self.user = UserFactory()
        self.user.set_password('Korben Dallas')
        self.user.is_disabled = True
        self.user.save()

    def test_profile_disabled_returns_401(self):
        # NOTE(review): the method name says 401 but the assertion checks
        # 410 (Gone); the asserted code appears intentional -- consider
        # renaming the test to match.
        res = self.app.get(self.user.url, expect_errors=True)
        assert_equal(res.status_code, 410)
class TestAnUnregisteredUser(OsfTestCase):
    """Behavior of the site for a visitor with no authenticated session."""

    def test_cant_see_profile_if_not_logged_in(self):
        # The profile view must bounce anonymous visitors toward the login page.
        profile_url = web_url_for('profile_view')
        response = self.app.get(profile_url).follow()
        assert_equal(response.status_code, 301)
        assert_in('/login/', response.headers['Location'])
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestAUser(OsfTestCase):
    """Web-level tests for pages as seen by a logged-in registered user:
    login redirects, dashboard, project permission buttons, wiki pages,
    and profile pages.
    """

    def setUp(self):
        super(TestAUser, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth

    def test_can_see_profile_url(self):
        res = self.app.get(self.user.url).maybe_follow()
        assert_in(self.user.url, res)

    # `GET /login/` without parameters is redirected to `/dashboard/` page which has `@must_be_logged_in` decorator
    # if user is not logged in, she/he is further redirected to CAS login page
    def test_is_redirected_to_cas_if_not_logged_in_at_login_page(self):
        res = self.app.get('/login/').follow()
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_in('login?service=', location)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_login_page(self):
        res = self.app.get('/login/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_register_page(self):
        res = self.app.get('/register/')
        assert_equal(res.status_code, 200)

    def test_is_redirected_to_dashboard_if_already_logged_in_at_register_page(self):
        res = self.app.get('/register/', auth=self.user.auth)
        assert_equal(res.status_code, 302)
        assert 'dashboard' in res.headers.get('Location')

    def test_sees_projects_in_her_dashboard(self):
        # the user already has a project
        project = ProjectFactory(creator=self.user)
        project.add_contributor(self.user)
        project.save()
        res = self.app.get('/myprojects/', auth=self.user.auth)
        assert_in('Projects', res)  # Projects heading

    def test_does_not_see_osffiles_in_user_addon_settings(self):
        # OSF Storage is a node add-on, so it must not appear in user settings.
        res = self.app.get('/settings/addons/', auth=self.auth, auto_follow=True)
        assert_not_in('OSF Storage', res)

    def test_sees_osffiles_in_project_addon_settings(self):
        project = ProjectFactory(creator=self.user)
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        res = self.app.get('/{0}/addons/'.format(project._primary_key), auth=self.auth, auto_follow=True)
        assert_in('OSF Storage', res)

    def test_sees_correct_title_on_dashboard(self):
        # User goes to dashboard
        res = self.app.get('/myprojects/', auth=self.auth, auto_follow=True)
        title = res.html.title.string
        assert_equal('OSF | My Projects', title)

    def test_can_see_make_public_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Public', res)

    def test_cant_see_make_public_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory()
        project.add_contributor(
            self.user,
            permissions=permissions.WRITE,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Public', res)

    def test_can_see_make_private_button_if_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=permissions.ADMIN,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_in('Make Private', res)

    def test_cant_see_make_private_button_if_not_admin(self):
        # User is a contributor on a project
        project = ProjectFactory(is_public=True)
        project.add_contributor(
            self.user,
            permissions=permissions.WRITE,
            save=True)
        # User goes to the project page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        assert_not_in('Make Private', res)

    def test_sees_logs_on_a_project(self):
        project = ProjectFactory(is_public=True)
        # User goes to the project's page
        res = self.app.get(project.url, auth=self.auth).maybe_follow()
        # Can see log event
        assert_in('created', res)

    def test_no_wiki_content_message(self):
        project = ProjectFactory(creator=self.user)
        # Goes to project's wiki, where there is no content
        res = self.app.get('/{0}/wiki/home/'.format(project._primary_key), auth=self.auth)
        # Sees a message indicating no content
        assert_in('Add important information, links, or images here to describe your project.', res)
        # Sees that edit panel is open by default when home wiki has no content
        assert_in('panelsUsed: ["view", "menu", "edit"]', res)

    def test_wiki_content(self):
        project = ProjectFactory(creator=self.user)
        wiki_page_name = 'home'
        wiki_content = 'Kittens'
        wiki_page = WikiFactory(
            user=self.user,
            node=project,
        )
        # The version record carries the actual wiki text.
        wiki = WikiVersionFactory(
            wiki_page=wiki_page,
            content=wiki_content
        )
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            wiki_page_name,
        ), auth=self.auth)
        assert_not_in('Add important information, links, or images here to describe your project.', res)
        assert_in(wiki_content, res)
        # Edit panel stays closed when the wiki already has content.
        assert_in('panelsUsed: ["view", "menu"]', res)

    def test_wiki_page_name_non_ascii(self):
        project = ProjectFactory(creator=self.user)
        non_ascii = to_mongo_key('WöRlÐé')
        WikiPage.objects.create_for_node(project, 'WöRlÐé', 'new content', Auth(self.user))
        wv = WikiVersion.objects.get_for_node(project, non_ascii)
        assert wv.wiki_page.page_name.upper() == non_ascii.decode('utf-8').upper()

    def test_noncontributor_cannot_see_wiki_if_no_content(self):
        user2 = UserFactory()
        # user2 creates a public project and adds no wiki content
        project = ProjectFactory(creator=user2, is_public=True)
        # self navigates to project
        res = self.app.get(project.url).maybe_follow()
        # Should not see wiki widget (since non-contributor and no content)
        assert_not_in('Add important information, links, or images here to describe your project.', res)

    def test_wiki_does_not_exist(self):
        project = ProjectFactory(creator=self.user)
        res = self.app.get('/{0}/wiki/{1}/'.format(
            project._primary_key,
            'not a real page yet',
        ), auth=self.auth, expect_errors=True)
        assert_in('Add important information, links, or images here to describe your project.', res)

    def test_sees_own_profile(self):
        res = self.app.get('/profile/', auth=self.auth)
        # Locate the "Public Profile" table row and check the URL next to it.
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, self.user.display_absolute_url)

    def test_sees_another_profile(self):
        user2 = UserFactory()
        res = self.app.get(user2.url, auth=self.auth)
        td1 = res.html.find('td', text=re.compile(r'Public(.*?)Profile'))
        td2 = td1.find_next_sibling('td')
        assert_equal(td2.text, user2.display_absolute_url)
@pytest.mark.enable_bookmark_creation
class TestComponents(OsfTestCase):
    """Web-level tests for component (child node) pages: parent link,
    settings visibility, and admin-only controls.
    """

    def setUp(self):
        super(TestComponents, self).setUp()
        self.user = AuthUserFactory()
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        self.project.add_contributor(contributor=self.user, auth=self.consolidate_auth)
        # A non-project component
        self.component = NodeFactory(
            category='hypothesis',
            creator=self.user,
            parent=self.project,
        )
        self.component.save()
        # Toggle privacy back and forth so the component accumulates log entries.
        self.component.set_privacy('public', self.consolidate_auth)
        self.component.set_privacy('private', self.consolidate_auth)
        self.project.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_sees_parent(self):
        res = self.app.get(self.component.url, auth=self.user.auth).maybe_follow()
        parent_title = res.html.find_all('h2', class_='node-parent-title')
        assert_equal(len(parent_title), 1)
        assert_in(self.project.title, parent_title[0].text)  # Bs4 will handle unescaping HTML here

    def test_delete_project(self):
        # Admin (creator) sees the delete control on the settings page.
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth
        ).maybe_follow()
        assert_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_cant_delete_project_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=permissions.WRITE,
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in(
            'Delete {0}'.format(self.component.project_or_component),
            res
        )

    def test_can_configure_comments_if_admin(self):
        res = self.app.get(
            self.component.url + 'settings/',
            auth=self.user.auth,
        ).maybe_follow()
        assert_in('Commenting', res)

    def test_cant_configure_comments_if_not_admin(self):
        non_admin = AuthUserFactory()
        self.component.add_contributor(
            non_admin,
            permissions=permissions.WRITE,
            auth=self.consolidate_auth,
            save=True,
        )
        res = self.app.get(
            self.component.url + 'settings/',
            auth=non_admin.auth
        ).maybe_follow()
        assert_not_in('Commenting', res)

    def test_components_should_have_component_list(self):
        res = self.app.get(self.component.url, auth=self.user.auth)
        assert_in('Components', res)
@pytest.mark.enable_bookmark_creation
class TestPrivateLinkView(OsfTestCase):
    """Viewing a private project through a view-only ("private") link,
    including the anonymized variant that hides contributor identities.
    """

    def setUp(self):
        super(TestPrivateLinkView, self).setUp()
        self.user = AuthUserFactory()  # Is NOT a contributor
        self.project = ProjectFactory(is_public=False)
        self.link = PrivateLinkFactory(anonymous=True)
        self.link.nodes.add(self.project)
        self.link.save()
        self.project_url = self.project.web_url_for('view_project')

    def test_anonymous_link_hide_contributor(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_in('Anonymous Contributors', res.body)
        assert_not_in(self.user.fullname, res)

    def test_anonymous_link_hides_citations(self):
        res = self.app.get(self.project_url, {'view_only': self.link.key})
        assert_not_in('Citation:', res)

    def test_no_warning_for_read_only_user_with_valid_link(self):
        # A contributor visiting via a valid link should not see the
        # "viewed through a view-only link" banner.
        link2 = PrivateLinkFactory(anonymous=False)
        link2.nodes.add(self.project)
        link2.save()
        self.project.add_contributor(
            self.user,
            permissions=permissions.READ,
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': link2.key},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body
        )

    def test_no_warning_for_read_only_user_with_invalid_link(self):
        self.project.add_contributor(
            self.user,
            permissions=permissions.READ,
            save=True,
        )
        res = self.app.get(self.project_url, {'view_only': 'not_valid'},
                           auth=self.user.auth)
        assert_not_in(
            'is being viewed through a private, view-only link. '
            'Anyone with the link can view this project. Keep '
            'the link safe.',
            res.body
        )
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_quickfiles_creation
class TestMergingAccounts(OsfTestCase):
    """Effects of merging a duplicate account into a master account on
    contributor lists and the duplicate's profile page.
    """

    def setUp(self):
        super(TestMergingAccounts, self).setUp()
        self.user = UserFactory.build()
        self.user.fullname = "tess' test string"
        self.user.set_password('science')
        self.user.save()
        self.dupe = UserFactory.build()
        self.dupe.set_password('example')
        self.dupe.save()

    def test_merged_user_is_not_shown_as_a_contributor(self):
        project = ProjectFactory(is_public=True)
        # Both the master and dupe are contributors
        project.add_contributor(self.dupe, log=False)
        project.add_contributor(self.user, log=False)
        project.save()
        # At the project page, both are listed as contributors
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_in_html(self.dupe.fullname, res)
        # The accounts are merged
        self.user.merge_user(self.dupe)
        self.user.save()
        # Now only the master user is shown at the project page
        res = self.app.get(project.url).maybe_follow()
        assert_in_html(self.user.fullname, res)
        assert_true(self.dupe.is_merged)
        assert_not_in(self.dupe.fullname, res)

    def test_merged_user_has_alert_message_on_profile(self):
        # Master merges dupe
        self.user.merge_user(self.dupe)
        self.user.save()
        # At the dupe user's profile there is an alert message at the top
        # indicating that the user is merged
        res = self.app.get('/profile/{0}/'.format(self.dupe._primary_key)).maybe_follow()
        assert_in('This account has been merged', res)
@pytest.mark.enable_bookmark_creation
class TestShortUrls(OsfTestCase):
    """Short URLs (e.g. /<guid>/) must render the same page body as the
    equivalent deep URLs for projects, components, and wiki pages.
    """

    def setUp(self):
        super(TestShortUrls, self).setUp()
        self.user = AuthUserFactory()
        self.auth = self.user.auth
        self.consolidate_auth = Auth(user=self.user)
        self.project = ProjectFactory(creator=self.user)
        # A non-project component
        self.component = NodeFactory(parent=self.project, category='hypothesis', creator=self.user)
        # Hack: Add some logs to component; should be unnecessary pending
        # improvements to factories from @rliebz
        self.component.set_privacy('public', auth=self.consolidate_auth)
        self.component.set_privacy('private', auth=self.consolidate_auth)
        self.wiki = WikiFactory(
            user=self.user,
            node=self.component,
        )

    def _url_to_body(self, url):
        # Fetch *url* (following redirects) and return the normalized page body.
        return self.app.get(
            url,
            auth=self.auth
        ).maybe_follow(
            auth=self.auth,
        ).normal_body

    def test_project_url(self):
        assert_equal(
            self._url_to_body(self.project.deep_url),
            self._url_to_body(self.project.url),
        )

    def test_component_url(self):
        assert_equal(
            self._url_to_body(self.component.deep_url),
            self._url_to_body(self.component.url),
        )

    def test_wiki_url(self):
        assert_equal(
            self._url_to_body(self.wiki.deep_url),
            self._url_to_body(self.wiki.url),
        )
@pytest.mark.enable_bookmark_creation
@pytest.mark.enable_implicit_clean
class TestClaiming(OsfTestCase):
    """Claiming of unregistered-contributor accounts: displayed names,
    the claim/set-password page, and per-project unregistered names.
    """

    def setUp(self):
        super(TestClaiming, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)

    def test_correct_name_shows_in_contributor_list(self):
        name1, email = fake.name(), fake_email()
        UnregUserFactory(fullname=name1, email=email)
        name2, email = fake.name(), fake_email()
        # Added with different name
        self.project.add_unregistered_contributor(fullname=name2,
            email=email, auth=Auth(self.referrer))
        self.project.save()
        res = self.app.get(self.project.url, auth=self.referrer.auth)
        # Correct name is shown
        assert_in_html(name2, res)
        assert_not_in(name1, res)

    def test_user_can_set_password_on_claim_page(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        #form['username'] = new_user.username #Removed as long as E-mail can't be updated.
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().follow()
        new_user.reload()
        assert_true(new_user.check_password('killerqueen'))

    def test_sees_is_redirected_if_user_already_logged_in(self):
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        existing = AuthUserFactory()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        # a user is already logged in
        res = self.app.get(claim_url, auth=existing.auth, expect_errors=True)
        assert_equal(res.status_code, 302)

    def test_unregistered_users_names_are_project_specific(self):
        name1, name2, email = fake.name(), fake.name(), fake_email()
        project2 = ProjectFactory(creator=self.referrer)
        # different projects use different names for the same unreg contributor
        self.project.add_unregistered_contributor(
            email=email,
            fullname=name1,
            auth=Auth(self.referrer)
        )
        self.project.save()
        project2.add_unregistered_contributor(
            email=email,
            fullname=name2,
            auth=Auth(self.referrer)
        )
        project2.save()
        self.app.authenticate(*self.referrer.auth)
        # Each project displays a different name in the contributor list
        res = self.app.get(self.project.url)
        assert_in_html(name1, res)
        res2 = self.app.get(project2.url)
        assert_in_html(name2, res2)

    @unittest.skip('as long as E-mails cannot be changed')
    def test_cannot_set_email_to_a_user_that_already_exists(self):
        reg_user = UserFactory()
        name, email = fake.name(), fake_email()
        new_user = self.project.add_unregistered_contributor(
            email=email,
            fullname=name,
            auth=Auth(self.referrer)
        )
        self.project.save()
        # Goes to claim url and successfully claims account
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        self.project.reload()
        assert_in('Set Password', res)
        form = res.forms['setPasswordForm']
        # Fills out an email that is the username of another user
        form['username'] = reg_user.username
        form['password'] = 'killerqueen'
        form['password2'] = 'killerqueen'
        res = form.submit().maybe_follow(expect_errors=True)
        assert_in(
            language.ALREADY_REGISTERED.format(email=reg_user.username),
            res
        )

    def test_correct_display_name_is_shown_at_claim_page(self):
        original_name = fake.name()
        unreg = UnregUserFactory(fullname=original_name)
        different_name = fake.name()
        new_user = self.project.add_unregistered_contributor(
            email=unreg.username,
            fullname=different_name,
            auth=Auth(self.referrer),
        )
        self.project.save()
        claim_url = new_user.get_claim_url(self.project._primary_key)
        res = self.app.get(claim_url)
        # Correct name (different_name) should be on page
        assert_in_html(different_name, res)
class TestConfirmingEmail(OsfTestCase):
    """Email confirmation flows: permission checks on the update_user API
    and reuse of an already-consumed confirmation link.
    """

    def setUp(self):
        super(TestConfirmingEmail, self).setUp()
        self.user = UnconfirmedUserFactory()
        self.confirmation_url = self.user.get_confirmation_url(
            self.user.username,
            external=False,
        )
        self.confirmation_token = self.user.get_confirmation_token(
            self.user.username
        )

    def test_cannot_remove_another_user_email(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        url = api_url_for('update_user')
        # Payload targets user1 but is submitted with user2's credentials.
        header = {'id': user1.username, 'emails': [{'address': user1.username}]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannnot_make_primary_email_for_another_user(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        user1.emails.create(address=email)
        user1.save()
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': False, 'confirmed': True},
                             {'address': email, 'primary': True, 'confirmed': True}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_cannnot_add_email_for_another_user(self):
        user1 = AuthUserFactory()
        user2 = AuthUserFactory()
        email = 'test@cos.io'
        url = api_url_for('update_user')
        header = {'id': user1.username,
                  'emails': [{'address': user1.username, 'primary': True, 'confirmed': True},
                             {'address': email, 'primary': False, 'confirmed': False}
                             ]}
        res = self.app.put_json(url, header, auth=user2.auth, expect_errors=True)
        assert_equal(res.status_code, 403)

    def test_error_page_if_confirm_link_is_used(self):
        # Consume the confirmation token first, then hit the link again.
        self.user.confirm_email(self.confirmation_token)
        self.user.save()
        res = self.app.get(self.confirmation_url, expect_errors=True)
        assert_in(auth_exc.InvalidTokenError.message_short, res)
        assert_equal(res.status_code, http.BAD_REQUEST)
@pytest.mark.enable_implicit_clean
@pytest.mark.enable_bookmark_creation
class TestClaimingAsARegisteredUser(OsfTestCase):
    """An already-registered user claiming an unregistered-contributor
    record on a project or preprint by re-entering their password.
    """

    def setUp(self):
        super(TestClaimingAsARegisteredUser, self).setUp()
        self.referrer = AuthUserFactory()
        self.project = ProjectFactory(creator=self.referrer, is_public=True)
        name, email = fake.name(), fake_email()
        self.user = self.project.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        self.project.save()

    def test_claim_user_registered_with_correct_password(self):
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'queenfan86'
        url = self.user.get_claim_url(self.project._primary_key)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body)
        form = res.forms['claimContributorForm']
        form['password'] = 'queenfan86'
        res = form.submit(auth=reg_user.auth)
        res = res.follow(auth=reg_user.auth)
        self.project.reload()
        self.user.reload()
        # user is now a contributor to the project
        assert_in(reg_user, self.project.contributors)
        # the unregistered user (self.user) is removed as a contributor, and their
        assert_not_in(self.user, self.project.contributors)
        # unclaimed record for the project has been deleted
        assert_not_in(self.project, self.user.unclaimed_records)

    def test_claim_user_registered_preprint_with_correct_password(self):
        preprint = PreprintFactory(creator=self.referrer)
        name, email = fake.name(), fake_email()
        unreg_user = preprint.add_unregistered_contributor(
            fullname=name,
            email=email,
            auth=Auth(user=self.referrer)
        )
        reg_user = AuthUserFactory()  # NOTE: AuthUserFactory sets password as 'queenfan86'
        url = unreg_user.get_claim_url(preprint._id)
        # Follow to password re-enter page
        res = self.app.get(url, auth=reg_user.auth).follow(auth=reg_user.auth)
        # verify that the "Claim Account" form is returned
        assert_in('Claim Contributor', res.body)
        form = res.forms['claimContributorForm']
        form['password'] = 'queenfan86'
        res = form.submit(auth=reg_user.auth)
        preprint.reload()
        unreg_user.reload()
        # user is now a contributor to the project
        assert_in(reg_user, preprint.contributors)
        # the unregistered user (unreg_user) is removed as a contributor, and their
        assert_not_in(unreg_user, preprint.contributors)
        # unclaimed record for the project has been deleted
        assert_not_in(preprint, unreg_user.unclaimed_records)
class TestResendConfirmation(OsfTestCase):
    """The "resend confirmation email" page: who may trigger a resend and
    the throttle on rapid repeat submissions.
    """

    def setUp(self):
        super(TestResendConfirmation, self).setUp()
        self.unconfirmed_user = UnconfirmedUserFactory()
        self.confirmed_user = UserFactory()
        self.get_url = web_url_for('resend_confirmation_get')
        self.post_url = web_url_for('resend_confirmation_post')

    # test that resend confirmation page is load correctly
    def test_resend_confirmation_get(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Resend Confirmation', res.body)
        assert_in('resendForm', res.forms)

    # test that unconfirmed user can receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_resend_confirmation_email(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
        res = form.submit()
        # check email, request and response
        assert_true(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that confirmed user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_1(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = self.confirmed_user.emails.first().address
        res = form.submit()
        # check email, request and response
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('has already been confirmed', res)

    # test that non-existing user cannot receive resend confirmation email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_resend_confirmation_email_2(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        form['email'] = 'random@random.com'
        res = form.submit()
        # check email, request and response
        assert_false(mock_send_mail.called)
        assert_equal(res.status_code, 200)
        assert_equal(res.request.path, self.post_url)
        assert_in_html('If there is an OSF account', res)

    # test that user cannot submit resend confirmation request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_resend_confirmation_twice_quickly(self, mock_send_mail):
        # load resend confirmation page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['resendForm']
        # Fixed: was `self.unconfirmed_user.email`, but (as the other tests in
        # this class show) the user model exposes addresses via the `emails`
        # relation / `unconfirmed_emails`, not a scalar `.email` attribute.
        form['email'] = self.unconfirmed_user.unconfirmed_emails[0]
        res = form.submit()
        res = form.submit()
        # check request and response
        assert_equal(res.status_code, 200)
        assert_in_html('Please wait', res)
class TestForgotPassword(OsfTestCase):
    """The forgot-password page: logout behavior, reset-email dispatch
    rules (existing / unknown / disabled accounts), and the throttle.
    """

    def setUp(self):
        super(TestForgotPassword, self).setUp()
        self.user = UserFactory()
        self.auth_user = AuthUserFactory()
        self.get_url = web_url_for('forgot_password_get')
        self.post_url = web_url_for('forgot_password_post')
        # Clear any existing reset token so the tests below can assert on it.
        self.user.verification_key_v2 = {}
        self.user.save()

    # log users out before they land on forgot password page
    def test_forgot_password_logs_out_user(self):
        # visit forgot password link while another user is logged in
        res = self.app.get(self.get_url, auth=self.auth_user.auth)
        # check redirection to CAS logout
        assert_equal(res.status_code, 302)
        location = res.headers.get('Location')
        assert_not_in('reauth', location)
        assert_in('logout?service=', location)
        assert_in('forgotpassword', location)

    # test that forgot password page is loaded correctly
    def test_get_forgot_password(self):
        res = self.app.get(self.get_url)
        assert_equal(res.status_code, 200)
        assert_in('Forgot Password', res.body)
        assert_in('forgotPasswordForm', res.forms)

    # test that existing user can receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_can_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        # check mail was sent
        assert_true(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is set
        self.user.reload()
        assert_not_equal(self.user.verification_key_v2, {})

    # test that non-existing user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_receive_reset_password_email(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = 'fake' + self.user.username
        res = form.submit()
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that non-active user cannot receive reset password email
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_not_active_user_no_reset_password_email(self, mock_send_mail):
        self.user.disable_account()
        self.user.save()
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        # check mail was not sent
        assert_false(mock_send_mail.called)
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check request URL is /forgotpassword
        assert_equal(res.request.path, self.post_url)
        # check push notification
        assert_in_html('If there is an OSF account', res)
        assert_not_in_html('Please wait', res)
        # check verification_key_v2 is not set
        self.user.reload()
        assert_equal(self.user.verification_key_v2, {})

    # test that user cannot submit forgot password request too quickly
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_cannot_reset_password_twice_quickly(self, mock_send_mail):
        # load forgot password page and submit email
        res = self.app.get(self.get_url)
        form = res.forms['forgotPasswordForm']
        form['forgot_password-email'] = self.user.username
        res = form.submit()
        res = form.submit()
        # check http 200 response
        assert_equal(res.status_code, 200)
        # check push notification
        assert_in_html('Please wait', res)
        assert_not_in_html('If there is an OSF account', res)
@unittest.skip('Public projects/components are dynamically loaded now.')
class TestAUserProfile(OsfTestCase):
    """Profile-page listing of public projects/components.

    NOTE(review): the whole class is skipped because these listings are
    now loaded dynamically on the client; kept for reference.
    """

    def setUp(self):
        OsfTestCase.setUp(self)
        self.user = AuthUserFactory()
        self.me = AuthUserFactory()
        self.project = ProjectFactory(creator=self.me, is_public=True, title=fake.bs())
        self.component = NodeFactory(creator=self.me, parent=self.project, is_public=True, title=fake.bs())

    # regression test for https://github.com/CenterForOpenScience/osf.io/issues/2623
    def test_has_public_projects_and_components(self):
        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.me.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)
        # Another user can also see my public project and component
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        # I see the title of both my project and component
        res = self.app.get(url, auth=self.user.auth)
        assert_in_html(self.component.title, res)
        assert_in_html(self.project.title, res)

    def test_shows_projects_with_many_contributors(self):
        # My project has many contributors
        for _ in range(5):
            user = UserFactory()
            self.project.add_contributor(user, auth=Auth(self.project.creator), save=True)
        # I go to my own profile
        url = web_url_for('profile_view_id', uid=self.me._primary_key)
        res = self.app.get(url, auth=self.me.auth)
        # I see '3 more' as a link
        # (presumably the page truncates the contributor list after three
        # entries, leaving 3 of the 6 total behind the link — TODO confirm)
        assert_in('3 more', res)
        res = res.click('3 more')
        assert_equal(res.request.path, self.project.url)

    def test_has_no_public_projects_or_components_on_own_profile(self):
        # User goes to their profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        res = self.app.get(url, auth=self.user.auth)
        # user has no public components/projects
        assert_in('You have no public projects', res)
        assert_in('You have no public components', res)

    def test_user_no_public_projects_or_components(self):
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # User has no public components/projects
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public projects', res)
        assert_in('This user has no public components', res)

    # regression test
    def test_does_not_show_registrations(self):
        project = ProjectFactory(creator=self.user)
        component = NodeFactory(parent=project, creator=self.user, is_public=False)
        # User has a registration with public components
        reg = RegistrationFactory(project=component.parent_node, creator=self.user, is_public=True)
        for each in reg.nodes:
            each.is_public = True
            each.save()
        # I go to other user's profile
        url = web_url_for('profile_view_id', uid=self.user._id)
        # Registration does not appear on profile
        res = self.app.get(url, auth=self.me.auth)
        assert_in('This user has no public components', res)
        assert_not_in(reg.title, res)
        assert_not_in(reg.nodes[0].title, res)
@pytest.mark.enable_bookmark_creation
class TestPreprintBannerView(OsfTestCase):
    """Tests for the preprint banner shown on a supplemental project's page.

    Each test loads the project page as admin, write, read, non-contributor
    and anonymous viewers and checks whether the 'Has supplemental materials
    for' banner (or the moderation-status banner) is visible for that role.
    """

    def setUp(self):
        super(TestPreprintBannerView, self).setUp()
        # One user per permission level, plus a non-contributor.
        self.admin = AuthUserFactory()
        self.write_contrib = AuthUserFactory()
        self.read_contrib = AuthUserFactory()
        self.non_contrib = AuthUserFactory()

        self.provider_one = PreprintProviderFactory()
        self.project_one = ProjectFactory(creator=self.admin, is_public=True)
        self.project_one.add_contributor(self.write_contrib, permissions.WRITE)
        self.project_one.add_contributor(self.read_contrib, permissions.READ)

        self.subject_one = SubjectFactory()
        # Published preprint whose supplemental project is project_one.
        self.preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=self.project_one, is_published=True)
        self.preprint.add_contributor(self.write_contrib, permissions.WRITE)
        self.preprint.add_contributor(self.read_contrib, permissions.READ)

    def test_public_project_published_preprint(self):
        """Published preprint: every viewer (including anonymous) sees the banner."""
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_in('Has supplemental materials for', res.body)

    def test_public_project_abandoned_preprint(self):
        """Abandoned (machine_state 'initial') preprint: nobody sees the banner."""
        self.preprint.machine_state = 'initial'
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_not_in('Has supplemental materials for', res.body)

    def test_public_project_deleted_preprint(self):
        """Deleted preprint: nobody sees the banner."""
        self.preprint.deleted = timezone.now()
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_not_in('Has supplemental materials for', res.body)

    def test_public_project_private_preprint(self):
        """Private preprint: only contributors see the banner."""
        self.preprint.is_public = False
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_not_in('Has supplemental materials for', res.body)

    def test_public_project_orphaned_preprint(self):
        """Orphaned preprint (no primary file): only contributors see the banner."""
        self.preprint.primary_file = None
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_not_in('Has supplemental materials for', res.body)

    def test_public_project_unpublished_preprint(self):
        """Unpublished preprint: only contributors see the banner."""
        self.preprint.is_published = False
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('Has supplemental materials for', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('Has supplemental materials for', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_not_in('Has supplemental materials for', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_not_in('Has supplemental materials for', res.body)

    def test_public_project_pending_preprint_post_moderation(self):
        """Post-moderation pending: contributors see the 'Pending' status text."""
        self.preprint.machine_state = 'pending'
        provider = PreprintProviderFactory(reviews_workflow='post-moderation')
        self.preprint.provider = provider
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_in('on {}'.format(self.preprint.provider.name), res.body)
        assert_not_in('Pending\n', res.body)
        assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_in('on {}'.format(self.preprint.provider.name), res.body)
        assert_not_in('Pending\n', res.body)
        assert_not_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

    def test_implicit_admins_can_see_project_status(self):
        """A parent-project admin (implicit admin on the component) sees the status."""
        project = ProjectFactory(creator=self.admin)
        component = NodeFactory(creator=self.admin, parent=project)
        project.add_contributor(self.write_contrib, permissions.ADMIN)
        project.save()

        preprint = PreprintFactory(creator=self.admin, filename='mgla.pdf', provider=self.provider_one, subjects=[[self.subject_one._id]], project=component, is_published=True)
        preprint.machine_state = 'pending'
        provider = PreprintProviderFactory(reviews_workflow='post-moderation')
        preprint.provider = provider
        preprint.save()
        url = component.web_url_for('view_project')

        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('{}'.format(preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is publicly available and searchable but is subject to removal by a moderator.', res.body)

    def test_public_project_pending_preprint_pre_moderation(self):
        """Pre-moderation pending: contributors see the not-yet-public status text."""
        self.preprint.machine_state = 'pending'
        provider = PreprintProviderFactory(reviews_workflow='pre-moderation')
        self.preprint.provider = provider
        self.preprint.save()
        url = self.project_one.web_url_for('view_project')

        # Admin - preprint
        res = self.app.get(url, auth=self.admin.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)

        # Write - preprint
        res = self.app.get(url, auth=self.write_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)

        # Read - preprint
        res = self.app.get(url, auth=self.read_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_in('Pending\n', res.body)
        assert_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)

        # Noncontrib - preprint
        res = self.app.get(url, auth=self.non_contrib.auth)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_not_in('Pending\n', res.body)
        assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)

        # Unauthenticated - preprint
        res = self.app.get(url)
        assert_in('{}'.format(self.preprint.provider.name), res.body)
        assert_not_in('Pending\n', res.body)
        assert_not_in('This preprint is not publicly available or searchable until approved by a moderator.', res.body)
# Allow running this test module directly, outside the project's test runner.
if __name__ == '__main__':
    unittest.main()
|
mattclark/osf.io
|
tests/test_webtests.py
|
Python
|
apache-2.0
| 50,803
|
[
"VisIt"
] |
7d155725e2c4794bd341eccca7d1c8b4fb0e9b35aab0e4578319745ea25509da
|
# This file is part of Copernicus
# http://www.copernicus-computing.org/
#
# Copyright (C) 2011, Sander Pronk, Iman Pouya, Erik Lindahl, and others.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import os
import re
import os.path
import shutil
import shlex
import glob
import stat
import subprocess
import logging
import time
log=logging.getLogger(__name__)
from cpc.dataflow import Value
from cpc.dataflow import FileValue
from cpc.dataflow import IntValue
from cpc.dataflow import FloatValue
from cpc.dataflow import StringValue
from cpc.dataflow import Resources
import cpc.command
import cpc.util
import cpc.lib.gromacs.tune as tune
import cpc.lib.gromacs.iterate as iterate
from cpc.lib.gromacs import cmds
from cpc.lib.gromacs.mdrun import extractConf, TrajFileCollection, MdrunError
class PLUMEDError(cpc.util.CpcError):
    """Error type for the PLUMED mdrun wrapper functions in this module."""
    pass
def checkErr(stde, rsrc, tpr, persDir):
    """Inspect an mdrun stderr file for fatal (Gromacs or PLUMED) errors.

    A domain-decomposition failure is treated as recoverable: the run is
    re-tuned with one core fewer and the error is considered handled.

    Args:
        stde: path to the stderr file of the last run.
        rsrc: Resources object; its max core count is used when re-tuning.
        tpr: path to the run's tpr file (used to extract a conf for tuning).
        persDir: persistent directory that receives the tuning scratch files.
    Returns:
        True if a fatal, non-recoverable error was found; False if there was
        no error, no stderr file (assumed worker-side failure), or the error
        was recovered from by re-tuning.
    """
    if not os.path.exists(stde):
        # we assume it's a worker error, not a run error
        return False
    fatalErr = False
    OK = True
    with open(stde, 'r') as inf:
        for line in inf:
            if re.match(r'.*Fatal error.*', line):
                fatalErr = True
                log.debug("Found fatal error")
                OK = False
            if re.match(r'.*PLUMED ERROR.*', line):
                fatalErr = True
                log.debug("Found a PLUMED error.")
                OK = False
            if fatalErr:
                # once a fatal error is seen, look for a domain decomposition
                # message (on this or a later line): that means the core count
                # was wrong, which we can recover from by re-tuning.
                if re.match(r'.*domain decomposition.*', line):
                    log.debug("Found domain decomp error")
                    confFile = os.path.join(persDir, 'conf.gro')
                    extractConf(tpr, confFile)
                    tune.tune(rsrc, confFile, tpr, persDir,
                              rsrc.max.get('cores') - 1)
                    OK = True
                    break
    return not OK
def _cullPartFiles(fileList):
    """Drop empty part files and de-duplicate by basename.

    mdrun restarts can leave several files with the same basename in
    different run directories; the later file supersedes the earlier one
    because mdrun was not aware of it when writing. Returns the culled list.
    """
    kept = []
    names = []
    for fname in fileList:
        if os.stat(fname).st_size > 0:
            base = os.path.split(fname)[1]
            if base not in names:
                kept.append(fname)
                names.append(base)
            else:
                # duplicate basename: the later file overrides the earlier one
                kept[names.index(base)] = fname
    return kept

def _concatTrajParts(toolCmdline, parts, outName, logName, persDir):
    """Run a Gromacs concatenation tool (trjcat/eneconv) on parts -> outName.

    toolCmdline is the tool's command line as a string; its output is logged
    to persDir/logName.
    """
    cmd = toolCmdline.split() + ["-f"]
    cmd.extend(parts)
    cmd.extend(["-o", outName])
    stdo = open(os.path.join(persDir, logName), "w")
    sp = subprocess.Popen(cmd, stdout=stdo, stderr=subprocess.STDOUT)
    sp.communicate(None)
    stdo.close()

def _concatTextFiles(fileList, outName, footer=None):
    """Concatenate text files into outName, optionally appending a footer."""
    outf = open(outName, "w")
    for infile in fileList:
        inf = open(infile, "r")
        outf.write(inf.read())
        inf.close()
    if footer is not None:
        outf.write(footer)
    outf.close()

def extractData(confout, outDir, persDir, fo):
    """Concatenate all output data from the partial runs into the end results.

    Args:
        confout: list of confout.part*.gro paths; the first one is used as
            the final configuration.
        outDir: directory that receives the merged output files.
        persDir: persistent directory holding the run_??? subdirectories.
        fo: function-output object; every merged file is registered on it.
    """
    cmdnames = cmds.GromacsCommands()
    # final configuration
    confoutPath = os.path.join(outDir, "confout.gro")
    shutil.copy(confout[0], confoutPath)
    fo.setOut('conf', FileValue(confoutPath))

    # concatenate the xtc trajectory parts
    xtcs = _cullPartFiles(sorted(glob.glob(os.path.join(persDir, "run_???",
                                                        "traj.part*.xtc"))))
    xtcoutname = os.path.join(outDir, "traj.xtc")
    if len(xtcs) > 0:
        _concatTrajParts(cmdnames.trjcat, xtcs, xtcoutname,
                         "trjcat_xtc.out", persDir)
        fo.setOut('xtc', FileValue(xtcoutname))

    # concatenate the trr trajectory parts.
    # Bug fix: the original used cmdnames.trjcat without .split() here, which
    # raises TypeError (str + list) whenever trjcat is invoked as it is for
    # the xtc and edr branches.
    trrs = _cullPartFiles(sorted(glob.glob(os.path.join(persDir, "run_???",
                                                        "traj.part*.trr"))))
    trroutname = os.path.join(outDir, "traj.trr")
    if len(trrs) > 0:
        _concatTrajParts(cmdnames.trjcat, trrs, trroutname,
                         "trjcat_trr.out", persDir)
        fo.setOut('trr', FileValue(trroutname))

    # concatenate the energy files (sorted so the "later wins" de-dup rule is
    # deterministic; the original relied on unspecified glob order here)
    edrs = _cullPartFiles(sorted(glob.glob(os.path.join(persDir, "run_???",
                                                        "ener.part*.edr"))))
    edroutname = os.path.join(outDir, "ener.edr")
    if len(edrs) > 0:
        _concatTrajParts(cmdnames.eneconv, edrs, edroutname,
                         "eneconv.out", persDir)
        fo.setOut('edr', FileValue(edroutname))

    # merge the stdout files, appending a timestamp footer as before
    stdouto = glob.glob(os.path.join(persDir, "run_???", "stdout"))
    stdoutname = os.path.join(outDir, "stdout")
    footer = "%s\n" % time.strftime("%a, %d %b %Y %H:%M:%S")
    footer += "%f\n" % time.time()
    _concatTextFiles(stdouto, stdoutname, footer=footer)
    fo.setOut('stdout', FileValue(stdoutname))

    # merge the stderr files
    stderro = glob.glob(os.path.join(persDir, "run_???", "stderr"))
    stderrname = os.path.join(outDir, "stderr")
    _concatTextFiles(stderro, stderrname)
    fo.setOut('stderr', FileValue(stderrname))
    log.debug("Returning without command.")
    log.debug("fo.cmds=%s"%str(fo.cmds))

    # merge the COLVAR files
    colvaro = glob.glob(os.path.join(persDir, "run_???", "COLVAR"))
    colvarname = os.path.join(outDir, "COLVAR")
    _concatTextFiles(colvaro, colvarname)
    fo.setOut('COLVAR', FileValue(colvarname))

    # HILLS and bias.dat are cumulative: keep only the last run's copy
    # (sorted so "last" is the highest-numbered run directory)
    hillso = sorted(glob.glob(os.path.join(persDir, "run_???", "HILLS")))
    if len(hillso) > 0:
        hillsname = os.path.join(outDir, "HILLS")
        _concatTextFiles([hillso[-1]], hillsname)
        log.debug("Set the HILLS outfile")
        fo.setOut('HILLS', FileValue(hillsname))
    biaso = sorted(glob.glob(os.path.join(persDir, "run_???", "bias.dat")))
    if len(biaso) > 0:
        biasname = os.path.join(outDir, "bias.dat")
        _concatTextFiles([biaso[-1]], biasname)
        fo.setOut('bias', FileValue(biasname))
def mdrun(inp):
    """Controller for a PLUMED-patched Gromacs mdrun.

    Driven repeatedly by the dataflow engine: (re)initializes when the tpr
    input changes, detects a finished run (confout present) and extracts its
    data, otherwise issues a continuation command from the last checkpoint.
    Returns the function-output object (with a new command added when the run
    is not yet finished).
    """
    if inp.testing():
        # if there are no inputs, we're testing whether the command can run
        cpc.util.plugin.testCommand("trjcat -version")
        cpc.util.plugin.testCommand("eneconv -version")
        cpc.util.plugin.testCommand("gmxdump -version")
        return

    persDir=inp.getPersistentDir()
    outDir=inp.getOutputDir()
    fo=inp.getFunctionOutput()
    rsrc=Resources(inp.getInputValue("resources"))
    rsrcFilename=os.path.join(persDir, 'rsrc.dat')
    # check whether we need to reinit
    pers=cpc.dataflow.Persistence(os.path.join(inp.getPersistentDir(),
                                               "persistent.dat"))
    init=False
    lasttpr=pers.get('lasttpr')
    newtpr=inp.getInput('tpr')
    #if inp.getInputValue('tpr').isUpdated():
    if newtpr!= lasttpr:
        lasttpr=newtpr
        # there was no previous command.
        # purge the persistent directory, by moving the confout files to a
        # backup directory
        log.debug("(Re)initializing mdrun")
        confout=glob.glob(os.path.join(persDir, "run_???"))
        if len(confout)>0:
            backupDir=os.path.join(persDir, "backup")
            try:
                os.mkdir(backupDir)
            except:
                # NOTE(review): bare except; mkdir failure (dir exists) ignored
                pass
            for conf in confout:
                try:
                    os.rename(conf, os.path.join(backupDir,
                                                 os.path.split(conf)[-1]))
                except:
                    pass
        init=True
        pers.set('lasttpr', lasttpr)
    elif inp.cmd is None:
        # tpr unchanged and no finished command: nothing to do this round
        return fo
    if init:
        if rsrc.max.get('cores') is None:
            # no tuning information yet: extract a conf and tune resources
            confFile=os.path.join(persDir, 'conf.gro')
            extractConf(newtpr, confFile)
            tune.tune(rsrc, confFile, newtpr, persDir)
        if inp.cmd is not None:
            log.debug("Canceling commands")
            fo.cancelPrevCommands()
        pers.set('initialized', True)
    else:
        if rsrc.max.get('cores') is None:
            # reload previously saved tuning results
            rsrc.load(rsrcFilename)
    if inp.cmd is not None:
        log.debug("Return code was %s"%str(inp.cmd.getReturncode()))
    # try to find out whether the run has already finished
    confout=glob.glob(os.path.join(persDir, "run_???", "confout.part*.gro"))
    if len(confout) > 0:
        log.debug("Extracting data. ")
        # confout exists. we're finished. Concatenate all the runs if
        # we need to, but first create the output dict
        extractData(confout, outDir, persDir, fo)
        return fo
    else:
        tfc=TrajFileCollection(persDir)
        # first check whether we got an error code back
        if (inp.cmd is not None) and inp.cmd.getReturncode()!=0:
            # there was a problem. Check the log
            stde=os.path.join(tfc.getLastDir(), "stderr")
            # checkErr returns True for a fatal, non-recoverable error
            if checkErr(stde, rsrc, newtpr, persDir):
                if os.path.exists(stde):
                    stdef=open(stde, 'r')
                    errmsg=unicode(stdef.read(), errors='ignore')
                    stdef.close()
                    raise MdrunError("Error running mdrun: %s"%errmsg)
            else:
                # now check whether any of the last 4 iterations produced
                # trajectories
                trajlist=tfc.getTrajList()
                if len(trajlist) > 4:
                    ret=False
                    for j in range(4):
                        haveTraj=(len(trajlist[-j-1]) > 0)
                        ret=ret or haveTraj #prevtraj[-j-1]
                    if not ret:
                        # four consecutive runs without output: give up
                        stde=os.path.join(tfc.getLastDir(), "stderr")
                        if os.path.exists(stde):
                            stdef=open(stde, 'r')
                            errmsg=unicode(stdef.read(), errors='ignore')
                            stdef.close()
                        else:
                            errmsg=""
                        raise MdrunError("Error running mdrun. No trajectories: %s"%
                                         errmsg)
        # Make a new directory with the continuation of this run
        #newdirname=currundir #"run_%03d"%(i+1)
        newdirname=tfc.getNewRunDir()
        try:
            os.mkdir(newdirname)
        except OSError:
            pass
        tpr=newtpr
        src=os.path.join(inp.getBaseDir(), tpr)
        dst=os.path.join(newdirname,"topol.tpr")
        shutil.copy(src,dst)
        # handle command line inputs
        if inp.getInput('cmdline_options') is not None:
            cmdlineOpts=shlex.split(inp.getInput('cmdline_options'))
        else:
            cmdlineOpts=[]
        if inp.getInput('priority') is not None:
            prio=inp.getInput('priority')
        else:
            prio=0
        lastcpt=tfc.getLastCpt()
        # copy the checkpoint to the new cmd dir
        if lastcpt is not None:
            shutil.copy(lastcpt, os.path.join(newdirname,"state.cpt"))
        # now add to the priority if this run has already been started
        completed=tfc.getFractionCompleted(tpr)
        if completed > 0:
            # now the priority ranges from 1 to 4, depending on how
            # far along the simulation is.
            prio += 1+int(3*(completed))
            log.debug("Setting new priority to %d because it's in progress"%
                      prio)
        # we can always add state.cpt, even if it doesn't exist.
        # include the plumed file here
        args=["-quiet", "-s", "topol.tpr", "-noappend", "-cpi", "state.cpt",
              "-rcon", "0.7", "-plumed", "plumed.dat" ]
        args.extend(cmdlineOpts)
        # for the new neighbor search scheme in Gromacs 4.6, set this env
        # variable
        if lastcpt is not None:
            # NOTE(review): exact duplicate of the checkpoint copy above;
            # looks redundant — confirm before removing
            shutil.copy(lastcpt, os.path.join(newdirname,"state.cpt"))
        # any expected output files.
        newFileNr=tfc.getLastTrajNr()+1
        outputFiles=[ "traj.part%04d.xtc"%newFileNr,
                      "traj.part%04d.trr"%newFileNr,
                      "confout.part%04d.gro"%newFileNr,
                      "ener.part%04d.edr"%newFileNr,
                      "dhdl.part%04d.xvg"%newFileNr,
                      "pullx.part%04d.xvg"%newFileNr,
                      "pullf.part%04d.xvg"%newFileNr,
                      "COLVAR",
                      "HILLS",
                      "bias.dat",
                      "state.cpt", "state_prev.cpt" ]
        log.debug("Expected output files: %s"%outputFiles)
        cmd=cpc.command.Command(newdirname, "plumed/mdrun",args,
                                minVersion=cpc.command.Version("4.5"),
                                addPriority=prio,
                                outputFiles=outputFiles)
        if inp.hasInput("resources") and inp.getInput("resources") is not None:
            #log.debug("resources is %s"%(inp.getInput("resources")))
            #rsrc=Resources(inp.getInputValue("resources"))
            rsrc.updateCmd(cmd)
        log.debug("Adding command")
        # copy the plumed file to the run dir
        plumed_inp=inp.getInput("plumed")
        log.debug("Adding the PLUMED file: %s"%plumed_inp)
        src=os.path.join(inp.getBaseDir(),plumed_inp)
        dst=os.path.join(newdirname,"plumed.dat")
        # check if we need to restart metadynamics
        if tfc.lastDir is not None:
            lasthills=os.path.join(tfc.lastDir,"HILLS")
            if os.path.isfile(lasthills):
                # a HILLS file exists: rewrite the PLUMED input with a
                # RESTART keyword and carry the HILLS file over
                plumed_dat=open(plumed_inp,'r').read()
                log.debug("Adding a RESTART statement to the PLUMED file.")
                newplumed=re.sub(r"HILLS","HILLS RESTART",plumed_dat)
                open(dst,"w").write(newplumed)
                newhills=os.path.join(newdirname,"HILLS")
                shutil.copy(lasthills,newhills)
            else: shutil.copy(src,dst)
        else: shutil.copy(src,dst)
        fo.addCommand(cmd)
        if inp.getInputValue('tpr').isUpdated() and inp.cmd is not None:
            log.debug("Canceling commands")
            fo.cancelPrevCommands()
    # and save for further invocations
    rsrc.save(rsrcFilename)
    pers.write()
    return fo
def grompp_mdruns(inp):
    """Spawn a linked grompp + mdrun instance pair for every new iteration."""
    if inp.testing():
        # with no inputs we only verify that the command can run
        cpc.util.plugin.testCommand("grompp -version")
        return

    pers = cpc.dataflow.Persistence(os.path.join(inp.getPersistentDir(),
                                                 "persistent.dat"))
    grompp_inputs = ['mdp', 'top', 'conf', 'ndx', 'settings', 'include']
    mdrun_inputs = ['priority', 'cmdline_options', 'resources', 'plumed']
    grompp_outputs = ['tpr']
    mdrun_outputs = ['conf', 'xtc', 'trr', 'edr', 'COLVAR', 'HILLS', 'bias']

    # how many instance pairs have been created in earlier invocations
    running = pers.get("running") or 0
    it = iterate.iterations(inp, grompp_inputs + mdrun_inputs,
                            grompp_outputs + mdrun_outputs, pers)
    out = inp.getFunctionOutput()
    total = it.getN()
    while running < total:
        grompp_name = "grompp_%d" % running
        mdrun_name = "mdrun_%d" % running
        try:
            out.addInstance(grompp_name, "gromacs::grompp")
        except:
            log.debug("Error: You must import the gromacs module to use this function.")
        out.addInstance(mdrun_name, "mdrun")
        # feed each grompp's tpr into its paired mdrun
        out.addConnection('%s:out.tpr' % grompp_name,
                          '%s:in.tpr' % mdrun_name)
        it.connectOnly(grompp_inputs, grompp_outputs, out, running, grompp_name)
        it.connectOnly(mdrun_inputs, mdrun_outputs, out, running, mdrun_name)
        running += 1
    pers.set("running", running)
    pers.write()
    return out
def mdruns(inp):
    """Spawn one mdrun instance per new iteration of the iterable inputs."""
    if inp.testing():
        # with no inputs we only verify that the commands can run
        cpc.util.plugin.testCommand("trjcat -version")
        cpc.util.plugin.testCommand("eneconv -version")
        cpc.util.plugin.testCommand("gmxdump -version")
        return

    pers = cpc.dataflow.Persistence(os.path.join(inp.getPersistentDir(),
                                                 "persistent.dat"))
    inputs = ['tpr', 'priority', 'cmdline_options', 'resources', 'plumed']
    outputs = ['conf', 'xtc', 'trr', 'edr', 'COLVAR', 'HILLS', 'bias']

    # how many instances were already created by earlier invocations
    running = pers.get("running") or 0
    it = iterate.iterations(inp, inputs, outputs, pers)
    out = inp.getFunctionOutput()
    total = it.getN()
    while running < total:
        inst_name = "mdrun_%d" % running
        out.addInstance(inst_name, "mdrun")
        it.connect(out, running, inst_name)
        running += 1
    pers.set("running", running)
    pers.write()
    return out
|
gromacs/copernicus
|
cpc/lib/plumed/mdrun.py
|
Python
|
gpl-2.0
| 18,804
|
[
"Gromacs"
] |
34d28845c51b8a3d199f79621ba9c66c986e28a65ff7fab21621dd377f457251
|
# This file is part of RMANAGER.
#
# RMANAGER is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# RMANAGER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RMANAGER. If not, see <http://www.gnu.org/licenses/>.
###########################################################################################################
# #
# RMANAGER additional library #
# rename roms files according to their time and date from ocean_time #
# Author : Raphael Dussin 2014- #
# #
###########################################################################################################
import datetime as dt
import netCDF4 as nc
import numpy as np
import os_utils
class tagfile():
    """Rename a ROMS output file according to the date in its ocean_time.

    Reads the single ocean_time value from the netcdf file, converts it to a
    datetime tag (standard or noleap calendar) and renames the file to
    <runname>_<filetype>_<isodate>.nc in place.
    """
    def __init__(self,filename):
        # path of the netcdf file to be renamed
        self.filein = filename
        # set to True when the file cannot be tagged (e.g. multiple times)
        self.fail=False
        return None
    def __call__(self,leap=True):
        """Tag and rename the file; leap=False uses a 365-day calendar."""
        # read time in netcdf
        time, timeunits = self.read_time(self.filein)
        if self.fail is False:
            # create the date tag
            if leap:
                tag = self.create_tag(time, timeunits)
            else:
                tag = self.create_tag_noleap(time, timeunits)
            # define a new filename
            self.create_new_filename(tag)
            # rename file
            self.rename_file()
        else:
            pass
        return None
    def read_time(self,filein):
        ''' read ocean_time variable and units in netcdf file'''
        try:
            fid = nc.Dataset(filein,'r')
        except:
            # NOTE(review): only prints a message; if the open fails, fid is
            # unbound and the next line raises NameError — consider raising
            print 'could not open file ', filein
        time = fid.variables['ocean_time'][:]
        timeunits = fid.variables['ocean_time'].units
        fid.close()
        if len(time) > 1:
            # a restart/averages file with several records cannot be tagged
            print '>>> error : multiple values in time array\n>>> skipping file ' + filein
            self.fail=True
        else:
            time = time[0]
        return time, timeunits
    def create_tag(self,time, timeunits):
        ''' create a datetime object from reference date and ocean_time'''
        delta_type = timeunits.split()[0]
        # RD : assuming units are in the format " something since 1900-01-01 0:00:00 "
        date_string = timeunits.split()[2]
        time_string = timeunits.split()[3]
        ref_string = date_string + ' ' + time_string
        # handle files written with reference different from 1900-01-01
        fmt = '%Y-%m-%d %H:%M:%S'
        dateref = dt.datetime.strptime(ref_string,fmt)
        # create a datetime object for current time
        # NOTE(review): if units are neither seconds nor days, tag is unbound
        # and the return raises NameError — TODO confirm intended inputs
        if delta_type == 'seconds':
            tag = dateref + dt.timedelta(seconds=time)
        elif delta_type == 'days':
            tag = dateref + dt.timedelta(days=time)
        return tag
    def create_tag_noleap(self, time, timeunits):
        ''' create a datetime object from reference date and ocean_time (noleap version)'''
        # first we need to figure out how many seconds are ellaped between ref_date
        # and start date of the run
        delta_type = timeunits.split()[0]
        date_string = timeunits.split()[2]
        time_string = timeunits.split()[3]
        ref_string = date_string + ' ' + time_string
        fmt = '%Y-%m-%d %H:%M:%S'
        dateref_dstart = dt.datetime.strptime(ref_string,fmt)
        if delta_type == 'seconds':
            seconds_from_init = float(time)
        elif delta_type == 'days':
            seconds_from_init = float(time) * 86400.
        # decompose the elapsed time on a fixed 365-day calendar
        nyear = int(np.floor(seconds_from_init / 365 / 86400))
        rm = np.remainder(seconds_from_init,365*86400)
        ndays = int(np.floor(rm / 86400))
        rm2 = np.remainder(rm,86400)
        nhours = int(np.floor(rm2 / 3600))
        rm3 = np.remainder(rm2,3600)
        nmin = int(np.floor(rm3 / 60))
        nsec = int(np.remainder(rm3,60))
        # pick a year we are sure is not a leap year
        # NOTE(review): mapping day-of-year via 1901 presumably assumes the
        # reference date is January 1st — verify for other reference dates
        fakeref = dt.datetime(1901,1,1,0,0)
        fakedate = fakeref + dt.timedelta(days=ndays)
        month = fakedate.month
        day = fakedate.day
        tag=dt.datetime(nyear + dateref_dstart.year,month, day, nhours, nmin, nsec)
        return tag
    def create_new_filename(self,tag):
        ''' based on tag, generate a new filename '''
        # get rid of full path (if any)
        filein = self.filein.replace('/',' ').split()[-1]
        # get the pieces we want to keep in filename
        # NOTE(review): assumes filenames look like <runname>_<filetype>_*.nc
        filein_wrk = filein.replace('_',' ').split()
        runname = filein_wrk[0]
        filetype = filein_wrk[1]
        # write our new filename
        self.fileout = runname + '_' + filetype + '_' + tag.isoformat() + '.nc'
        return None
    def rename_file(self):
        ''' call unix command mv '''
        # remove filein from full path
        wrk = self.filein.replace('/',' ').split()[0:-1]
        # re-create path
        # NOTE(review): always rebuilds with a leading '/', so this presumably
        # expects absolute input paths — confirm for relative paths
        path = ''
        for part in wrk:
            path = path + '/' + part
        # rename file only if different
        if self.filein == path + '/' + self.fileout:
            pass
        else:
            os_utils.execute('mv ' + self.filein + ' ' + path + '/' + self.fileout)
        return None
|
raphaeldussin/RMANAGER
|
src/python/libdatetag4roms.py
|
Python
|
gpl-3.0
| 5,309
|
[
"NetCDF"
] |
3ddd9cc6a4ce4bf5c0179eee00f6af1c48b4f01df39aba69f0f9b28415f7e546
|
"""
UI-level acceptance tests for OpenAssessment.
"""
from __future__ import absolute_import
from functools import wraps
import os
import time
import unittest
from bok_choy.promise import BrokenPromise, EmptyPromise
from bok_choy.web_app_test import WebAppTest
import ddt
from nose.plugins.attrib import attr
from pyinstrument import Profiler
from acceptance.auto_auth import AutoAuthPage
from acceptance.pages import AssessmentPage, GradePage, StaffAreaPage, SubmissionPage
# This value is generally used in jenkins, but not locally.
# NOTE: os.environ.get returns a string when the variable is set, so any
# non-empty value (even "0" or "false") enables profiling.
PROFILING_ENABLED = os.environ.get('ORA_PROFILING_ENABLED', False)
def retry(tries=2, delay=4, backoff=2):
    """
    Retry decorator with exponential backoff.

    Retries the wrapped callable when it raises ``BrokenPromise`` or
    ``AssertionError``; any other exception propagates immediately, as does
    the final failure once ``tries`` attempts are exhausted.

    Kwargs:
        tries (int): Maximum number of times to execute the function.
        delay (int): Starting delay (seconds) between retries.
        backoff (int): Multiplier applied to the delay after each retry.
    """
    def _decorator(func):
        @wraps(func)
        def _inner(*args, **kwargs):
            _delay = delay
            for attempt_num in range(tries):
                try:
                    return func(*args, **kwargs)
                except (BrokenPromise, AssertionError) as ex:
                    if attempt_num >= (tries - 1):
                        # out of attempts: let the final failure propagate
                        raise
                    # parenthesized single-argument print is identical in
                    # Python 2 and 3 (the py2-only print statement broke
                    # importing this module under Python 3)
                    print("Test failed with {err}, retrying in {sec} seconds...".format(err=ex, sec=_delay))
                    time.sleep(_delay)
                    _delay *= backoff
        return _inner
    return _decorator
class OpenAssessmentTest(WebAppTest):
"""
UI-level acceptance tests for Open Assessment.
"""
TEST_COURSE_ID = "course-v1:edx+ORA203+course"
PROBLEM_LOCATIONS = {
'staff_only':
u'courses/{test_course_id}/courseware/'
u'61944efb38a349edb140c762c7419b50/415c3ee1b7d04b58a1887a6fe82b31d6/'.format(test_course_id=TEST_COURSE_ID),
'self_only':
u'courses/{test_course_id}/courseware/'
u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/338a4affb58a45459629e0566291381e/'.format(test_course_id=TEST_COURSE_ID),
'peer_only':
u'courses/{test_course_id}/courseware/'
u'a4dfec19cf9b4a6fb5b18be6ccd9cecc/417e47b2663a4f79b62dba20b21628c8/'.format(test_course_id=TEST_COURSE_ID),
'student_training':
u'courses/{test_course_id}/courseware/'
u'676026889c884ac1827688750871c825/5663e9b038434636977a4226d668fe02/'.format(test_course_id=TEST_COURSE_ID),
'file_upload':
u'courses/{test_course_id}/courseware/'
u'57a3f9d51d424f6cb922f0d69cba868d/bb563abc989340d8806920902f267ca3/'.format(test_course_id=TEST_COURSE_ID),
'full_workflow_staff_override':
u'courses/{test_course_id}/courseware/'
u'676026889c884ac1827688750871c825/181ea9ff144c4766be44eb8cb360e34f/'.format(test_course_id=TEST_COURSE_ID),
'full_workflow_staff_required':
u'courses/{test_course_id}/courseware/'
u'8d9584d242b44343bc270ea5ef04ab03/0b0dcc728abe45138c650732af178afb/'.format(test_course_id=TEST_COURSE_ID),
'feedback_only':
u'courses/{test_course_id}/courseware/'
u'8d9584d242b44343bc270ea5ef04ab03/a2875e0db1454d0b94728b9a7b28000b/'.format(test_course_id=TEST_COURSE_ID),
'multiple_ora':
u'courses/{test_course_id}/courseware/'
u'3b9aa6e06d8f48818ff6f364b5586f38/b79abd43bb11445486cd1874e6c71a64/'.format(test_course_id=TEST_COURSE_ID),
}
SUBMISSION = u"This is a test submission."
LATEX_SUBMISSION = u"[mathjaxinline]( \int_{0}^{1}xdx )[/mathjaxinline]"
OPTIONS_SELECTED = [1, 2]
STAFF_OVERRIDE_OPTIONS_SELECTED = [0, 1]
STAFF_OVERRIDE_SCORE = 1
STAFF_GRADE_EXISTS = "COMPLETE"
STAFF_AREA_SCORE = "Final grade: {} out of 8"
STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE = "The problem has not been completed."
EXPECTED_SCORE = 6
STUDENT_TRAINING_OPTIONS = [
[1, 2],
[0, 2]
]
TEST_PASSWORD = "test_password"
def setUp(self, problem_type, staff=False):
    """
    Configure page objects to test Open Assessment.

    Args:
        problem_type (str): The type of problem being tested,
            used to choose which part of the course to load.
        staff (bool): If True, runs the test with a staff user (defaults to False).
    """
    super(OpenAssessmentTest, self).setUp()

    if PROFILING_ENABLED:
        self.profiler = Profiler(use_signal=False)
        self.profiler.start()

    self.problem_loc = self.PROBLEM_LOCATIONS[problem_type]
    self.auto_auth_page = AutoAuthPage(self.browser, course_id=self.TEST_COURSE_ID, staff=staff)
    self.submission_page = SubmissionPage(self.browser, self.problem_loc)

    # One AssessmentPage per assessment step, all pointing at the same problem.
    for attr_name, step_type in (
        ('self_asmnt_page', 'self-assessment'),
        ('peer_asmnt_page', 'peer-assessment'),
        ('student_training_page', 'student-training'),
        ('staff_asmnt_page', 'staff-assessment'),
    ):
        setattr(self, attr_name, AssessmentPage(step_type, self.browser, self.problem_loc))

    self.grade_page = GradePage(self.browser, self.problem_loc)
def log_to_file(self):
    """Persist the collected profiler report to a per-test log file."""
    log_path = '{}-profile.log'.format(self.id())
    with open(log_path, 'w') as log_file:
        log_file.write(self.profiler.output_text())
def tearDown(self):
    # If profiling was turned on in setUp, stop the profiler and write
    # its report before the test fixtures are torn down.
    if PROFILING_ENABLED:
        self.profiler.stop()
        self.log_to_file()
def login_user(self, learner, email):
    """
    Logs in an already existing user.

    Args:
        learner (str): the username of the user.
        email (str): email address of the user.
    """
    # AutoAuth with an existing username/email signs that user back in.
    AutoAuthPage(
        self.browser,
        email=email,
        password=self.TEST_PASSWORD,
        username=learner,
        course_id=self.TEST_COURSE_ID,
        staff=True,
    ).visit()
def do_self_assessment(self):
    """
    Creates a user, submits a self assessment, verifies the grade, and returns the username of the
    learner for which the self assessment was submitted.
    """
    # AutoAuth creates and signs in a fresh user; capture its generated username.
    self.auto_auth_page.visit()
    username, _ = self.auto_auth_page.get_username_and_email()
    self.submission_page.visit().submit_response(self.SUBMISSION)
    self.assertTrue(self.submission_page.has_submitted)

    # Submit a self-assessment
    self.submit_self_assessment(self.OPTIONS_SELECTED)

    # Verify the grade
    self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)

    return username
def submit_self_assessment(self, options=OPTIONS_SELECTED):
    """
    Submit a self assessment for the currently logged in student. Do not verify grade.

    Args:
        options: the options to select for the self assessment
            (will use OPTIONS_SELECTED if not specified)
    """
    self.self_asmnt_page.wait_for_page().wait_for_response()
    # Sanity-check that the page shows the learner's own response before assessing.
    self.assertIn(self.SUBMISSION, self.self_asmnt_page.response_text)
    self.self_asmnt_page.assess(options).wait_for_complete()
    self.assertTrue(self.self_asmnt_page.is_complete)
def _verify_staff_grade_section(self, expected_status):
    """
    Verify that the learner-facing Staff Grade section is present and
    shows the expected status value.
    """
    staff_section = self.staff_asmnt_page
    staff_section.wait_for_page()
    self.assertEqual("Staff Grade", staff_section.label)
    staff_section.verify_status_value(expected_status)
def do_training(self):
    """
    Complete two training examples, satisfying the requirements.
    """
    training_page = self.student_training_page
    for example_index, selections in enumerate(self.STUDENT_TRAINING_OPTIONS):
        # Before every example after the first, confirm the earlier ones registered.
        if example_index:
            try:
                training_page.wait_for_num_completed(example_index)
            except BrokenPromise:
                self.fail(
                    "Did not complete at least {num} student training example(s).".format(num=example_index)
                )
        training_page.wait_for_page().wait_for_response().assess(selections)

    # Check that we've completed student training
    try:
        training_page.wait_for_complete()
    except BrokenPromise:
        self.fail("Student training was not marked complete.")
def do_peer_assessment(self, count=1, options=OPTIONS_SELECTED):
    """
    Does the specified number of peer assessments.

    Args:
        count: the number of assessments that must be completed (defaults to 1)
        options: the options to use (defaults to OPTIONS_SELECTED)
    """
    self.peer_asmnt_page.visit()

    completed = 0
    while completed < count:
        self.peer_asmnt_page.wait_for_page().wait_for_response().assess(options)
        completed += 1
        # Wait until the page acknowledges this assessment before the next one.
        self.peer_asmnt_page.wait_for_num_completed(completed)
def do_staff_override(self, username, final_score=STAFF_AREA_SCORE.format(STAFF_OVERRIDE_SCORE)):
    """
    Complete a staff assessment (grade override).

    Args:
        username: the learner to grade
        final_score: the expected final score as shown in the staff area
            (defaults to the staff override score value)
    """
    self.staff_area_page.visit()
    # Open the learner's report and expand all sections so that the
    # override form is visible and interactable.
    self.staff_area_page.show_learner(username)
    self.staff_area_page.expand_learner_report_sections()
    self.staff_area_page.staff_assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED)
    # The staff area should immediately reflect the overridden final score.
    self.staff_area_page.verify_learner_final_score(final_score)
def do_staff_assessment(self, number_to_assess=0, options_selected=OPTIONS_SELECTED, feedback=None):
    """
    Use staff tools to assess available responses.

    Args:
        number_to_assess: the number of submissions to assess. If not provided (or 0),
            will grade all available submissions.
        options_selected (dict): the options to choose when grading. Defaults to OPTIONS_SELECTED.
        feedback (function(feedback_type)): if feedback is set, it will be used as a function that takes one
            parameter to generate a feedback string.
    """
    self.staff_area_page.visit()
    self.staff_area_page.click_staff_toolbar_button("staff-grading")
    # Get the counts before checking out a submission for assessment.
    start_numbers = self.staff_area_page.available_checked_out_numbers
    # Check out a submission.
    self.staff_area_page.expand_staff_grading_section()
    # Checked out number should increase, ungraded decrease.
    ungraded = start_numbers[0]-1
    checked_out = start_numbers[1]+1
    self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
    assessed = 0
    # Grade until the requested count is reached, or (when number_to_assess
    # is 0) until there is nothing ungraded left to check out.
    while number_to_assess == 0 or assessed < number_to_assess:
        # Use "submit and keep grading" except on the final pass: either this
        # is the last requested assessment, or no ungraded submissions remain.
        continue_after = False if number_to_assess-1 == assessed else ungraded > 0
        if feedback:
            self.staff_area_page.provide_criterion_feedback(feedback("criterion"))
            self.staff_area_page.provide_overall_feedback(feedback("overall"))
        if options_selected:
            self.staff_area_page.staff_assess(options_selected, continue_after)
        assessed += 1
        if not continue_after:
            # Final submit returns the checkout, so the checked-out count drops.
            self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out-1))
            break
        else:
            # "Keep grading" checks out the next submission: ungraded shrinks,
            # checked-out count stays the same.
            ungraded -= 1
            self.staff_area_page.verify_available_checked_out_numbers((ungraded, checked_out))
def refresh_page(self):
    """
    Helper method that waits for "unsaved changes" warnings to clear before refreshing the page.
    """
    def no_unsaved_changes():
        # The page clears window.onbeforeunload once pending changes are saved.
        return self.browser.execute_script("return window.onbeforeunload === null")

    EmptyPromise(no_unsaved_changes, "Unsubmitted changes exist on page.").fulfill()
    self.browser.refresh()
class SelfAssessmentTest(OpenAssessmentTest):
    """
    Test the self-assessment flow.
    """

    def setUp(self):
        super(SelfAssessmentTest, self).setUp('self_only')

    @retry()
    @attr('acceptance')
    def test_self_assessment(self):
        # Submit a response
        self.do_self_assessment()

    @retry()
    @attr('acceptance')
    def test_latex(self):
        """Verify the LaTeX preview button enables and renders once input exists."""
        self.auto_auth_page.visit()
        self.submission_page.visit()
        # 'Preview in Latex' button should be disabled at the page load
        self.assertTrue(self.submission_page.latex_preview_button_is_disabled)

        # Fill latex expression, & Verify if 'Preview in Latex is enabled'
        self.submission_page.visit().fill_latex(self.LATEX_SUBMISSION)
        self.assertFalse(self.submission_page.latex_preview_button_is_disabled)

        # Click 'Preview in Latex' button & Verify if it was rendered
        self.submission_page.preview_latex()
class StaffAssessmentTest(OpenAssessmentTest):
    """
    Test the staff-assessment flow.
    """

    def setUp(self):
        super(StaffAssessmentTest, self).setUp('staff_only', staff=True)

    @retry()
    @attr('acceptance')
    def test_staff_assessment(self):
        """End-to-end staff grading: submit, staff-assess, then override the score."""
        # Set up user and navigate to submission page
        self.auto_auth_page.visit()
        username, _ = self.auto_auth_page.get_username_and_email()
        self.submission_page.visit()

        # Verify that staff grade step is shown initially
        self._verify_staff_grade_section("NOT AVAILABLE")

        # User submits a response
        self.submission_page.submit_response(self.SUBMISSION)
        self.assertTrue(self.submission_page.has_submitted)

        # Verify staff grade section appears as expected
        self._verify_staff_grade_section("NOT AVAILABLE")
        message_title = self.staff_asmnt_page.open_step().message_title
        self.assertEqual("Waiting for a Staff Grade", message_title)

        # Perform staff assessment
        # (the staff area page is only needed by this flow, so it is created
        # here instead of in setUp)
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
        self.do_staff_assessment()

        # Verify staff grade section appears as expected
        self.staff_asmnt_page.visit()
        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
        self.assertEqual(self.EXPECTED_SCORE, self.grade_page.wait_for_page().score)

        # Verify that staff scores can be overriden
        self.do_staff_override(username)
        self.refresh_page()
        self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
class PeerAssessmentTest(OpenAssessmentTest):
    """
    Test the peer-assessment flow.

    It's complicated to guarantee that a student will both give and
    receive enough assessments to receive a grade, so we stop
    once we've given one peer assessment.
    """

    def setUp(self):
        super(PeerAssessmentTest, self).setUp('peer_only')

    @retry()
    @attr('acceptance')
    def test_peer_assessment(self):
        # Two learners each submit a response: the first guarantees the
        # second has at least one submission available to assess.
        for _ in range(2):
            self.auto_auth_page.visit()
            self.submission_page.visit().submit_response(self.SUBMISSION)

        # Assess the submission (there should be at least one available)
        self.do_peer_assessment()
class StaffOverrideTest(OpenAssessmentTest):
    """
    Test setting a staff override on a problem which requires peer or self assessment.
    This is used as a base class, as the problem type defined by subclasses must be known in setUp().
    """

    def __init__(self, *args, **kwargs):
        super(StaffOverrideTest, self).__init__(*args, **kwargs)
        # Subclasses must overwrite this with a PROBLEM_LOCATIONS key before setUp runs.
        self.problem_type = None

    def setUp(self):
        if self.problem_type is None:
            self.fail("Please define self.problem_type in a sub-class")
        super(StaffOverrideTest, self).setUp(self.problem_type, staff=True)
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

    # NOTE(review): the leading underscore keeps test runners from collecting this
    # directly; subclasses invoke it from their own decorated test methods, which
    # makes the retry/attr decorators here look redundant -- confirm before removing.
    @retry()
    @attr('acceptance')
    def _test_staff_override(self):
        """
        Scenario: staff can override a learner's grade

        Given I am viewing a new peer assessment problem as a learner
        And if I create a response to the problem
        Then there is no Staff Grade section present
        And if a staff member creates a grade override
        Then I can see my final grade, even though no peers have assessed me
        """
        # Create a submission
        self.auto_auth_page.visit()
        username, _ = self.auto_auth_page.get_username_and_email()
        self.submission_page.visit().submit_response(self.SUBMISSION)

        # Staff Grade field should not be visible yet.
        self.assertFalse(self.staff_asmnt_page.is_browser_on_page())

        # Submit a staff override.
        self.do_staff_override(username)

        # Refresh the page so the learner sees the Staff Grade section.
        self.refresh_page()
        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)

        # Verify the staff override grade
        self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
class StaffOverrideSelfTest(StaffOverrideTest):
    """
    Subclass of StaffOverrideTest for a 'self_only' problem.
    """

    def __init__(self, *args, **kwargs):
        super(StaffOverrideSelfTest, self).__init__(*args, **kwargs)
        # Selects the self-assessment-only problem from PROBLEM_LOCATIONS.
        self.problem_type = 'self_only'

    @retry()
    @attr('acceptance')
    def test_staff_override(self):
        # Delegate to the shared scenario defined on the base class.
        super(StaffOverrideSelfTest, self)._test_staff_override()
class StaffOverridePeerTest(StaffOverrideTest):
    """
    Subclass of StaffOverrideTest for a 'peer_only' problem.
    """

    def __init__(self, *args, **kwargs):
        super(StaffOverridePeerTest, self).__init__(*args, **kwargs)
        # Selects the peer-assessment-only problem from PROBLEM_LOCATIONS.
        self.problem_type = 'peer_only'

    @retry()
    @attr('acceptance')
    def test_staff_override(self):
        # Delegate to the shared scenario defined on the base class.
        super(StaffOverridePeerTest, self)._test_staff_override()
class StudentTrainingTest(OpenAssessmentTest):
    """
    Test student training (the "learning to assess" step).
    """

    def setUp(self):
        super(StudentTrainingTest, self).setUp('student_training')

    @retry()
    @attr('acceptance')
    def test_student_training(self):
        # Create a submission so we can get to student training
        self.auto_auth_page.visit()
        self.submission_page.visit().submit_response(self.SUBMISSION)

        self.do_training()
@ddt.ddt
class StaffAreaTest(OpenAssessmentTest):
    """
    Test the staff area.

    This is testing a problem with "self assessment only".
    """

    def setUp(self):
        super(StaffAreaTest, self).setUp('self_only', staff=True)
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

    @retry()
    @attr('acceptance')
    def test_staff_area_buttons(self):
        """
        Scenario: the staff area buttons should behave correctly

        Given I am viewing the staff area of an ORA problem
        Then none of the buttons should be active
        When I click the "Manage Individual Learners" button
        Then only the "Manage Individual Learners" button should be active
        When I click the "View Assignment Statistics" button
        Then only the "View Assignment Statistics" button should be active
        When I click the "View Assignment Statistics" button again
        Then none of the buttons should be active
        """
        self.auto_auth_page.visit()
        self.staff_area_page.visit()
        self.assertEqual(self.staff_area_page.selected_button_names, [])
        self.staff_area_page.click_staff_toolbar_button("staff-tools")
        self.assertEqual(self.staff_area_page.selected_button_names, ["MANAGE INDIVIDUAL LEARNERS"])
        self.staff_area_page.click_staff_toolbar_button("staff-info")
        self.assertEqual(self.staff_area_page.selected_button_names, ["VIEW ASSIGNMENT STATISTICS"])
        # Clicking the active button a second time toggles it off again.
        self.staff_area_page.click_staff_toolbar_button("staff-info")
        self.assertEqual(self.staff_area_page.selected_button_names, [])

    @retry()
    @attr('acceptance')
    def test_staff_area_panel(self):
        """
        Scenario: the staff area panels should be shown correctly

        Given I am viewing the staff area of an ORA problem
        Then none of the panels should be shown
        When I click a staff button
        Then only the related panel should be shown
        When I click the close button in the panel
        Then none of the panels should be shown
        """
        self.auto_auth_page.visit()
        self.staff_area_page.visit()

        # Verify that there is no selected panel initially
        self.assertEqual(self.staff_area_page.selected_button_names, [])
        self.assertEqual(self.staff_area_page.visible_staff_panels, [])

        for panel_name, button_label in [
                ("staff-tools", "MANAGE INDIVIDUAL LEARNERS"),
                ("staff-info", "VIEW ASSIGNMENT STATISTICS"),
        ]:
            # Click on the button and verify that the panel has opened
            self.staff_area_page.click_staff_toolbar_button(panel_name)
            self.assertEqual(self.staff_area_page.selected_button_names, [button_label])
            visible_panels = self.staff_area_page.visible_staff_panels
            self.assertEqual(1, len(visible_panels))
            self.assertIn(u'openassessment__{button_name}'.format(button_name=panel_name), visible_panels[0])

            # Click 'Close' and verify that the panel has been closed
            self.staff_area_page.click_staff_panel_close_button(panel_name)
            self.assertEqual(self.staff_area_page.selected_button_names, [])
            self.assertEqual(self.staff_area_page.visible_staff_panels, [])

    @retry()
    @attr('acceptance')
    def test_student_info(self):
        """
        Scenario: staff tools shows learner response information

        Given I am viewing the staff area of an ORA problem
        When I search for a learner in staff tools
        And the learner has submitted a response to an ORA problem with self-assessment
        And I've made a staff override assessment of the learner
        Then I see the correct learner information sections
        """
        username = self.do_self_assessment()
        self.do_staff_override(username)
        self.staff_area_page.visit()

        # Click on staff tools and search for user
        self.staff_area_page.show_learner(username)
        self.assertEqual(
            [u"Learner's Response", u"Learner's Self Assessment", u"Staff Assessment for This Learner",
             u"Learner's Final Grade", u"Submit Assessment Grade Override", u"Remove Submission From Peer Grading"],
            self.staff_area_page.learner_report_sections
        )

        self.assertNotIn('A response was not found for this learner', self.staff_area_page.learner_report_text)

    @retry()
    @attr('acceptance')
    def test_student_info_no_submission(self):
        """
        Scenario: staff tools indicates if no submission has been received for a given learner

        Given I am viewing the staff area of an ORA problem
        And I myself have submitted a response with self-assessment
        When I search for a learner in staff tools
        And the learner has not submitted a response to the ORA problem
        Then I see a message indicating that the learner has not submitted a response
        And there are no student information sections displayed
        """
        self.auto_auth_page.visit()

        # This is to catch a bug that existed when the user viewing staff tools had submitted an assessment,
        # and had a grade stored (TNL-4060).
        self.do_self_assessment()

        self.staff_area_page.visit()

        # Click on staff tools and search for user
        self.staff_area_page.show_learner('no-submission-learner')
        self.staff_area_page.verify_learner_report_text('A response was not found for this learner.')

    @retry()
    @attr('acceptance')
    def test_staff_override(self):
        """
        Scenario: staff can override a learner's grade

        Given I am viewing the staff area of an ORA problem
        When I search for a learner in staff tools
        And the learner has submitted a response to an ORA problem with self-assessment
        Then I can submit a staff override of the self-assessment
        And I see the updated final score
        """
        username = self.do_self_assessment()
        self.staff_area_page.visit()

        # Click on staff tools and search for user
        self.staff_area_page.show_learner(username)

        # Check the learner's current score.
        self.staff_area_page.expand_learner_report_sections()
        self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.EXPECTED_SCORE))
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(
            ['CRITERION', 'SELF ASSESSMENT GRADE'],
            self.staff_area_page.learner_final_score_table_headers
        )
        self.assertEquals(
            ['Fair - 3 points', 'Good - 3 points'], self.staff_area_page.learner_final_score_table_values
        )

        # Do staff override and wait for final score to change.
        # NOTE(review): other override flows in this file use staff_assess();
        # confirm assess() is the intended staff-area entry point here.
        self.staff_area_page.assess(self.STAFF_OVERRIDE_OPTIONS_SELECTED)

        # Verify that the new student score is different from the original one.
        self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
        self.assertEquals(
            ['CRITERION', 'STAFF GRADE', 'SELF ASSESSMENT GRADE'],
            self.staff_area_page.learner_final_score_table_headers
        )
        self.assertEquals(
            [u'Poor - 0 points', u'Fair',
             u'Fair - 1 point', u'Good'],
            self.staff_area_page.learner_final_score_table_values
        )

    @retry()
    @attr('acceptance')
    def test_cancel_submission(self):
        """
        Scenario: staff can cancel a learner's submission

        Given I am viewing the staff area of an ORA problem
        When I search for a learner in staff tools
        And the learner has submitted a response to an ORA problem with self-assessment
        Then I can cancel the learner's submission
        And I see an updated message indicating that the submission has been canceled.
        """
        username = self.do_self_assessment()
        self.staff_area_page.visit()

        # Click on staff tools and search for user
        self.staff_area_page.show_learner(username)

        # Check the learner's current score.
        self.staff_area_page.expand_learner_report_sections()
        self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.EXPECTED_SCORE))

        # Cancel the student submission
        self.staff_area_page.cancel_submission()

        self.staff_area_page.verify_learner_final_score(
            "The learner's submission has been removed from peer assessment. "
            "The learner receives a grade of zero unless you delete the learner's state for the "
            "problem to allow them to resubmit a response."
        )

        # Verify that the staff override and submission removal sections are now gone.
        self.assertEqual(
            [u"Learner's Response", u"Learner's Self Assessment", u"Learner's Final Grade"],
            self.staff_area_page.learner_report_sections
        )

        # Verify that the Learner Response has been replaced with a message about the removal
        self.staff_area_page.expand_learner_report_sections()
        self.assertIn("Learner submission removed", self.staff_area_page.learner_response)

    @retry()
    @attr('acceptance')
    def test_staff_grade_override_cancelled(self):
        """
        Scenario: the staff grade section displays cancelled when the submission is cancelled

        Given I have created a response and a self-assessment
        And a staff member creates a grade override and then cancels my submission
        Then when I refresh the page, the Staff Grade section is marked cancelled
        And I have no final grade
        """
        username = self.do_self_assessment()

        # Submit a staff override
        self.do_staff_override(username)

        # And cancel the submission
        self.staff_area_page.expand_learner_report_sections()
        self.staff_area_page.cancel_submission()

        # Refresh the page so the learner sees the Staff Grade section shows the submission has been cancelled.
        self.refresh_page()
        self._verify_staff_grade_section("CANCELLED")
        self.assertIsNone(self.grade_page.wait_for_page().score)
class FileUploadTest(OpenAssessmentTest):
    """
    Test file upload
    """

    def setUp(self):
        super(FileUploadTest, self).setUp('file_upload')

    @retry()
    @attr('acceptance')
    def test_file_upload(self):
        """Upload a rejected file type, then an accepted one with a description."""
        self.auto_auth_page.visit()
        # trying to upload a unacceptable file
        self.submission_page.visit()
        # hide django debug tool, otherwise, it will cover the button on the right side,
        # which will cause the button non-clickable and tests to fail
        self.submission_page.hide_django_debug_tool()
        self.submission_page.select_file(os.path.dirname(os.path.realpath(__file__)) + '/__init__.py')
        self.assertTrue(self.submission_page.has_file_error)

        # trying to upload a acceptable file
        readme = os.path.dirname(os.path.realpath(__file__)) + '/README.rst'
        self.submission_page.visit().select_file(readme)
        self.assertFalse(self.submission_page.has_file_error)

        # The upload button stays disabled until the file has a description.
        self.assertTrue(self.submission_page.upload_file_button_is_disabled)
        self.submission_page.add_file_description(0, 'file description 1')
        self.assertTrue(self.submission_page.upload_file_button_is_enabled)

        self.submission_page.upload_file()
        self.assertTrue(self.submission_page.have_files_uploaded)
class FullWorkflowMixin(object):
    """
    Mixin with helper methods and constants for testing a full workflow
    (training, self assessment, peer assessment, staff override).
    """
    # Options / expected staff-area fields for the assessment a peer gives the learner.
    PEER_ASSESSMENT = [0, 0]
    STAFF_AREA_PEER_ASSESSMENT = ['Poor', u'', u'0', u'5', u'Poor', u'', u'0', u'3']
    PEER_ASSESSMENT_SCORE = 0
    PEER_ASSESSMENT_STAFF_AREA_SCORE = "Final grade: 0 out of 8"

    # Options / expected staff-area fields for the learner's own self assessment.
    SELF_ASSESSMENT = [2, 3]
    STAFF_AREA_SELF_ASSESSMENT = ['Good', u'', u'5', u'5', u'Excellent', u'', u'3', u'3']

    # Options / expected staff-area fields for the assessment the learner submits for a peer.
    SUBMITTED_ASSESSMENT = [0, 3]
    STAFF_AREA_SUBMITTED = ['Poor', u'', u'0', u'5', u'Excellent', u'', u'3', u'3']

    def _wait_for_focus(self, button_selector, description):
        """
        Wait until the submission-page button matching ``button_selector`` is focused.

        BUG FIX: the original code built EmptyPromise from the already-evaluated
        result of is_focused() and never called fulfill(), so the check was a
        silent no-op. EmptyPromise requires a callable and only runs on fulfill().
        """
        EmptyPromise(
            lambda: self.submission_page.button(button_selector).is_focused(),
            description
        ).fulfill()

    def do_submission(self):
        """
        Creates a user and submission.

        Returns:
            (str, str): the username and email of the newly created user
        """
        auto_auth_page = AutoAuthPage(
            self.browser, password=self.TEST_PASSWORD, course_id=self.TEST_COURSE_ID, staff=True
        )
        auto_auth_page.visit()
        username_email = auto_auth_page.get_username_and_email()
        self.submission_page.visit().submit_response(self.SUBMISSION)
        self._wait_for_focus(".step--student-training", "Student training button should be focused")
        return username_email

    def do_submission_training_self_assessment(self):
        """
        Creates a user and then does submission, training, and self assessment.

        Returns:
            (str, str): the username and email of the newly created user
        """
        username, email = self.do_submission()
        self._wait_for_focus(".step--student-training", "Student training button should be focused")
        self.submission_page.confirm_feedback_text('Your Response Complete')
        self.submission_page.confirm_feedback_text('Learn to Assess Responses In Progress (1 of 2)')

        self.do_training()
        self._wait_for_focus(".step--self-assessment", "Self assessment button should be focused")
        self.submission_page.confirm_feedback_text('Learn to Assess Responses Complete')
        self.submission_page.confirm_feedback_text('Assess Your Response In Progress')

        self.submit_self_assessment(self.SELF_ASSESSMENT)
        self._wait_for_focus(".step--grade", "Grade button should be focused")
        self.submission_page.confirm_feedback_text('Assess Your Response Complete')
        self.submission_page.confirm_feedback_text('Assess Peers In Progress (1 of 1)')

        return username, email

    def do_train_self_peer(self, peer_to_grade=True):
        """
        Common functionality for executing training, self, and peer assessment steps.

        Args:
            peer_to_grade: boolean, defaults to True. Set to False to have learner complete their required steps,
                but no peers to submit a grade for learner in return.

        Returns:
            str: the username of the first learner.
        """
        # Create a learner with submission, training, and self assessment completed.
        learner, learner_email = self.do_submission_training_self_assessment()

        # Now create a second learner so that learner 1 has someone to assess.
        # The second learner does all the steps as well (submission, training, self assessment, peer assessment).
        self.do_submission_training_self_assessment()
        if peer_to_grade:
            self.do_peer_assessment(options=self.PEER_ASSESSMENT)

        # Go back to the first learner to complete her workflow.
        self.login_user(learner, learner_email)

        # Learner 1 does peer assessment of learner 2 to complete workflow.
        self.do_peer_assessment(options=self.SUBMITTED_ASSESSMENT)

        # Continue grading by other students if necessary to ensure learner has a peer grade.
        if peer_to_grade:
            self.verify_submission_has_peer_grade(learner, learner_email)

        return learner

    def staff_assessment(self, peer_grades_me=True):
        """ Do staff assessment workflow """
        # Ensure grade is not present, since staff assessment has not been made
        self.assertIsNone(self.grade_page.wait_for_page().score)

        # Now do a staff assessment.
        self.do_staff_assessment(options_selected=self.STAFF_OVERRIDE_OPTIONS_SELECTED)

        # As an add-on, let's make sure that both submissions (the learner's, and the additional one created
        # in do_train_self_peer() above) were assessed using staff-grading's "submit and keep going"
        self.assertEqual(0, self.staff_area_page.available_checked_out_numbers[0])

        # At this point, the learner sees the score (1).
        self.refresh_page()
        self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
        self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
        if peer_grades_me:
            self.verify_grade_entries(
                [(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
                  u"YOUR SELF ASSESSMENT", u"Good"),
                 (u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE", u"Poor", u"PEER 1", u"- POOR",
                  u"YOUR SELF ASSESSMENT", u"Excellent")]
            )
        else:
            self.verify_grade_entries(
                [(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE',
                  u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Good"),
                 (u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE',
                  u'Waiting for peer reviews', u"YOUR SELF ASSESSMENT", u"Excellent")
                 ]
            )

    def verify_staff_area_fields(self, username, peer_assessments, submitted_assessments, self_assessment):
        """
        Verifies the expected entries in the staff area for peer assessments,
        submitted assessments, and self assessment.

        Args:
            username (str): the username of the learner to check
            peer_assessments: the expected fields in the peer assessment section
            submitted_assessments: the expected fields in the submitted assessments section
            self_assessment: the expected fields in the self assessment section
        """
        self.staff_area_page.visit()
        self.staff_area_page.show_learner(username)
        self.staff_area_page.expand_learner_report_sections()
        self.assertEqual(peer_assessments, self.staff_area_page.status_text('peer__assessments'))
        self.assertEqual(submitted_assessments, self.staff_area_page.status_text('submitted__assessments'))
        self.assertEqual(self_assessment, self.staff_area_page.status_text('self__assessments'))

    def verify_submission_has_peer_grade(self, learner, learner_email, max_attempts=5):
        """
        If learner does not now have a score, it means that "extra" submissions are in the system,
        and more need to be scored. Create additional learners and have them grade until learner has
        a grade (stopping after specified max attempts).

        Args:
            learner (str): the learner whose grade will be checked
            learner_email (str): email address of that learner, used to log back in
            max_attempts (int): the maximum number of times an additional peer grading should be done
        """
        def peer_grade_exists():
            # A peer-assessment section in the report implies the learner was graded.
            self.staff_area_page.visit()
            self.staff_area_page.show_learner(learner)
            return "Peer Assessments for This Learner" in self.staff_area_page.learner_report_sections

        count = 0
        while not peer_grade_exists() and count < max_attempts:
            count += 1
            self.do_submission_training_self_assessment()
            self.do_peer_assessment(options=self.PEER_ASSESSMENT)
            self.login_user(learner, learner_email)

        self.assertTrue(
            peer_grade_exists(),
            "Learner still not graded after {} additional attempts".format(max_attempts)
        )

    def verify_grade_entries(self, expected_entries):
        """
        Verify the grade entries as shown in the "Your Grade" section.

        Args:
            expected_entries: array of expected entries, with each entry being a tuple
                consisting of the data for a particular question. Note that order is important.
        """
        for index, expected_entry in enumerate(expected_entries):
            self.assertEqual(expected_entry, self.grade_page.grade_entry(index))
class MultipleOpenAssessmentMixin(FullWorkflowMixin):
    """
    A Multiple ORA assessment mixin with helper methods and constants for testing a full workflow
    (training, self assessment, peer assessment, staff override).
    """

    def setup_vertical_index(self, vertical_index):
        """
        Set the vertical index on the page.

        Each problem has vertical index assigned and has a `vert-{vertical_index}` top level class.
        Pointing every page object at the same index moves them all to a different problem.
        """
        pages = (
            self.submission_page,
            self.self_asmnt_page,
            self.peer_asmnt_page,
            self.student_training_page,
            self.staff_asmnt_page,
            self.grade_page,
            self.staff_area_page,
        )
        for page in pages:
            page.vertical_index = vertical_index

    def assess_component(self, vertical_index, peer_grades_me=True):
        """ Assess the complete flow of an open assessment."""
        self.setup_vertical_index(vertical_index)

        self.do_train_self_peer(peer_grades_me)
        self.staff_assessment(peer_grades_me)
class FullWorkflowOverrideTest(OpenAssessmentTest, FullWorkflowMixin):
"""
Tests of complete workflows, combining multiple required steps together.
"""
def setUp(self):
super(FullWorkflowOverrideTest, self).setUp("full_workflow_staff_override", staff=True)
self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)
@retry()
@attr('acceptance')
def test_staff_override_at_end(self):
"""
Scenario: complete workflow with staff override at the very end
Given that I have created a submission, completed training, and done a self assessment
And a second learner has also created a submission, training, and self assessment
Then I can assess a learner
And when another learner assesses me
Then I see my score based on the peer assessment
And when a staff member overrides the score
Then I see the staff override score
And all fields in the staff area tool are correct
"""
learner = self.do_train_self_peer()
# At this point, the learner sees the peer assessment score (0).
self.assertEqual(self.PEER_ASSESSMENT_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.PEER_ASSESSMENT_STAFF_AREA_SCORE)
self.assertEquals(
['CRITERION', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points\nPeer 1 - Poor', 'Good',
'Poor - 0 points\nPeer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries(
[(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
(u"PEER MEDIAN GRADE - 0 POINTS", u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")]
)
# Now do a staff override, changing the score (to 1).
self.do_staff_override(learner)
self.refresh_page()
self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)
self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
self.verify_staff_area_fields(
learner, self.STAFF_AREA_PEER_ASSESSMENT, self.STAFF_AREA_SUBMITTED, self.STAFF_AREA_SELF_ASSESSMENT
)
self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
self.assertEquals(
['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE', 'SELF ASSESSMENT GRADE'],
self.staff_area_page.learner_final_score_table_headers
)
self.assertEquals(
['Poor - 0 points', 'Peer 1 - Poor', 'Good',
'Fair - 1 point', 'Peer 1 - Poor', 'Excellent'],
self.staff_area_page.learner_final_score_table_values
)
self.verify_grade_entries(
[(u"STAFF GRADE - 0 POINTS", u"Poor", u"PEER MEDIAN GRADE", u"Poor",
u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Good"),
(u"STAFF GRADE - 1 POINT", u"Fair", u"PEER MEDIAN GRADE",
u"Poor", u"PEER 1", u"- POOR", u"YOUR SELF ASSESSMENT", u"Excellent")
]
)
@retry()
@attr('acceptance')
def test_staff_override_at_beginning(self):
    """
    Scenario: complete workflow with staff override at the very beginning

    Given that I have created a submission
    Then I see no score yet
    And when a staff member creates a grade override
    Then I see my staff override score
    And all fields in the staff area tool are correct
    """
    # Create only the initial submission before doing the staff override.
    learner, learner_email = self.do_submission()

    # Verify no grade present (and no staff grade section), and no
    # assessment information in the staff area yet.
    self.assertIsNone(self.grade_page.wait_for_page().score)
    self.assertFalse(self.staff_asmnt_page.is_browser_on_page())
    self.verify_staff_area_fields(learner, [], [], [])
    self.staff_area_page.verify_learner_final_score(self.STAFF_OVERRIDE_STAFF_AREA_NOT_COMPLETE)

    # Do staff override
    self.do_staff_override(learner)

    # Refresh the page so the learner sees the Staff Grade section.
    self.refresh_page()
    self._verify_staff_grade_section(self.STAFF_GRADE_EXISTS)

    # Grade is now visible to the learner despite not having made any assessments
    self.assertEqual(self.STAFF_OVERRIDE_SCORE, self.grade_page.wait_for_page().score)
    self.verify_staff_area_fields(learner, [], [], [])
    self.staff_area_page.verify_learner_final_score(self.STAFF_AREA_SCORE.format(self.STAFF_OVERRIDE_SCORE))
    # With no peer assessments done, the peer column shows a waiting message.
    self.assertEquals(
        ['CRITERION', 'STAFF GRADE', 'PEER MEDIAN GRADE'],
        self.staff_area_page.learner_final_score_table_headers
    )
    self.assertEquals(
        [u'Poor - 0 points', u'Waiting for peer reviews',
         u'Fair - 1 point', u'Waiting for peer reviews'],
        self.staff_area_page.learner_final_score_table_values
    )
    self.verify_grade_entries(
        [(u"STAFF GRADE - 0 POINTS", u"Poor", u'PEER MEDIAN GRADE', u'Waiting for peer reviews'),
         (u"STAFF GRADE - 1 POINT", u"Fair", u'PEER MEDIAN GRADE', u'Waiting for peer reviews')
         ]
    )
@ddt.ddt
class FullWorkflowRequiredTest(OpenAssessmentTest, FullWorkflowMixin):
    """
    Tests of complete workflows, combining multiple required steps together.

    Runs against the "full_workflow_staff_required" problem, so the staff
    assessment step must happen before a final grade exists.
    """

    def setUp(self):
        # staff=True: presumably provisions/logs in a staff-capable user so
        # staff tools are available -- see OpenAssessmentTest.setUp.
        super(FullWorkflowRequiredTest, self).setUp("full_workflow_staff_required", staff=True)
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

    @retry()
    @attr('acceptance')
    @ddt.data(True, False)
    def test_train_self_peer_staff(self, peer_grades_me):
        """
        Scenario: complete workflow that included staff required step.

        Given that I have created a submission, completed training, and done a self assessment
        And a second learner has also created a submission, training, and self assessment
        Then I can assess a learner
        And when another learner assesses me
        And a staff member submits a score
        Then I see the staff score
        And all fields in the staff area tool are correct
        """
        # Using ddt booleans to confirm behavior independent of whether I
        # receive a peer score or not.
        self.do_train_self_peer(peer_grades_me)

        # Do staff assessment step
        self.staff_assessment(peer_grades_me)
@ddt.ddt
class FeedbackOnlyTest(OpenAssessmentTest, FullWorkflowMixin):
    """
    Test for a problem containing a criterion that only accepts feedback.
    Will make and verify self and staff assessments.
    """

    def setUp(self):
        super(FeedbackOnlyTest, self).setUp("feedback_only", staff=True)
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

    def generate_feedback(self, assessment_type, feedback_type):
        # Deterministic feedback text so later assertions can re-derive it.
        return "{}: {} feedback".format(assessment_type, feedback_type)

    def assess_feedback(self, self_or_peer=""):
        # Guard against misuse; only self/peer assessment pages are supported.
        # NOTE(review): the message says "assert_feedback" but the method is
        # named assess_feedback -- likely a stale identifier in the text.
        if self_or_peer != "self" and self_or_peer != "peer":
            raise AssertionError("assert_feedback only works for self or peer assessments")
        page = self.self_asmnt_page if self_or_peer == "self" else self.peer_asmnt_page
        page.wait_for_page()
        page.submit_assessment()

    @retry()
    @attr('acceptance')
    def test_feedback_only(self):
        """
        Self-assess and staff-assess a feedback-only problem, then verify the
        learner-facing grade report and the staff area show both the scores
        and every piece of recorded feedback.
        """
        # Make submission
        user, pwd = self.do_submission()

        # Make self assessment, filling in both criterion and overall feedback.
        self.self_asmnt_page.visit()
        self.self_asmnt_page.wait_for_page()
        self.self_asmnt_page.provide_criterion_feedback(self.generate_feedback("self", "criterion"))
        self.self_asmnt_page.provide_overall_feedback(self.generate_feedback("self", "overall"))
        self.self_asmnt_page.assess([0])
        self.self_asmnt_page.wait_for_complete()
        self.assertTrue(self.self_asmnt_page.is_complete)

        # Staff assess all available submissions
        self.do_staff_assessment(
            options_selected=[0],  # Select the 0-th option (Yes) on the single scored criterion
            feedback=lambda feedback_type: self.generate_feedback("staff", feedback_type)
        )

        # Verify student-viewable grade report
        self.refresh_page()
        self.grade_page.wait_for_page()
        self.verify_grade_entries(
            [(u'STAFF GRADE - 1 POINT', u'Yes', u'YOUR SELF ASSESSMENT', u'Yes')]
        )
        # Check that the recorded feedback texts round-trip into the report.
        for i, assessment_type in enumerate(["staff", "self"]):
            # Criterion feedback first
            expected = self.generate_feedback(assessment_type, "criterion")
            actual = self.grade_page.feedback_entry(1, i)
            self.assertEqual(actual, expected)  # Reported answers 3 and 4
            # Then overall
            expected = self.generate_feedback(assessment_type, "overall")
            actual = self.grade_page.feedback_entry("feedback", i)
            self.assertEqual(actual, expected)  # Reported answers 5 and 6

        # Verify that no reported answers other than the 6 we already verified are present
        self.assertEqual(self.grade_page.total_reported_answers, 6)

        # Verify that the feedback-only criterion has no score
        self.assertEqual(self.grade_page.number_scored_criteria, 1)

        # Verify feedback appears from all assessments in staff tools
        self.staff_area_page.show_learner(user)
        self.staff_area_page.expand_learner_report_sections()
        self.assertEqual(
            self.staff_area_page.learner_final_score_table_headers,
            [u'CRITERION', u'STAFF GRADE', u'SELF ASSESSMENT GRADE']
        )
        self.assertEqual(
            self.staff_area_page.learner_final_score_table_values,
            [u'Yes - 1 point', u'Yes', u'Feedback Recorded', u'Feedback Recorded']
        )
        self.assertEqual(
            self.staff_area_page.status_text('staff__assessments')[5],
            self.generate_feedback("staff", "criterion")
        )
        self.assertEqual(
            self.staff_area_page.overall_feedback('staff__assessments'),
            self.generate_feedback("staff", "overall")
        )
        self.assertEqual(
            self.staff_area_page.status_text('self__assessments')[5],
            self.generate_feedback("self", "criterion")
        )
        self.assertEqual(
            self.staff_area_page.overall_feedback('self__assessments'),
            self.generate_feedback("self", "overall")
        )

        # Verify correct score is shown
        self.staff_area_page.verify_learner_final_score("Final grade: 1 out of 1")
class MultipleOpenAssessmentTest(OpenAssessmentTest, MultipleOpenAssessmentMixin):
    """
    Test the multiple peer-assessment flow.
    """

    def setUp(self):
        super(MultipleOpenAssessmentTest, self).setUp('multiple_ora')
        # Staff area page is not present in OpenAssessmentTest base class, so we are adding it here.
        self.staff_area_page = StaffAreaPage(self.browser, self.problem_loc)

    @retry()
    @attr('acceptance')
    def test_multiple_ora_complete_flow(self):
        """
        Scenario: complete workflow on a unit containing multiple ORA blocks.
        """
        # Each problem has vertical index assigned and has a `vert-{vertical_index}` top level class.
        # That also means that all pages are being differentiated by their vertical index number that is assigned to
        # each problem type. We are passing vertical index number and setting it by `self.setup_vertical_index` method
        # so as to move to a different problem.

        # Assess first ORA problem, pass the vertical index number
        self.assess_component(0)

        # Assess second ORA problem, pass the vertical index number
        self.assess_component(1)
if __name__ == "__main__":
    # Default the screenshot directory to ./screenshots next to this file,
    # keeping any value already present in the environment.
    os.environ.setdefault(
        'SCREENSHOT_DIR',
        os.path.join(os.path.dirname(__file__), 'screenshots'),
    )
    unittest.main()
|
Stanford-Online/edx-ora2
|
test/acceptance/tests.py
|
Python
|
agpl-3.0
| 52,571
|
[
"VisIt"
] |
65db6cf122d7a9d24c5888db895da0e27af397fce1dfa6ddf1659a0ac9f46de0
|
#
# This Juno application runs my site (http://brianreily.com).
#
# It has been modified for added comments and improved readability.
# If you are interested in the full project for this site (i.e. with
# templates, static file setup, etc.), see my Github repository for it:
# http://github.com/breily/brianreily.com
#
from juno import *
# Initialize the database, and set Juno to run the SCGI server
init({'db_location': 'brian.db', 'mode': 'scgi'})

# Represents software with READMEs/repositories
Project = model('Project', name = 'string',
                about = 'string',   # Short description
                readme = 'string',  # Filesystem location of the README
                repo = 'string',    # URL of Github repository
                status = 'int',     # 0=Current, 1=Completed, 2=Old (see home())
                # Replace the built in __repr__ with a more
                # descriptive version of it.
                __repr__ = lambda self: '<Project: %s>' %self.name)

# Represents shorter pieces of code (without READMEs/repositories)
Code = model('Code', name = 'string',
             about = 'string',  # Short description
             gist_id = 'int',   # ID of the Github Gist
             __repr__ = lambda self: '<Code: %s>' %self.name)

# Redirect these two URLs to the home page; I didn't want separate list pages
assign(['/project/', '/code/'], '/')
# The home page - lists all Project and Code items
@get('/')
def home(web):
    """Render the front page: every Code item plus Projects grouped by status."""
    projects = find(Project).all()
    snippets = find(Code).all()

    def with_status(status):
        # Projects whose integer status field matches exactly.
        return [p for p in projects if p.status == status]

    template('main.html', {
        'current_projects': with_status(0),
        'complete_projects': with_status(1),
        'incomplete_projects': with_status(2),
        'code': snippets,
    })
# Currently checks for Project existence and returns the README
@get('/project/:name/')
def project(web, name):
    """Serve the README (plain text) of the first Project fuzzily matching `name`."""
    # SQLAlchemy LIKE filter: any project whose name contains `name`.
    matches = find(Project).filter(Project.name.like('%' + name + '%')).all()
    if not matches:
        # No match: respond with a 404.
        return notfound("That Project (%s) cannot be found" % name)
    # Stream the README file as plain text.
    yield_file(matches[0].readme, type='text/plain')
# Currently checks for Code existence, and renders the generic Code template
@get('/code/:name/')
def code(web, name):
    """Render the Gist template for the first Code entry fuzzily matching `name`."""
    matches = find(Code).filter(Code.name.like('%' + name + '%')).all()
    if not matches:
        return notfound("That Code (%s) cannot be found" % name)
    # The template pulls the Gist in client-side via javascript.
    template('code.html', {'c': matches[0]})
# Run Juno
if __name__ == '__main__': run()
|
breily/juno
|
doc/examples/basic_site.py
|
Python
|
mit
| 3,013
|
[
"Brian"
] |
6d5edf4aae28b00dde4c735705ed6edd01ba55bf65a67a92f25fbf31a5fda7d7
|
# convert -scale 30% /tmp/sim/*.png /tmp/balls6.gif
from random import random
from collections import defaultdict
import numpy as np, datetime
import sys, numpy.linalg as lin
from mayavi import mlab
G = np.array([0.0, 0.0, -0.8])
m = 0.1
B = 8 # top
EPS = 0.1
BOUND_DAMPING = -0.6
class Simulation:
    """Bouncing-ball simulation inside a 2x2x2 box, rendered per frame with mayavi."""

    def __init__(self):
        self.r = 0.2                     # ball radius
        self.rvec = np.ones(B) * self.r  # per-ball radii for the renderer
        self.dt = 0.1                    # integration time step
        self.balls = []
        self.cor = 0.5
        self.mmax = 2.0 - self.r
        self.mmin = 0.0 + self.r

    def init(self):
        # Random start position plus a one-off random kick for every ball.
        for idx in range(B):
            vel = np.array([0.0, 0.0, 0.0])
            pos = np.array([np.random.rand(), np.random.rand(), np.random.rand()])
            kick = 5 * np.array([np.random.rand(), np.random.rand(), np.random.rand()])
            self.balls.append({'x': pos, 'f': kick, 'v': vel, 'i': idx})

    def computeForces(self, i):
        # Frame 0 keeps the initial kick and adds gravity on top; every
        # later frame gravity is the only force acting.
        if i == 0:
            for ball in self.balls:
                ball['f'] = ball['f'] + (G * m)
        else:
            for ball in self.balls:
                ball['f'] = G * m

    def integrate(self):
        # Semi-implicit Euler step followed by damped wall bounces.
        for ball in self.balls:
            ball['v'] += self.dt * (ball['f'] / m)
            ball['x'] += self.dt * ball['v']
            for axis in range(3):
                if ball['x'][axis] - EPS < 0:
                    ball['v'][axis] *= BOUND_DAMPING
                    ball['x'][axis] = 0
                if ball['x'][axis] + EPS > 2.0:
                    ball['v'][axis] *= BOUND_DAMPING
                    ball['x'][axis] = 2.0 - EPS
        # Pairwise collision response; each ball participates at most once
        # per step (tracked via `resolved`).
        resolved = {}
        for ball in self.balls:
            for other in self.balls:
                if (other['i'] != ball['i'] and ball['i'] not in resolved and other['i'] not in resolved):
                    gap = lin.norm(other['x'] - ball['x'])
                    if gap < (2 * self.r):
                        # Exchange the normal component of the relative velocity.
                        rel_v = ball['v'] - other['v']
                        normal = (other['x'] - ball['x']) / gap
                        v_norm = np.dot(rel_v, normal) * normal
                        ball['v'] = ball['v'] - v_norm
                        other['v'] = other['v'] + v_norm
                        resolved[ball['i']] = 1
                        resolved[other['i']] = 1

    def update(self, i):
        self.computeForces(i)
        self.integrate()

    def display(self, i):
        # Off-screen render of frame `i` to /tmp/sim/out-%02d.png.
        mlab.options.offscreen = True
        centers = np.array([[b['x'][0], b['x'][1], b['x'][2]] for b in self.balls])
        fig = mlab.figure(figure=None, fgcolor=(0., 0., 0.), bgcolor=(1, 1, 1), engine=None)
        mlab.points3d(centers[:, 0], centers[:, 1], centers[:, 2], self.rvec,
                      color=(0.2, 0.4, 0.5), colormap='gnuplot', scale_factor=1, figure=fig)
        # Red marker at the origin for orientation.
        mlab.points3d(0, 0, 0, 0.1, color=(1, 0, 0), scale_factor=1.0)
        BS = 2.0
        # Box wireframe: z edge in black, then the x/y axes in red/green...
        mlab.plot3d([0.0, 0.0], [0.0, 0.0], [0.0, BS], color=(0, 0, 0), tube_radius=None, figure=fig)
        mlab.plot3d([0.0, BS], [0.0, 0.0], [0.0, 0.0], color=(1, 0, 0), tube_radius=None, figure=fig)
        mlab.plot3d([0.0, 0.0], [0.0, BS], [0.0, 0.0], color=(0, 1, 0), tube_radius=None, figure=fig)
        # ...then the remaining nine edges in black.
        for xs, ys, zs in [
            ([0.0, 0.0], [0.0, BS], [BS, BS]),
            ([0.0, BS], [0.0, 0.0], [BS, BS]),
            ([BS, BS], [0.0, BS], [BS, BS]),
            ([BS, 0], [BS, BS], [BS, BS]),
            ([0, 0], [BS, BS], [BS, 0]),
            ([BS, BS], [0.0, 0.0], [0.0, BS]),
            ([BS, BS], [0.0, BS], [0.0, 0.0]),
            ([BS, 0.0], [BS, BS], [0.0, 0.0]),
            ([BS, BS], [BS, BS], [0.0, BS]),
        ]:
            mlab.plot3d(xs, ys, zs, color=(0, 0, 0), tube_radius=None, figure=fig)
        mlab.view(azimuth=50, elevation=80, focalpoint=[1, 1, 1], distance=8.0, figure=fig)
        mlab.savefig(filename='/tmp/sim/out-%02d.png' % i)
#exit()
if __name__ == '__main__':
    sim = Simulation()
    sim.init()
    # 40 frames: step the physics, then render each frame to disk.
    for frame in range(40):
        sim.update(frame)
        sim.display(frame)
|
burakbayramli/dersblog
|
compscieng/compscieng_bpp30sim/sim.py
|
Python
|
gpl-3.0
| 4,829
|
[
"Mayavi"
] |
493cbb2003fb1b5763aedce62e2e937448c00a4aa07a3615d193126c2bc265de
|
from graphql.core.language.ast import Field, Name, SelectionSet
from graphql.core.language.parser import parse
from graphql.core.language.visitor import visit, Visitor, REMOVE, BREAK
from fixtures import KITCHEN_SINK
def test_allows_for_editing_on_enter():
    """Removing nodes on enter must edit a copy and leave the input AST intact."""
    ast = parse('{ a, b, c { a, b, c } }', no_location=True)

    class DropBOnEnter(Visitor):
        def enter(self, node, *args):
            # Delete every field named 'b' as soon as it is entered.
            if isinstance(node, Field) and node.name.value == 'b':
                return REMOVE

    edited = visit(ast, DropBOnEnter())
    # The original document is untouched...
    assert ast == parse('{ a, b, c { a, b, c } }', no_location=True)
    # ...while the edited copy has lost every 'b'.
    assert edited == parse('{ a, c { a, c } }', no_location=True)
def test_allows_for_editing_on_leave():
    """Removing nodes on leave must edit a copy and leave the input AST intact."""
    ast = parse('{ a, b, c { a, b, c } }', no_location=True)

    class DropBOnLeave(Visitor):
        def leave(self, node, *args):
            # Delete every field named 'b' when it is left.
            if isinstance(node, Field) and node.name.value == 'b':
                return REMOVE

    edited = visit(ast, DropBOnLeave())
    # The original document is untouched...
    assert ast == parse('{ a, b, c { a, b, c } }', no_location=True)
    # ...while the edited copy has lost every 'b'.
    assert edited == parse('{ a, c { a, c } }', no_location=True)
def test_visits_edited_node():
    """A node returned from enter() replaces the original and is itself visited."""
    added_field = Field(name=Name(value='__typename'))
    ast = parse('{ a { x } }')

    class TestVisitor(Visitor):
        def __init__(self):
            # Flipped to True once the injected field is visited.
            self.did_visit_added_field = False

        def enter(self, node, *args):
            if isinstance(node, Field) and node.name.value == 'a':
                # Replace 'a' with a field whose selection set has the
                # injected __typename prepended.
                selection_set = node.selection_set
                selections = []
                if selection_set:
                    selections = selection_set.selections
                new_selection_set = SelectionSet(selections=[added_field] + selections)
                return Field(name=None, selection_set=new_selection_set)
            # Identity check: the visitor must reach the exact injected node.
            if node is added_field:
                self.did_visit_added_field = True

    visitor = TestVisitor()
    visit(ast, visitor)
    assert visitor.did_visit_added_field
def test_allows_skipping_a_subtree():
    """Returning False from enter() skips the node's subtree (and its leave())."""
    visited = []
    ast = parse('{ a, b { x }, c }')

    class TestVisitor(Visitor):
        def enter(self, node, *args):
            visited.append(['enter', type(node).__name__, getattr(node, 'value', None)])
            # Skip everything under field 'b'.
            if isinstance(node, Field) and node.name.value == 'b':
                return False

        def leave(self, node, *args):
            visited.append(['leave', type(node).__name__, getattr(node, 'value', None)])

    visit(ast, TestVisitor())
    # Note: 'b' is entered but never left, and its Name/SelectionSet are absent.
    assert visited == [
        [ 'enter', 'Document', None ],
        [ 'enter', 'OperationDefinition', None ],
        [ 'enter', 'SelectionSet', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Name', 'a' ],
        [ 'leave', 'Name', 'a' ],
        [ 'leave', 'Field', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Name', 'c' ],
        [ 'leave', 'Name', 'c' ],
        [ 'leave', 'Field', None ],
        [ 'leave', 'SelectionSet', None ],
        [ 'leave', 'OperationDefinition', None ],
        [ 'leave', 'Document', None ],
    ]
def test_allows_early_exit_while_visiting():
    """Returning BREAK from enter() stops the traversal immediately."""
    visited = []
    ast = parse('{ a, b { x }, c }')

    class TestVisitor(Visitor):
        def enter(self, node, *args):
            visited.append(['enter', type(node).__name__, getattr(node, 'value', None)])
            # Abort the whole visit once Name 'x' is reached.
            if isinstance(node, Name) and node.value == 'x':
                return BREAK

        def leave(self, node, *args):
            visited.append(['leave', type(node).__name__, getattr(node, 'value', None)])

    visit(ast, TestVisitor())
    # The log ends at the 'x' enter -- no further enters or leaves occur.
    assert visited == [
        [ 'enter', 'Document', None ],
        [ 'enter', 'OperationDefinition', None ],
        [ 'enter', 'SelectionSet', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Name', 'a' ],
        [ 'leave', 'Name', 'a' ],
        [ 'leave', 'Field', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Name', 'b' ],
        [ 'leave', 'Name', 'b' ],
        [ 'enter', 'SelectionSet', None ],
        [ 'enter', 'Field', None ],
        [ 'enter', 'Name', 'x' ],
    ]
def test_allows_a_named_functions_visitor_api():
    """Per-node-kind methods (enter_Name, leave_SelectionSet, ...) are honoured."""
    visited = []
    ast = parse('{ a, b { x }, c }')

    class TestVisitor(Visitor):
        # Only these node kinds are recorded; all others are ignored.
        def enter_Name(self, node, *args):
            visited.append(['enter', type(node).__name__, getattr(node, 'value', None)])

        def enter_SelectionSet(self, node, *args):
            visited.append(['enter', type(node).__name__, getattr(node, 'value', None)])

        def leave_SelectionSet(self, node, *args):
            visited.append(['leave', type(node).__name__, getattr(node, 'value', None)])

    visit(ast, TestVisitor())
    assert visited == [
        [ 'enter', 'SelectionSet', None ],
        [ 'enter', 'Name', 'a' ],
        [ 'enter', 'Name', 'b' ],
        [ 'enter', 'SelectionSet', None ],
        [ 'enter', 'Name', 'x' ],
        [ 'leave', 'SelectionSet', None ],
        [ 'enter', 'Name', 'c' ],
        [ 'leave', 'SelectionSet', None ],
    ]
def test_visits_kitchen_sink():
    """
    Exhaustive traversal-order check over the KITCHEN_SINK document.

    Records (direction, node kind, key-in-parent, parent kind) for every
    enter/leave and compares against the full expected sequence. Parents
    that are plain lists are normalized to kind None.
    """
    visited = []
    ast = parse(KITCHEN_SINK)

    class TestVisitor(Visitor):
        def enter(self, node, key, parent, *args):
            kind = parent and type(parent).__name__
            if kind == 'list':
                kind = None
            visited.append(['enter', type(node).__name__, key, kind])

        def leave(self, node, key, parent, *args):
            kind = parent and type(parent).__name__
            if kind == 'list':
                kind = None
            visited.append(['leave', type(node).__name__, key, kind])

    visit(ast, TestVisitor())
    assert visited == [
        [ 'enter', 'Document', None, None ],
        [ 'enter', 'OperationDefinition', 0, None ],
        [ 'enter', 'Name', 'name', 'OperationDefinition' ],
        [ 'leave', 'Name', 'name', 'OperationDefinition' ],
        [ 'enter', 'VariableDefinition', 0, None ],
        [ 'enter', 'Variable', 'variable', 'VariableDefinition' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'variable', 'VariableDefinition' ],
        [ 'enter', 'NamedType', 'type', 'VariableDefinition' ],
        [ 'enter', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'NamedType', 'type', 'VariableDefinition' ],
        [ 'leave', 'VariableDefinition', 0, None ],
        [ 'enter', 'VariableDefinition', 1, None ],
        [ 'enter', 'Variable', 'variable', 'VariableDefinition' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'variable', 'VariableDefinition' ],
        [ 'enter', 'NamedType', 'type', 'VariableDefinition' ],
        [ 'enter', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'NamedType', 'type', 'VariableDefinition' ],
        [ 'enter', 'EnumValue', 'default_value', 'VariableDefinition' ],
        [ 'leave', 'EnumValue', 'default_value', 'VariableDefinition' ],
        [ 'leave', 'VariableDefinition', 1, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'alias', 'Field' ],
        [ 'leave', 'Name', 'alias', 'Field' ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'ListValue', 'value', 'Argument' ],
        [ 'enter', 'IntValue', 0, None ],
        [ 'leave', 'IntValue', 0, None ],
        [ 'enter', 'IntValue', 1, None ],
        [ 'leave', 'IntValue', 1, None ],
        [ 'leave', 'ListValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'enter', 'InlineFragment', 1, None ],
        [ 'enter', 'NamedType', 'type_condition', 'InlineFragment' ],
        [ 'enter', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'NamedType', 'type_condition', 'InlineFragment' ],
        [ 'enter', 'Directive', 0, None ],
        [ 'enter', 'Name', 'name', 'Directive' ],
        [ 'leave', 'Name', 'name', 'Directive' ],
        [ 'leave', 'Directive', 0, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'InlineFragment' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'enter', 'Field', 1, None ],
        [ 'enter', 'Name', 'alias', 'Field' ],
        [ 'leave', 'Name', 'alias', 'Field' ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'IntValue', 'value', 'Argument' ],
        [ 'leave', 'IntValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'enter', 'Argument', 1, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'Variable', 'value', 'Argument' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'value', 'Argument' ],
        [ 'leave', 'Argument', 1, None ],
        [ 'enter', 'Directive', 0, None ],
        [ 'enter', 'Name', 'name', 'Directive' ],
        [ 'leave', 'Name', 'name', 'Directive' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'Variable', 'value', 'Argument' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'leave', 'Directive', 0, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'enter', 'FragmentSpread', 1, None ],
        [ 'enter', 'Name', 'name', 'FragmentSpread' ],
        [ 'leave', 'Name', 'name', 'FragmentSpread' ],
        [ 'leave', 'FragmentSpread', 1, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'leave', 'Field', 1, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'InlineFragment' ],
        [ 'leave', 'InlineFragment', 1, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'leave', 'OperationDefinition', 0, None ],
        [ 'enter', 'OperationDefinition', 1, None ],
        [ 'enter', 'Name', 'name', 'OperationDefinition' ],
        [ 'leave', 'Name', 'name', 'OperationDefinition' ],
        [ 'enter', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'IntValue', 'value', 'Argument' ],
        [ 'leave', 'IntValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'enter', 'Directive', 0, None ],
        [ 'enter', 'Name', 'name', 'Directive' ],
        [ 'leave', 'Name', 'name', 'Directive' ],
        [ 'leave', 'Directive', 0, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'Field' ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'leave', 'OperationDefinition', 1, None ],
        [ 'enter', 'FragmentDefinition', 2, None ],
        [ 'enter', 'Name', 'name', 'FragmentDefinition' ],
        [ 'leave', 'Name', 'name', 'FragmentDefinition' ],
        [ 'enter', 'NamedType', 'type_condition', 'FragmentDefinition' ],
        [ 'enter', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'Name', 'name', 'NamedType' ],
        [ 'leave', 'NamedType', 'type_condition', 'FragmentDefinition' ],
        [ 'enter', 'SelectionSet', 'selection_set', 'FragmentDefinition' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'Variable', 'value', 'Argument' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'enter', 'Argument', 1, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'Variable', 'value', 'Argument' ],
        [ 'enter', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Name', 'name', 'Variable' ],
        [ 'leave', 'Variable', 'value', 'Argument' ],
        [ 'leave', 'Argument', 1, None ],
        [ 'enter', 'Argument', 2, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'ObjectValue', 'value', 'Argument' ],
        [ 'enter', 'ObjectField', 0, None ],
        [ 'enter', 'Name', 'name', 'ObjectField' ],
        [ 'leave', 'Name', 'name', 'ObjectField' ],
        [ 'enter', 'StringValue', 'value', 'ObjectField' ],
        [ 'leave', 'StringValue', 'value', 'ObjectField' ],
        [ 'leave', 'ObjectField', 0, None ],
        [ 'leave', 'ObjectValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 2, None ],
        [ 'leave', 'Field', 0, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'FragmentDefinition' ],
        [ 'leave', 'FragmentDefinition', 2, None ],
        [ 'enter', 'OperationDefinition', 3, None ],
        [ 'enter', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'enter', 'Field', 0, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'enter', 'Argument', 0, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'BooleanValue', 'value', 'Argument' ],
        [ 'leave', 'BooleanValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 0, None ],
        [ 'enter', 'Argument', 1, None ],
        [ 'enter', 'Name', 'name', 'Argument' ],
        [ 'leave', 'Name', 'name', 'Argument' ],
        [ 'enter', 'BooleanValue', 'value', 'Argument' ],
        [ 'leave', 'BooleanValue', 'value', 'Argument' ],
        [ 'leave', 'Argument', 1, None ],
        [ 'leave', 'Field', 0, None ],
        [ 'enter', 'Field', 1, None ],
        [ 'enter', 'Name', 'name', 'Field' ],
        [ 'leave', 'Name', 'name', 'Field' ],
        [ 'leave', 'Field', 1, None ],
        [ 'leave', 'SelectionSet', 'selection_set', 'OperationDefinition' ],
        [ 'leave', 'OperationDefinition', 3, None ],
        [ 'leave', 'Document', None, None ]
    ]
|
gabriel-laet/graphql-py
|
tests/core_language/test_visitor.py
|
Python
|
mit
| 16,608
|
[
"VisIt"
] |
77c3c4785344e2a217864ca9aac60195a2c95046f60407280d421b3f716b3cf5
|
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
from . import login_manager
from flask_sqlalchemy import SQLAlchemy
import enum
#TODO mixin created_at, modified_at

# Module-level database handle; the models below bind to this instance.
print('Initializing database')
db = SQLAlchemy()
# A registered user
# A registered user
class User(db.Model):
    __tablename__ = 'user'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(80), unique=True)
    password_hash = db.Column(db.String(128))  # only the hash is stored, never the password
    registration_date = db.Column(db.DateTime(), default=datetime.utcnow)
    last_seen = db.Column(db.DateTime(), default=datetime.utcnow)  # refreshed via visit()

    # The events a user has created and has the right to modify
    events_created = db.relationship('Event', back_populates='creator', lazy='joined')
    # The events a user has subscribed to
    subscriptions = db.relationship('Subscription', back_populates='user', lazy='joined')

    @property
    def subscription_ids(self):  # poly-orm rewrite
        # Ids of all events this user is subscribed to.
        return [ s.event.id for s in self.subscriptions ]

    def __init__(self, username, password, email=None):
        self.username = username
        self.password = password  # routed through the write-only property below
        self.email = email

    def __repr__(self):
        return '<User {}: {}>'.format(self.id, self.username)

    @property
    def password(self):
        # Write-only attribute: reading the plaintext is deliberately impossible.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        # Check a candidate password against the stored hash.
        return check_password_hash(self.password_hash, password)

    def visit(self):
        # Record user activity; the caller is responsible for committing.
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    # Flask-Login integration.
    # NOTE(review): is_authenticated/is_active are plain methods here while
    # is_anonymous is a property; recent Flask-Login expects all three as
    # properties -- confirm against the installed version.
    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    @property
    def is_anonymous(self):
        return False

    def get_id(self):
        # Flask-Login requires a unicode id.
        return str(self.id)
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: resolve a session user id back to a User row.
    return User.query.get(int(user_id))
# returns the timedelta as a string (currently english)
# if it is less than a minute, returns "now"
# if the timedelta is negative, returns "already passed"
def timedelta_to_string(rem):
    """Render a timedelta as e.g. '2 days 3 hours 5 minutes'.

    Returns 'already passed' for negative deltas and 'now' for anything
    under one minute. Zero-valued components are omitted.
    """
    def with_word(num, singular, multiple):
        # One component ('2 days ') with correct pluralization; zero or
        # negative counts render as nothing.
        if num <= 0:
            return ''
        elif num == 1:
            return str(num) + ' ' + singular + ' '
        else:
            return str(num) + ' ' + multiple + ' '

    # Use the full magnitude, not rem.seconds: `seconds` is normalized to
    # 0..86399, so checking it alone misfires both for deltas spanning days
    # (e.g. 2 days + 30s must not be 'now') and for negative deltas whose
    # normalized seconds happen to be small.
    total = rem.total_seconds()
    if total < 0:
        return 'already passed'
    if total < 60:
        return 'now'

    (minutes, _) = divmod(rem.seconds, 60)
    (hours, minutes) = divmod(minutes, 60)
    s = with_word(rem.days, 'day', 'days')
    s += with_word(hours, 'hour', 'hours')
    s += with_word(minutes, 'minute', 'minutes')
    return s.strip()
# An event (aka calendar entry)
class Event(db.Model):
    __tablename__ = 'event'

    id = db.Column(db.Integer, primary_key=True)
    event_date = db.Column(db.DateTime())  # when the event takes place
    name = db.Column(db.String(80))
    description = db.Column(db.Text)

    # The creator of this particular event
    creator_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    creator = db.relationship('User', back_populates='events_created', lazy='joined')
    # All the users which have subscribed to this event
    subscriptions = db.relationship('Subscription', back_populates='event', lazy='joined')

    def __init__(self, event_date, name, description, user_id):
        self.event_date = event_date
        self.name = name
        self.description = description
        self.creator_id = user_id

    def __repr__(self):
        # NOTE(review): two placeholders but three arguments -- event_date is
        # silently ignored by str.format; probably a third '{}' was intended.
        return '<Event {}: {}>'.format(self.id, self.name, self.event_date)

    @property
    def remaining_time(self):
        # Human-readable countdown until event_date (see timedelta_to_string).
        now = datetime.now()
        return timedelta_to_string(self.event_date - now)
class Commitment(enum.Enum):
    """Possible RSVP answers to an event."""
    # NOTE(review): Subscription.commitment stores the capitalised strings
    # 'Yes'/'No'/'Maybe' directly and does not reference this enum -- confirm
    # before relying on these lowercase values.
    yes = 'yes'
    no = 'no'
    maybe = 'maybe'
# A subscription of a user to an event. M to N relation
class Subscription(db.Model):
    """Association row linking a User to an Event (M:N join table)."""
    __tablename__ = 'subscription'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User', back_populates='subscriptions', lazy='joined')
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'))
    event = db.relationship('Event', back_populates='subscriptions', lazy='joined')
    # Whether the user actively takes part in the event or not
    # NOTE(review): capitalized values here vs. the lowercase Commitment
    # enum defined above -- they are unrelated; confirm intent.
    commitment = db.Column(db.Enum('Yes', 'No', 'Maybe'))
    # Each user can add a comment (for example the role he is going to play in the event)
    comment = db.Column(db.Text)

    def __init__(self, user_id, event_id):
        self.user_id = user_id
        self.event_id = event_id
        # New subscriptions default to attending.
        self.commitment = 'Yes'

    def __repr__(self):
        return '<Subscr {}: {} to {}>'.format(self.id, self.user.username, self.event.name)
|
alexd2580/evelyn
|
evelyn/models.py
|
Python
|
mit
| 4,893
|
[
"VisIt"
] |
fac638299b80ede284d5a10a0e6c6213ca76b208b5b780bd02d3fa1a07f2e99c
|
from ovito import *
from ovito.io import *

# Load the test CFG file and inspect the particle types it declares.
node = import_file("../../files/CFG/shear.void.120.cfg")

ptype_property = node.source.particle_type

# The file is expected to define exactly three particle types, the first
# of which has id 1.
assert(len(ptype_property.type_list) == 3)
assert(ptype_property.type_list[0].id == 1)

# Dump the attributes of the first type, then the raw per-particle array.
first_type = ptype_property.type_list[0]
for attribute in ('id', 'color', 'name', 'radius'):
    print(getattr(first_type, attribute))
print(ptype_property.array)
|
srinath-chakravarthy/ovito
|
tests/scripts/test_suite/particle_type.py
|
Python
|
gpl-3.0
| 423
|
[
"OVITO"
] |
888745454aa8548fa01408444bbc8f2909db388875e40f7b5e6fd4f2506e08cd
|
"""
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
# Future statements must precede all other code (only the module docstring
# may come first); having it after `ident`/imports is a SyntaxError in
# Python 2, so it leads the prologue here.
from __future__ import nested_scopes

ident = '$Id: Types.py,v 1.19 2005/02/22 04:29:43 warnes Exp $'

# Standard library
import UserList
import base64
import cgi
import copy
import re
import time
import urllib
from types import *

# SOAPpy modules
from version import __version__
from Errors import *
from NS import NS
from Utilities import encodeHexString, cleanDate
from Config import Config
###############################################################################
# Utility functions
###############################################################################
def isPrivate(name):
    """Return True if *name* is private by convention (leading underscore).

    Uses startswith so an empty name is simply "not private" instead of
    raising IndexError as the old name[0] subscript did.
    """
    return name.startswith('_')
def isPublic(name):
    """Return True if *name* is public (no leading underscore).

    Uses startswith so an empty name is "public" instead of raising
    IndexError as the old name[0] subscript did.
    """
    return not name.startswith('_')
###############################################################################
# Types and Wrappers
###############################################################################
class anyType:
    """Base wrapper for all SOAP/XSD typed values.

    Subclasses map onto XSD simple or compound types; the XSD type name is
    the class name minus the trailing 'Type' (see _typeName).  _attrs holds
    XML attributes keyed by (namespace-or-None, localname) tuples; _cache
    memoizes the marshalled string where subclasses use it.
    """
    _validURIs = (NS.XSD, NS.XSD2, NS.XSD3, NS.ENC)

    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == anyType:
            raise Error, "anyType can't be instantiated directly"
        # name may be a (namespace, localname) pair or a bare string.
        if type(name) in (ListType, TupleType):
            self._ns, self._name = name
        else:
            self._ns = self._validURIs[0]
            self._name = name
        self._typed = typed
        self._attrs = {}
        self._cache = None
        self._type = self._typeName()
        # Subclass hook validates/canonicalizes the payload.
        self._data = self._checkValueSpace(data)
        if attrs != None:
            self._setAttrs(attrs)

    def __str__(self):
        if hasattr(self,'_name') and self._name:
            return "<%s %s at %d>" % (self.__class__, self._name, id(self))
        return "<%s at %d>" % (self.__class__, id(self))

    __repr__ = __str__

    def _checkValueSpace(self, data):
        # Base class accepts any value; subclasses override to validate.
        return data

    def _marshalData(self):
        # Default serialization: plain str() of the payload.
        return str(self._data)

    def _marshalAttrs(self, ns_map, builder):
        # Serialize the attribute dict into XML attribute text, escaping
        # values and resolving namespaces through the builder.
        a = ''
        for attr, value in self._attrs.items():
            ns, n = builder.genns(ns_map, attr[0])
            a += n + ' %s%s="%s"' % \
                (ns, attr[1], cgi.escape(str(value), 1))
        return a

    def _fixAttr(self, attr):
        # Normalize an attribute key to a (namespace-or-None, name) tuple.
        if type(attr) in (StringType, UnicodeType):
            attr = (None, attr)
        elif type(attr) == ListType:
            attr = tuple(attr)
        elif type(attr) != TupleType:
            raise AttributeError, "invalid attribute type"
        if len(attr) != 2:
            raise AttributeError, "invalid attribute length"
        if type(attr[0]) not in (NoneType, StringType, UnicodeType):
            raise AttributeError, "invalid attribute namespace URI type"
        return attr

    def _getAttr(self, attr):
        # Missing attributes yield None rather than raising.
        attr = self._fixAttr(attr)
        try:
            return self._attrs[attr]
        except:
            return None

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if type(value) is StringType:
            value = unicode(value)
        self._attrs[attr] = value

    def _setAttrs(self, attrs):
        # Accepts a flat [k1, v1, k2, v2, ...] sequence, a dict, or another
        # anyType instance whose attributes are copied.
        if type(attrs) in (ListType, TupleType):
            for i in range(0, len(attrs), 2):
                self._setAttr(attrs[i], attrs[i + 1])
            return
        if type(attrs) == DictType:
            d = attrs
        elif isinstance(attrs, anyType):
            d = attrs._attrs
        else:
            raise AttributeError, "invalid attribute type"
        for attr, value in d.items():
            self._setAttr(attr, value)

    def _setMustUnderstand(self, val):
        # SOAP envelope mustUnderstand attribute helper.
        self._setAttr((NS.ENV, "mustUnderstand"), val)

    def _getMustUnderstand(self):
        return self._getAttr((NS.ENV, "mustUnderstand"))

    def _setActor(self, val):
        # SOAP envelope actor attribute helper.
        self._setAttr((NS.ENV, "actor"), val)

    def _getActor(self):
        return self._getAttr((NS.ENV, "actor"))

    def _typeName(self):
        # Strip the 'Type' suffix from the class name to get the XSD name.
        return self.__class__.__name__[:-4]

    def _validNamespaceURI(self, URI, strict):
        # Untyped values carry no namespace; otherwise accept known URIs,
        # fall back to our own namespace unless strict.
        if not hasattr(self, '_typed') or not self._typed:
            return None
        if URI in self._validURIs:
            return URI
        if not strict:
            return self._ns
        raise AttributeError, \
            "not a valid namespace for type %s" % self._type
class voidType(anyType):
    # Empty element with no payload; no value-space restrictions.
    pass

class stringType(anyType):
    # XSD string: requires a non-None str/unicode value.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type:" % self._type
        return data

class untypedType(stringType):
    # A string marshalled without an xsi:type attribute (typed = 0).
    def __init__(self, data = None, name = None, attrs = None):
        stringType.__init__(self, data, name, 0, attrs)

# String-derived XSD types; all share stringType's value space and differ
# only in the XSD type name they marshal under.
class IDType(stringType): pass
class NCNameType(stringType): pass
class NameType(stringType): pass
class ENTITYType(stringType): pass
class IDREFType(stringType): pass
class languageType(stringType): pass
class NMTOKENType(stringType): pass
class QNameType(stringType): pass
class tokenType(anyType):
    """XSD token: a string with no tabs/newlines and no leading, trailing,
    or consecutive spaces."""
    _validURIs = (NS.XSD2, NS.XSD3)
    # Pattern rejecting the forbidden whitespace forms; compiled lazily.
    __invalidre = '[\n\t]|^ | $|  '

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        # Compile the class-level pattern string on first use.
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type
        return data
class normalizedStringType(anyType):
    """XSD normalizedString: a string with no newline, carriage return or
    tab characters."""
    _validURIs = (NS.XSD3,)
    # Forbidden characters; compiled lazily on first validation.
    __invalidre = '[\n\r\t]'

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        if type(self.__invalidre) == StringType:
            self.__invalidre = re.compile(self.__invalidre)
        if self.__invalidre.search(data):
            raise ValueError, "invalid %s value" % self._type
        return data

class CDATAType(normalizedStringType):
    # Same value space as normalizedString, under the XSD2 namespace.
    _validURIs = (NS.XSD2,)
class booleanType(anyType):
    """XSD boolean; the canonical internal value is the int 0 or 1."""
    def __int__(self):
        return self._data

    __nonzero__ = __int__

    def _marshalData(self):
        # Marshal as the XSD lexical forms 'false'/'true'.
        return ['false', 'true'][self._data]

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        # Accept the XSD lexical forms as well as ints; '' counts as false.
        if data in (0, '0', 'false', ''):
            return 0
        if data in (1, '1', 'true'):
            return 1
        raise ValueError, "invalid %s value" % self._type
class decimalType(anyType):
    # XSD decimal: accepts int/long/float.
    # NOTE(review): raises Error here where sibling types raise ValueError
    # -- inconsistent, but callers may depend on it.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType):
            raise Error, "invalid %s value" % self._type
        return data
class floatType(anyType):
    """XSD float: numeric value bounded by the IEEE single-precision range."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -3.4028234663852886E+38 or \
            data > 3.4028234663852886E+38:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision

class doubleType(anyType):
    """XSD double: numeric value bounded by the IEEE double-precision range."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType, FloatType) or \
            data < -1.7976931348623158E+308 or \
            data > 1.7976931348623157E+308:
            raise ValueError, "invalid %s value: %s" % (self._type, repr(data))
        return data

    def _marshalData(self):
        return "%.18g" % self._data # More precision
class durationType(anyType):
    """XSD duration: stored as a 6-tuple (years, months, days, hours,
    minutes, seconds), marshalled as e.g. 'P1Y2M3DT4H5M6S'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        try:
            # A tuple or a scalar is OK, but make them into a list
            if type(data) == TupleType:
                data = list(data)
            elif type(data) != ListType:
                data = [data]
            if len(data) > 6:
                raise Exception, "too many values"
            # Now check the types of all the components, and find
            # the first nonzero element along the way.
            f = -1
            for i in range(len(data)):
                if data[i] == None:
                    data[i] = 0
                    continue
                if type(data[i]) not in \
                    (IntType, LongType, FloatType):
                    raise Exception, "element %d a bad type" % i
                if data[i] and f == -1:
                    f = i
            # If they're all 0, just use zero seconds.
            if f == -1:
                self._cache = 'PT0S'
                return (0,) * 6
            # Make sure only the last nonzero element has a decimal fraction
            # and only the first element is negative.
            d = -1
            for i in range(f, len(data)):
                if data[i]:
                    if d != -1:
                        raise Exception, \
                            "all except the last nonzero element must be " \
                            "integers"
                    if data[i] < 0 and i > f:
                        raise Exception, \
                            "only the first nonzero element can be negative"
                    elif data[i] != long(data[i]):
                        d = i
            # Pad the list on the left if necessary.
            if len(data) < 6:
                n = 6 - len(data)
                f += n
                d += n
                data = [0] * n + data
            # Save index of the first nonzero element and the decimal
            # element for _marshalData.
            self.__firstnonzero = f
            self.__decimal = d
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            t = 0
            # Leading '-' when the first nonzero component is negative.
            if d[self.__firstnonzero] < 0:
                s = '-P'
            else:
                s = 'P'
            t = 0
            for i in range(self.__firstnonzero, len(d)):
                if d[i]:
                    # 'T' separates the date part from the time part.
                    if i > 2 and not t:
                        s += 'T'
                        t = 1
                    if self.__decimal == i:
                        s += "%g" % abs(d[i])
                    else:
                        s += "%d" % long(abs(d[i]))
                    s += ['Y', 'M', 'D', 'H', 'M', 'S'][i]
            self._cache = s
        return self._cache

class timeDurationType(durationType):
    # Pre-XSD3 name for duration under the older namespaces.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)
class dateTimeType(anyType):
    """XSD dateTime: stored as a UTC 6-tuple (Y, M, D, h, m, s); marshals
    as ISO 8601 with a trailing 'Z'.  None means 'now'."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.time()
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                # Preserve the fractional second on the seconds field.
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 6:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                cleanDate(data)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dT%02d:%02d:%02d" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            # Append the fractional second, if any, without its leading '0'.
            f = d[5] - int(d[5])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class recurringInstantType(anyType):
    """XSD recurringInstant: like dateTime but leftmost fields may be None
    (omitted), marshalled with '-' placeholders."""
    _validURIs = (NS.XSD,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = list(time.gmtime(time.time())[:6])
            if (type(data) in (IntType, LongType)):
                data = list(time.gmtime(data)[:6])
            elif (type(data) == FloatType):
                f = data - int(data)
                data = list(time.gmtime(int(data))[:6])
                data[5] += f
            elif type(data) in (ListType, TupleType):
                if len(data) < 1:
                    raise Exception, "not enough values"
                if len(data) > 9:
                    raise Exception, "too many values"
                data = list(data[:6])
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                # f becomes the index of the first non-None element; only a
                # leading run of Nones is allowed.
                f = len(data)
                for i in range(f):
                    if data[i] == None:
                        if f < i:
                            raise Exception, \
                                "only leftmost elements can be none"
                    else:
                        f = i
                        break
                cleanDate(data, f)
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            e = list(d)
            neg = ''
            # Omitted year marshals as '--'; otherwise 2- or 4-digit year.
            if not e[0]:
                e[0] = '--'
            else:
                if e[0] < 0:
                    neg = '-'
                    e[0] = abs(e[0])
                if e[0] < 100:
                    e[0] = '-' + "%02d" % e[0]
                else:
                    e[0] = "%04d" % e[0]
            for i in range(1, len(e)):
                if e[i] == None or (i < 3 and e[i] == 0):
                    e[i] = '-'
                else:
                    if e[i] < 0:
                        neg = '-'
                        e[i] = abs(e[i])
                    e[i] = "%02d" % e[i]
            if d[5]:
                f = abs(d[5] - int(d[5]))
                if f:
                    e[5] += ("%g" % f)[1:]
            s = "%s%s-%s-%sT%s:%s:%sZ" % ((neg,) + tuple(e))
            self._cache = s
        return self._cache
class timeInstantType(dateTimeType):
    # Pre-XSD3 name for dateTime.
    _validURIs = (NS.XSD, NS.XSD2, NS.ENC)

class timePeriodType(dateTimeType):
    # XSD2/SOAP-ENC alias of dateTime.
    _validURIs = (NS.XSD2, NS.ENC)

class timeType(anyType):
    """XSD time: stored as a UTC (hour, minute, second) triple."""
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[3:6]
            elif (type(data) == FloatType):
                # Keep the fractional second on the seconds field.
                f = data - int(data)
                data = list(time.gmtime(int(data))[3:6])
                data[2] += f
            elif type(data) in (IntType, LongType):
                data = time.gmtime(data)[3:6]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # A full struct_time: take the h/m/s slice.
                    data = data[3:6]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = [None, None, None] + list(data)
                if len(data) < 6:
                    data += [0] * (6 - len(data))
                cleanDate(data, 3)
                data = data[3:]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = ''
            s = time.strftime("%H:%M:%S", (0, 0, 0) + d + (0, 0, -1))
            f = d[2] - int(d[2])
            if f != 0:
                s += ("%g" % f)[1:]
            s += 'Z'
            self._cache = s
        return self._cache
class dateType(anyType):
    """XSD date: stored as a UTC (year, month, day) triple."""
    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:3]
                elif len(data) > 3:
                    raise Exception, "too many values"
                data = list(data)
                # Default missing fields to 1, pad time fields with zeros
                # so cleanDate can validate the whole 6-tuple.
                if len(data) < 3:
                    data += [1, 1, 1][len(data):]
                data += [0, 0, 0]
                cleanDate(data)
                data = data[:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearMonthType(anyType):
    """XSD gYearMonth: stored as a (year, month) pair."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[0:2]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                # Default missing fields, pad day/time so cleanDate can run.
                if len(data) < 2:
                    data += [1, 1][len(data):]
                data += [1, 0, 0, 0]
                cleanDate(data)
                data = data[:2]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04d-%02dZ" % ((abs(d[0]),) + d[1:])
            if d[0] < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class gYearType(anyType):
    """XSD gYear: a single integral year value."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[0:1]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[0:1]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                # Floats are accepted only when they are whole numbers.
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]

    def _marshalData(self):
        if self._cache == None:
            d = self._data
            s = "%04dZ" % abs(d)
            if d < 0:
                s = '-' + s
            self._cache = s
        return self._cache
class centuryType(anyType):
_validURIs = (NS.XSD2, NS.ENC)
def _checkValueSpace(self, data):
try:
if data == None:
data = time.gmtime(time.time())[0:1] / 100
elif type(data) in (IntType, LongType, FloatType):
data = [data]
if type(data) in (ListType, TupleType):
if len(data) == 9:
data = data[0:1] / 100
elif len(data) < 1:
raise Exception, "too few values"
elif len(data) > 1:
raise Exception, "too many values"
if type(data[0]) == FloatType:
try: s = int(data[0])
except: s = long(data[0])
if s != data[0]:
raise Exception, "not integral"
data = [s]
elif type(data[0]) not in (IntType, LongType):
raise Exception, "bad type"
else:
raise Exception, "invalid type"
except Exception, e:
raise ValueError, "invalid %s value - %s" % (self._type, e)
return data[0]
def _marshalData(self):
if self._cache == None:
d = self._data
s = "%02dZ" % abs(d)
if d < 0:
s = '-' + s
self._cache = s
return self._cache
class yearType(gYearType):
    # XSD2/SOAP-ENC alias of gYear.
    _validURIs = (NS.XSD2, NS.ENC)

class gMonthDayType(anyType):
    """XSD gMonthDay: stored as a (month, day) pair."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = time.gmtime(data)[1:3]
            elif type(data) in (ListType, TupleType):
                if len(data) == 9:
                    # NOTE(review): for a full struct_time this takes
                    # (year, month), not (month, day) as the other branches
                    # do -- looks like a [1:3] was intended; confirm.
                    data = data[0:2]
                elif len(data) > 2:
                    raise Exception, "too many values"
                data = list(data)
                if len(data) < 2:
                    data += [1, 1][len(data):]
                # Prepend a dummy year and pad time fields for cleanDate.
                data = [0] + data + [0, 0, 0]
                cleanDate(data, 1)
                data = data[1:3]
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return tuple(data)

    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d-%02dZ" % self._data
        return self._cache

class recurringDateType(gMonthDayType):
    # XSD2/SOAP-ENC alias of gMonthDay.
    _validURIs = (NS.XSD2, NS.ENC)
class gMonthType(anyType):
    """XSD gMonth: a single month number (1-12)."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[1:2]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[1:2]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 12:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]

    def _marshalData(self):
        if self._cache == None:
            self._cache = "--%02d--Z" % self._data
        return self._cache

class monthType(gMonthType):
    # XSD2/SOAP-ENC alias of gMonth.
    _validURIs = (NS.XSD2, NS.ENC)
class gDayType(anyType):
    """XSD gDay: a single day-of-month number (1-31)."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        try:
            if data == None:
                data = time.gmtime(time.time())[2:3]
            elif type(data) in (IntType, LongType, FloatType):
                data = [data]
            if type(data) in (ListType, TupleType):
                if len(data) == 9:
                    data = data[2:3]
                elif len(data) < 1:
                    raise Exception, "too few values"
                elif len(data) > 1:
                    raise Exception, "too many values"
                if type(data[0]) == FloatType:
                    try: s = int(data[0])
                    except: s = long(data[0])
                    if s != data[0]:
                        raise Exception, "not integral"
                    data = [s]
                elif type(data[0]) not in (IntType, LongType):
                    raise Exception, "bad type"
                if data[0] < 1 or data[0] > 31:
                    raise Exception, "bad value"
            else:
                raise Exception, "invalid type"
        except Exception, e:
            raise ValueError, "invalid %s value - %s" % (self._type, e)
        return data[0]

    def _marshalData(self):
        if self._cache == None:
            self._cache = "---%02dZ" % self._data
        return self._cache

class recurringDayType(gDayType):
    # XSD2/SOAP-ENC alias of gDay.
    _validURIs = (NS.XSD2, NS.ENC)
class hexBinaryType(anyType):
    """XSD hexBinary: a byte string marshalled as hex digits."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data

    def _marshalData(self):
        if self._cache == None:
            self._cache = encodeHexString(self._data)
        return self._cache

class base64BinaryType(anyType):
    """XSD base64Binary: a byte string marshalled as base64."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data

    def _marshalData(self):
        if self._cache == None:
            self._cache = base64.encodestring(self._data)
        return self._cache

class base64Type(base64BinaryType):
    # SOAP-ENC name for base64-encoded binary.
    _validURIs = (NS.ENC,)
class binaryType(anyType):
    """Binary value with a selectable 'base64' or 'hex' encoding attribute."""
    _validURIs = (NS.XSD, NS.ENC)

    def __init__(self, data, name = None, typed = 1, encoding = 'base64',
        attrs = None):
        anyType.__init__(self, data, name, typed, attrs)
        self._setAttr('encoding', encoding)

    def _marshalData(self):
        if self._cache == None:
            # Encode per the current 'encoding' attribute.
            if self._getAttr((None, 'encoding')) == 'base64':
                self._cache = base64.encodestring(self._data)
            else:
                self._cache = encodeHexString(self._data)
        return self._cache

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data

    def _setAttr(self, attr, value):
        attr = self._fixAttr(attr)
        if attr[1] == 'encoding':
            # Only the two known encodings are valid; changing the encoding
            # invalidates the marshalled cache.
            if attr[0] != None or value not in ('base64', 'hex'):
                raise AttributeError, "invalid encoding"
            self._cache = None
        anyType._setAttr(self, attr, value)
class anyURIType(anyType):
    """XSD anyURI: a string marshalled percent-encoded."""
    _validURIs = (NS.XSD3,)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (StringType, UnicodeType):
            raise AttributeError, "invalid %s type" % self._type
        return data

    def _marshalData(self):
        if self._cache == None:
            self._cache = urllib.quote(self._data)
        return self._cache

class uriType(anyURIType):
    # XSD1 name for anyURI.
    _validURIs = (NS.XSD,)

class uriReferenceType(anyURIType):
    # XSD2 name for anyURI.
    _validURIs = (NS.XSD2,)
class NOTATIONType(anyType):
    # Abstract per XSD: must be subclassed, never instantiated directly.
    def __init__(self, data, name = None, typed = 1, attrs = None):
        if self.__class__ == NOTATIONType:
            raise Error, "a NOTATION can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
class ENTITIESType(anyType):
    """XSD ENTITIES: a sequence of strings, marshalled space-separated.
    A bare string is accepted and wrapped in a 1-tuple."""
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) in (StringType, UnicodeType):
            return (data,)
        # Every element of a list/tuple must itself be a string.
        if type(data) not in (ListType, TupleType) or \
            filter (lambda x: type(x) not in (StringType, UnicodeType), data):
            raise AttributeError, "invalid %s type" % self._type
        return data

    def _marshalData(self):
        return ' '.join(self._data)

# Same list-of-strings value space, different XSD type names.
class IDREFSType(ENTITIESType): pass
class NMTOKENSType(ENTITIESType): pass
class integerType(anyType):
    # XSD integer: unbounded int/long.
    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType):
            raise ValueError, "invalid %s value" % self._type
        return data
class nonPositiveIntegerType(anyType):
    # XSD nonPositiveInteger: integer <= 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data > 0:
            raise ValueError, "invalid %s value" % self._type
        return data

class non_Positive_IntegerType(nonPositiveIntegerType):
    # XSD1 spelled the type name with hyphens.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'non-positive-integer'
class negativeIntegerType(anyType):
    # XSD negativeInteger: integer < 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data >= 0:
            raise ValueError, "invalid %s value" % self._type
        return data

class negative_IntegerType(negativeIntegerType):
    # XSD1 spelled the type name with a hyphen.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'negative-integer'
class longType(anyType):
    # XSD long: signed 64-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -9223372036854775808L or \
            data > 9223372036854775807L:
            raise ValueError, "invalid %s value" % self._type
        return data
class intType(anyType):
    # XSD int: signed 32-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -2147483648L or \
            data > 2147483647:
            raise ValueError, "invalid %s value" % self._type
        return data
class shortType(anyType):
    # XSD short: signed 16-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -32768 or \
            data > 32767:
            raise ValueError, "invalid %s value" % self._type
        return data

class byteType(anyType):
    # XSD byte: signed 8-bit range.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < -128 or \
            data > 127:
            raise ValueError, "invalid %s value" % self._type
        return data
class nonNegativeIntegerType(anyType):
    # XSD nonNegativeInteger: integer >= 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data < 0:
            raise ValueError, "invalid %s value" % self._type
        return data

class non_Negative_IntegerType(nonNegativeIntegerType):
    # XSD1 spelled the type name with hyphens.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'non-negative-integer'
class unsignedLongType(anyType):
    # XSD unsignedLong: 0 .. 2**64 - 1.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 18446744073709551615L:
            raise ValueError, "invalid %s value" % self._type
        return data

class unsignedIntType(anyType):
    # XSD unsignedInt: 0 .. 2**32 - 1.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 4294967295L:
            raise ValueError, "invalid %s value" % self._type
        return data

class unsignedShortType(anyType):
    # XSD unsignedShort: 0 .. 65535.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 65535:
            raise ValueError, "invalid %s value" % self._type
        return data

class unsignedByteType(anyType):
    # XSD unsignedByte: 0 .. 255.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or \
            data < 0 or \
            data > 255:
            raise ValueError, "invalid %s value" % self._type
        return data
class positiveIntegerType(anyType):
    # XSD positiveInteger: integer > 0.
    _validURIs = (NS.XSD2, NS.XSD3, NS.ENC)

    def _checkValueSpace(self, data):
        if data == None:
            raise ValueError, "must supply initial %s value" % self._type
        if type(data) not in (IntType, LongType) or data <= 0:
            raise ValueError, "invalid %s value" % self._type
        return data

class positive_IntegerType(positiveIntegerType):
    # XSD1 spelled the type name with a hyphen.
    _validURIs = (NS.XSD,)

    def _typeName(self):
        return 'positive-integer'
# Now compound types
class compoundType(anyType):
    """Base for struct-like SOAP values: named members are instance
    attributes, with _keyord preserving insertion order.

    NOTE(review): dict initialization copies keys into __dict__ but does
    NOT record them in _keyord, so dict-initialized members are invisible
    to __len__/_aslist -- confirm whether that asymmetry is intended.
    """
    def __init__(self, data = None, name = None, typed = 1, attrs = None):
        if self.__class__ == compoundType:
            raise Error, "a compound can't be instantiated directly"
        anyType.__init__(self, data, name, typed, attrs)
        self._keyord = []
        if type(data) == DictType:
            self.__dict__.update(data)

    def _aslist(self, item=None):
        # One member by ordinal position, or all members in insertion order.
        if item is not None:
            return self.__dict__[self._keyord[item]]
        else:
            return map( lambda x: self.__dict__[x], self._keyord)

    def _asdict(self, item=None, encoding=Config.dict_encoding):
        if item is not None:
            if type(item) in (UnicodeType,StringType):
                item = item.encode(encoding)
            return self.__dict__[item]
        else:
            retval = {}
            def fun(x): retval[x.encode(encoding)] = self.__dict__[x]
            # Fall back to dir() when _keyord is absent (e.g. unpickled).
            if hasattr(self, '_keyord'):
                map( fun, self._keyord)
            else:
                for name in dir(self):
                    if isPublic(name):
                        retval[name] = getattr(self,name)
            return retval

    def __getitem__(self, item):
        # Integer -> positional member; anything else -> attribute access.
        if type(item) == IntType:
            return self.__dict__[self._keyord[item]]
        else:
            return getattr(self, item)

    def __len__(self):
        return len(self._keyord)

    def __nonzero__(self):
        # A compound is always truthy, even when empty.
        return 1

    def _keys(self):
        return filter(lambda x: x[0] != '_', self.__dict__.keys())

    def _addItem(self, name, value, attrs = None):
        # A repeated name promotes the member to a list and appends.
        if name in self._keyord:
            if type(self.__dict__[name]) != ListType:
                self.__dict__[name] = [self.__dict__[name]]
            self.__dict__[name].append(value)
        else:
            self.__dict__[name] = value
            self._keyord.append(name)

    def _placeItem(self, name, value, pos, subpos = 0, attrs = None):
        # Overwrite the member (or one element of a list member) in place.
        if subpos == 0 and type(self.__dict__[name]) != ListType:
            self.__dict__[name] = value
        else:
            self.__dict__[name][subpos] = value
        self._keyord[pos] = name

    def _getItemAsList(self, name, default = []):
        # Always return a list: missing -> default, scalar -> 1-elem list.
        try:
            d = self.__dict__[name]
        except:
            return default
        if type(d) == ListType:
            return d
        return [d]

    def __str__(self):
        return anyType.__str__(self) + ": " + str(self._asdict())

    def __repr__(self):
        return self.__str__()
class structType(compoundType):
    # Generic SOAP struct; behavior entirely inherited from compoundType.
    pass

class headerType(structType):
    # The SOAP envelope Header element (fixed name "Header").
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Header", typed, attrs)

class bodyType(structType):
    # The SOAP envelope Body element (fixed name "Body").
    _validURIs = (NS.ENV,)
    def __init__(self, data = None, typed = 1, attrs = None):
        structType.__init__(self, data, "Body", typed, attrs)
class arrayType(UserList.UserList, compoundType):
def __init__(self, data = None, name = None, attrs = None,
offset = 0, rank = None, asize = 0, elemsname = None):
if data:
if type(data) not in (ListType, TupleType):
raise Error, "Data must be a sequence"
UserList.UserList.__init__(self, data)
compoundType.__init__(self, data, name, 0, attrs)
self._elemsname = elemsname or "item"
if data == None:
self._rank = rank
# According to 5.4.2.2 in the SOAP spec, each element in a
# sparse array must have a position. _posstate keeps track of
# whether we've seen a position or not. It's possible values
# are:
# -1 No elements have been added, so the state is indeterminate
# 0 An element without a position has been added, so no
# elements can have positions
# 1 An element with a position has been added, so all elements
# must have positions
self._posstate = -1
self._full = 0
if asize in ('', None):
asize = '0'
self._dims = map (lambda x: int(x), str(asize).split(','))
self._dims.reverse() # It's easier to work with this way
self._poss = [0] * len(self._dims) # This will end up
# reversed too
for i in range(len(self._dims)):
if self._dims[i] < 0 or \
self._dims[i] == 0 and len(self._dims) > 1:
raise TypeError, "invalid Array dimensions"
if offset > 0:
self._poss[i] = offset % self._dims[i]
offset = int(offset / self._dims[i])
# Don't break out of the loop if offset is 0 so we test all the
# dimensions for > 0.
if offset:
raise AttributeError, "invalid Array offset"
a = [None] * self._dims[0]
for i in range(1, len(self._dims)):
b = []
for j in range(self._dims[i]):
b.append(copy.deepcopy(a))
a = b
self.data = a
def _aslist(self, item=None):
if item is not None:
return self.data[int(item)]
else:
return self.data
def _asdict(self, item=None, encoding=Config.dict_encoding):
if item is not None:
if type(item) in (UnicodeType,StringType):
item = item.encode(encoding)
return self.data[int(item)]
else:
retval = {}
def fun(x): retval[str(x).encode(encoding)] = self.data[x]
map( fun, range(len(self.data)) )
return retval
def __getitem__(self, item):
try:
return self.data[int(item)]
except ValueError:
return getattr(self, item)
def __len__(self):
return len(self.data)
def __nonzero__(self):
return 1
def __str__(self):
return anyType.__str__(self) + ": " + str(self._aslist())
def _keys(self):
return filter(lambda x: x[0] != '_', self.__dict__.keys())
def _addItem(self, name, value, attrs):
if self._full:
raise ValueError, "Array is full"
pos = attrs.get((NS.ENC, 'position'))
if pos != None:
if self._posstate == 0:
raise AttributeError, \
"all elements in a sparse Array must have a " \
"position attribute"
self._posstate = 1
try:
if pos[0] == '[' and pos[-1] == ']':
pos = map (lambda x: int(x), pos[1:-1].split(','))
pos.reverse()
if len(pos) == 1:
pos = pos[0]
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if pos:
raise Exception
elif len(pos) != len(self._dims):
raise Exception
else:
for i in range(len(self._dims)):
if pos[i] >= self._dims[i]:
raise Exception
curpos = pos
else:
raise Exception
except:
raise AttributeError, \
"invalid Array element position %s" % str(pos)
else:
if self._posstate == 1:
raise AttributeError, \
"only elements in a sparse Array may have a " \
"position attribute"
self._posstate = 0
curpos = self._poss
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
if pos == None:
self._poss[0] += 1
for i in range(len(self._dims) - 1):
if self._poss[i] < self._dims[i]:
break
self._poss[i] = 0
self._poss[i + 1] += 1
if self._dims[-1] and self._poss[-1] >= self._dims[-1]:
#self._full = 1
#FIXME: why is this occuring?
pass
def _placeItem(self, name, value, pos, subpos, attrs = None):
curpos = [0] * len(self._dims)
for i in range(len(self._dims)):
if self._dims[i] == 0:
curpos[0] = pos
break
curpos[i] = pos % self._dims[i]
pos = int(pos / self._dims[i])
if pos == 0:
break
if self._dims[i] != 0 and pos:
raise Error, "array index out of range"
a = self.data
for i in range(len(self._dims) - 1, 0, -1):
a = a[curpos[i]]
if curpos[0] >= len(a):
a += [None] * (len(a) - curpos[0] + 1)
a[curpos[0]] = value
class typedArrayType(arrayType):
    """Array whose elements all share a single declared type."""

    def __init__(self, data = None, name = None, typed = None, attrs = None,
        offset = 0, rank = None, asize = 0, elemsname = None, complexType = 0):

        arrayType.__init__(self, data, name, attrs, offset, rank, asize,
            elemsname)

        # Record the common element type for serialization.
        self._typed = 1
        self._type = typed
        self._complexType = complexType
class faultType(structType, Error):
    """A SOAP Fault: both serializable (structType) and raisable (Error).

    Carries faultcode, faultstring and an optional detail member.
    """

    def __init__(self, faultcode = "", faultstring = "", detail = None):
        self.faultcode = faultcode
        self.faultstring = faultstring
        if detail != None:
            self.detail = detail

        # NOTE(review): 0 is passed positionally where structType takes the
        # element name -- presumably deliberate; confirm against
        # compoundType.__init__.
        structType.__init__(self, None, 0)

    def _setDetail(self, detail = None):
        # Set the detail, or remove it entirely when called with None.
        if detail != None:
            self.detail = detail
        else:
            try: del self.detail
            except AttributeError: pass

    def __repr__(self):
        # Include the detail only when present.
        if getattr(self, 'detail', None) != None:
            return "<Fault %s: %s: %s>" % (self.faultcode,
                                           self.faultstring,
                                           self.detail)
        else:
            return "<Fault %s: %s>" % (self.faultcode, self.faultstring)

    __str__ = __repr__

    def __call__(self):
        # Return the fault as a (code, string, detail) triple.
        return (self.faultcode, self.faultstring, self.detail)
class SOAPException(Exception):
    """Generic SOAP fault raised when no more specific exception applies."""

    def __init__(self, code="", string="", detail=None):
        # Keep individual fields and the combined tuple used by __str__.
        self.code, self.string, self.detail = code, string, detail
        self.value = ("SOAPpy SOAP Exception", code, string, detail)

    def __str__(self):
        return repr(self.value)
class RequiredHeaderMismatch(Exception):
    """Raised when a mustUnderstand SOAP header could not be handled."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodNotFound(Exception):
    """Raised when the server reports a 'Method Not Found' fault.

    The fault detail is expected to look like "<value>:<detail>".
    """

    def __init__(self, value):
        # Split only on the first ':' -- details containing colons
        # (e.g. URLs) used to raise ValueError on unpacking.
        (val, detail) = value.split(":", 1)
        self.value = val
        self.detail = detail

    def __str__(self):
        # BUGFIX: repr() takes a single argument; the original passed two,
        # raising TypeError whenever the exception was stringified.
        return repr((self.value, self.detail))
class AuthorizationFailed(Exception):
    """Raised when the server reports an 'Authorization Failed' fault."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
class MethodFailed(Exception):
    """Raised when the server reports a 'Method Failed' fault."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "%r" % (self.value,)
#######
# Convert complex SOAPpy objects to native python equivalents
#######
def simplify(object, level=0):
    """
    Convert the SOAPpy objects and their contents to simple python types.

    This function recursively converts the passed 'container' object,
    and all public subobjects. (Private subobjects have names that
    start with '_'.)

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """
    # Guard against unbounded recursion (e.g. cyclic structures).
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Translate well-known fault strings into specific exceptions.
        if object.faultstring == "Required Header Misunderstood":
            raise RequiredHeaderMismatch(object.detail)
        elif object.faultstring == "Method Not Found":
            raise MethodNotFound(object.detail)
        elif object.faultstring == "Authorization Failed":
            raise AuthorizationFailed(object.detail)
        elif object.faultstring == "Method Failed":
            raise MethodFailed(object.detail)
        else:
            se = SOAPException(object.faultcode, object.faultstring,
                               object.detail)
            raise se
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            data[k] = simplify(data[k], level=level+1)
        return data
    elif isinstance( object, compoundType ) or isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                data[k] = simplify(data[k], level=level+1)
        return data
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # BUGFIX: propagate the recursion depth; the original
                # dropped `level` here, defeating the level > 10 guard
                # for nested plain dicts.
                object[k] = simplify(object[k], level=level+1)
        return object
    elif type(object)==list:
        for k in range(len(object)):
            # BUGFIX: propagate the recursion depth (see above).
            object[k] = simplify(object[k], level=level+1)
        return object
    else:
        return object
def simplify_contents(object, level=0):
    """
    Convert the contents of SOAPpy objects to simple python types.

    This function recursively converts the sub-objects contained in a
    'container' object to simple python types.

    Conversions:
    - faultType    --> raise python exception
    - arrayType    --> array
    - compoundType --> dictionary
    """
    # Guard against unbounded recursion (e.g. cyclic structures).
    if level > 10:
        return object

    if isinstance( object, faultType ):
        # Simplify the fault's public members in place, then raise it.
        for k in object._keys():
            if isPublic(k):
                setattr(object, k, simplify(object[k], level=level+1))
        raise object
    elif isinstance( object, arrayType ):
        data = object._aslist()
        for k in range(len(data)):
            object[k] = simplify(data[k], level=level+1)
    elif isinstance(object, structType):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                setattr(object, k, simplify(data[k], level=level+1))
    elif isinstance( object, compoundType ):
        data = object._asdict()
        for k in data.keys():
            if isPublic(k):
                object[k] = simplify(data[k], level=level+1)
    elif type(object)==DictType:
        for k in object.keys():
            if isPublic(k):
                # BUGFIX: propagate the recursion depth; the original
                # dropped `level` here, defeating the level > 10 guard.
                object[k] = simplify(object[k], level=level+1)
    elif type(object)==list:
        for k in range(len(object)):
            # BUGFIX: propagate the recursion depth (see above).
            object[k] = simplify(object[k], level=level+1)

    return object
|
audaciouscode/Books-Mac-OS-X
|
Export Plugins/WhatsOnMyBookShelf Exporter/SOAPpy/Types.py
|
Python
|
mit
| 51,871
|
[
"Brian"
] |
068dd60d06b64699e19ef4c5554e91eb2188d9dbf91492a6aee95e29cfa324e7
|
"""Analyze python import statements."""
from __future__ import absolute_import, print_function
import ast
import os
import uuid
from lib.util import (
display,
ApplicationError,
)
VIRTUAL_PACKAGES = set([
'ansible.module_utils.six',
])
def get_python_module_utils_imports(compile_targets):
    """Return a dictionary of module_utils names mapped to sets of python file paths.
    :type compile_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    module_utils = enumerate_module_utils()

    # Members of VIRTUAL_PACKAGES are tracked separately from regular
    # module_utils and handled specially below.
    virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
    module_utils -= virtual_utils

    # Direct imports: target path -> set of module_utils names it imports.
    imports_by_target_path = {}

    for target in compile_targets:
        imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)

    def recurse_import(import_name, depth=0, seen=None):
        """Recursively expand module_utils imports from module_utils files.
        :type import_name: str
        :type depth: int
        :type seen: set[str] | None
        :rtype set[str]
        """
        display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)

        if seen is None:
            seen = set([import_name])

        results = set([import_name])

        # virtual packages depend on the modules they contain instead of the reverse
        if import_name in VIRTUAL_PACKAGES:
            for sub_import in sorted(virtual_utils):
                if sub_import.startswith('%s.' % import_name):
                    if sub_import in seen:
                        continue

                    seen.add(sub_import)

                    matches = sorted(recurse_import(sub_import, depth + 1, seen))

                    for result in matches:
                        results.add(result)

        # Resolve the import to a file: try a module first, then a package.
        import_path = os.path.join('lib/', '%s.py' % import_name.replace('.', '/'))

        if import_path not in imports_by_target_path:
            import_path = os.path.join('lib/', import_name.replace('.', '/'), '__init__.py')

            if import_path not in imports_by_target_path:
                raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)

        # process imports in reverse so the deepest imports come first
        for name in sorted(imports_by_target_path[import_path], reverse=True):
            if name in virtual_utils:
                continue

            if name in seen:
                continue

            seen.add(name)

            matches = sorted(recurse_import(name, depth + 1, seen))

            for result in matches:
                results.add(result)

        return results

    for module_util in module_utils:
        # recurse over module_utils imports while excluding self
        module_util_imports = recurse_import(module_util)
        module_util_imports.remove(module_util)

        # add recursive imports to all path entries which import this module_util
        for target_path in imports_by_target_path:
            if module_util in imports_by_target_path[target_path]:
                for module_util_import in sorted(module_util_imports):
                    if module_util_import not in imports_by_target_path[target_path]:
                        display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
                        imports_by_target_path[target_path].add(module_util_import)

    # Invert the mapping: module_util name -> set of target paths using it.
    imports = dict([(module_util, set()) for module_util in module_utils | virtual_utils])

    for target_path in imports_by_target_path:
        for module_util in imports_by_target_path[target_path]:
            imports[module_util].add(target_path)

    # for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
    for virtual_util in virtual_utils:
        parent_package = '.'.join(virtual_util.split('.')[:-1])
        imports[virtual_util] = imports[parent_package]
        display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)

    for module_util in sorted(imports):
        if not len(imports[module_util]):
            display.warning('No imports found which use the "%s" module_util.' % module_util)

    return imports
def enumerate_module_utils():
    """Return a list of available module_utils imports.
    :rtype: set[str]
    """
    base_path = 'lib/ansible/module_utils'
    found = set()

    for root, _, file_names in os.walk(base_path):
        for file_name in file_names:
            path = os.path.join(root, file_name)
            name, ext = os.path.splitext(file_name)

            # the package root itself is not an importable module_util
            if path == 'lib/ansible/module_utils/__init__.py':
                continue

            if ext != '.py':
                continue

            # packages are identified by their directory, modules by their file
            if name == '__init__':
                module_util = root
            else:
                module_util = os.path.join(root, name)

            # strip the leading 'lib/' prefix and convert to dotted form
            found.add(module_util[4:].replace('/', '.'))

    return found
def extract_python_module_utils_imports(path, module_utils):
    """Return a list of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :rtype: set[str]
    """
    with open(path, 'r') as source_fd:
        source = source_fd.read()

    try:
        syntax_tree = ast.parse(source)
    except SyntaxError as syntax_error:
        # Setting the full path to the filename results in only the filename being given for str(ex).
        # As a work-around, set the filename to a UUID and replace it in the final string output with the actual path.
        syntax_error.filename = str(uuid.uuid4())
        message = str(syntax_error).replace(syntax_error.filename, path)
        raise ApplicationError('AST parse error: %s' % message)

    visitor = ModuleUtilFinder(path, module_utils)
    visitor.visit(syntax_tree)

    return visitor.imports
class ModuleUtilFinder(ast.NodeVisitor):
    """AST visitor to find valid module_utils imports."""
    def __init__(self, path, module_utils):
        """Return a list of module_utils imports found in the specified source file.
        :type path: str
        :type module_utils: set[str]
        """
        self.path = path
        self.module_utils = module_utils
        self.imports = set()

        # implicitly import parent package
        if path.endswith('/__init__.py'):
            path = os.path.split(path)[0]

        if path.startswith('lib/ansible/module_utils/'):
            # Derive the dotted package name from the file's directory,
            # dropping the leading 'lib/' prefix.
            package = os.path.split(path)[0].replace('/', '.')[4:]

            if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
                self.add_import(package, 0)

    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_Import(self, node):
        """
        :type node: ast.Import
        """
        self.generic_visit(node)

        for alias in node.names:
            if alias.name.startswith('ansible.module_utils.'):
                # import ansible.module_utils.MODULE[.MODULE]
                self.add_import(alias.name, node.lineno)

    # noinspection PyPep8Naming
    # pylint: disable=locally-disabled, invalid-name
    def visit_ImportFrom(self, node):
        """
        :type node: ast.ImportFrom
        """
        self.generic_visit(node)

        if not node.module:
            return

        if node.module == 'ansible.module_utils' or node.module.startswith('ansible.module_utils.'):
            for alias in node.names:
                # from ansible.module_utils import MODULE[, MODULE]
                # from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
                self.add_import('%s.%s' % (node.module, alias.name), node.lineno)

    def add_import(self, name, line_number):
        """Record a module_utils import, if it resolves to a known one.
        :type name: str
        :type line_number: int
        """
        import_name = name

        # Trim trailing dotted components until a known module_util matches;
        # this maps e.g. 'ansible.module_utils.mod.attr' back to
        # 'ansible.module_utils.mod'.
        while len(name) > len('ansible.module_utils.'):
            if name in self.module_utils:
                if name not in self.imports:
                    display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
                    self.imports.add(name)

                return  # duplicate imports are ignored

            name = '.'.join(name.split('.')[:-1])

        if self.path.startswith('test/'):
            return  # invalid imports in tests are ignored

        raise ApplicationError('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
|
bjolivot/ansible
|
test/runner/lib/import_analysis.py
|
Python
|
gpl-3.0
| 8,577
|
[
"VisIt"
] |
c39ccdc30d64b0e8514a55371d798ce90c45852ec9cbb395ca73955605de1f68
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
This module has methods for parsing names and versions of packages from URLs.
The idea is to allow package creators to supply nothing more than the
download location of the package, and figure out version and name information
from there.
**Example:** when spack is given the following URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz
It can figure out that the package name is ``hdf``, and that it is at version
``4.2.12``. This is useful for making the creation of packages simple: a user
just supplies a URL and skeleton code is generated automatically.
Spack can also figure out that it can most likely download 4.2.6 at this URL:
https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.6/src/hdf-4.2.6.tar.gz
This is useful if a user asks for a package at a particular version number;
spack doesn't need anyone to tell it where to get the tarball even though
it's never been told about that version before.
"""
import os
import re
from six import StringIO
from six.moves.urllib.parse import urlsplit, urlunsplit
import llnl.util.tty as tty
from llnl.util.tty.color import colorize
import spack.error
import spack.util.compression as comp
from spack.version import Version
#
# Note: We call the input to most of these functions a "path" but the functions
# work on paths and URLs. There's not a good word for both of these, but
# "path" seemed like the most generic term.
#
def find_list_url(url):
    """Return a good "list" URL for the supplied download URL.

    The default answer is simply the dirname of the archive path.  A few
    well-known hosting sites get special treatment because their listing
    page differs from the download URL's directory:

    ========= =======================================================
    GitHub    https://github.com/<repo>/<name>/releases
    GitLab    https://gitlab.\*/<repo>/<name>/tags
    BitBucket https://bitbucket.org/<repo>/<name>/downloads/?tab=tags
    CRAN      https://\*.r-project.org/src/contrib/Archive/<name>
    ========= =======================================================

    Parameters:
        url (str): The download URL for the package

    Returns:
        str: The list URL for the package
    """
    handlers = (
        # GitHub
        # e.g. https://github.com/llnl/callpath/archive/v1.0.1.tar.gz
        (r'(.*github\.com/[^/]+/[^/]+)',
         lambda m: m.group(1) + '/releases'),

        # GitLab
        # e.g. https://gitlab.dkrz.de/k202009/libaec/uploads/631e85bcf877c2dcaca9b2e6d6526339/libaec-1.0.0.tar.gz
        (r'(.*gitlab[^/]+/[^/]+/[^/]+)',
         lambda m: m.group(1) + '/tags'),

        # BitBucket
        # e.g. https://bitbucket.org/eigen/eigen/get/3.3.3.tar.bz2
        (r'(.*bitbucket.org/[^/]+/[^/]+)',
         lambda m: m.group(1) + '/downloads/?tab=tags'),

        # CRAN
        # e.g. https://cran.r-project.org/src/contrib/Rcpp_0.12.9.tar.gz
        # e.g. https://cloud.r-project.org/src/contrib/rgl_0.98.1.tar.gz
        (r'(.*\.r-project\.org/src/contrib)/([^_]+)',
         lambda m: m.group(1) + '/Archive/' + m.group(2)),
    )

    for pattern, handler in handlers:
        match = re.search(pattern, url)
        if match:
            return handler(match)

    # No special-case host matched: fall back to the directory.
    return os.path.dirname(url)
def strip_query_and_fragment(path):
    """Split a URL into (path-without-query/fragment, removed-suffix).

    The suffix contains whatever '?query' and/or '#fragment' text was
    removed, so the caller can re-attach it later.  URL parse errors are
    logged and ignored, returning the path unchanged.
    """
    try:
        parts = urlsplit(path)
        query, frag = parts.query, parts.fragment

        suffix = ('?' + query if query else '') + ('#' + frag if frag else '')

        return (urlunsplit(parts[:3] + (None, None)), suffix)

    except ValueError:
        tty.debug("Got error parsing path %s" % path)
        return (path, '')  # Ignore URL parse errors here
def strip_version_suffixes(path):
    """Some tarballs contain extraneous information after the version:

    * ``bowtie2-2.2.5-source``
    * ``libevent-2.0.21-stable``
    * ``cuda_8.0.44_linux.run``

    These strings are not part of the version number and should be ignored.
    This function strips those suffixes off and returns the remaining string.
    The goal is that the version is always the last thing in ``path``:

    * ``bowtie2-2.2.5``
    * ``libevent-2.0.21``
    * ``cuda_8.0.44``

    Args:
        path (str): The filename or URL for the package

    Returns:
        str: The ``path`` with any extraneous suffixes removed
    """
    # NOTE: This could be done with complicated regexes in parse_version_offset
    # NOTE: The problem is that we would have to add these regexes to the end
    # NOTE: of every single version regex. Easier to just strip them off
    # NOTE: permanently

    # Each entry is tried once, in order; stacked suffixes are removed
    # because later entries can expose earlier ones at the end of the path.
    suffix_regexes = [
        # Download type
        '[Ii]nstall',
        'all',
        'src(_0)?',
        '[Ss]ources?',
        'file',
        'full',
        'single',
        'public',
        'with[a-zA-Z_-]+',
        'bin',
        'binary',
        'run',
        '[Uu]niversal',
        'jar',
        'complete',
        'dynamic',
        'oss',
        'gem',
        'tar',
        'sh',

        # Download version
        'release',
        'stable',
        '[Ff]inal',
        'rel',
        'orig',
        'dist',
        '\+',

        # License
        'gpl',

        # Arch
        # Needs to come before and after OS, appears in both orders
        'ia32',
        'intel',
        'amd64',
        'x64',
        'x86_64',
        'x86',
        'i[36]86',
        'ppc64(le)?',
        'armv?(7l|6l|64)',

        # OS
        '[Ll]inux(_64)?',
        '[Uu]ni?x',
        '[Ss]un[Oo][Ss]',
        '[Mm]ac[Oo][Ss][Xx]?',
        '[Oo][Ss][Xx]',
        '[Dd]arwin(64)?',
        '[Aa]pple',
        '[Ww]indows',
        '[Ww]in(64|32)?',
        '[Cc]ygwin(64|32)?',
        '[Mm]ingw',

        # Arch
        # Needs to come before and after OS, appears in both orders
        'ia32',
        'intel',
        'amd64',
        'x64',
        'x86_64',
        'x86',
        'i[36]86',
        'ppc64(le)?',
        'armv?(7l|6l|64)?',

        # PyPI
        '[._-]py[23].*\.whl',
        '[._-]cp[23].*\.whl',
        '[._-]win.*\.exe',
    ]

    for regex in suffix_regexes:
        # Remove the suffix from the end of the path
        # This may be done multiple times
        path = re.sub(r'[._-]?' + regex + '$', '', path)

    return path
def strip_name_suffixes(path, version):
    """Most tarballs contain a package name followed by a version number.
    However, some also contain extraneous information in-between the name
    and version:

    * ``rgb-1.0.6``
    * ``converge_install_2.3.16``
    * ``jpegsrc.v9b``

    These strings are not part of the package name and should be ignored.
    This function strips the version number and any extraneous suffixes
    off and returns the remaining string. The goal is that the name is
    always the last thing in ``path``:

    * ``rgb``
    * ``converge``
    * ``jpeg``

    Args:
        path (str): The filename or URL for the package
        version (str): The version detected for this URL

    Returns:
        str: The ``path`` with any extraneous suffixes removed
    """
    # NOTE: This could be done with complicated regexes in parse_name_offset
    # NOTE: The problem is that we would have to add these regexes to every
    # NOTE: single name regex. Easier to just strip them off permanently

    # The version-based entries must come first so everything after the
    # version is discarded before the fixed suffixes are tried.
    suffix_regexes = [
        # Strip off the version and anything after it

        # name-ver
        # name_ver
        # name.ver
        r'[._-]v?' + str(version) + '.*',

        # namever
        str(version) + '.*',

        # Download type
        'install',
        'src',
        '(open)?[Ss]ources?',
        '[._-]archive',
        '[._-]std',

        # Download version
        'release',
        'snapshot',
        'distrib',

        # VCS
        '0\+bzr',

        # License
        'gpl',
    ]

    for regex in suffix_regexes:
        # Remove the suffix from the end of the path
        # This may be done multiple times
        path = re.sub('[._-]?' + regex + '$', '', path)

    return path
def split_url_extension(path):
    """Split a URL into ``(prefix, ext, suffix)``.

    Some URLs have a query string, e.g.:

    1. https://github.com/losalamos/CLAMR/blob/packages/PowerParser_v2.0.7.tgz?raw=true
    2. http://www.apache.org/dyn/closer.cgi?path=/cassandra/1.2.0/apache-cassandra-1.2.0-rc2-bin.tar.gz
    3. https://gitlab.kitware.com/vtk/vtk/repository/archive.tar.bz2?ref=v7.0.0

    In (1) the query string has to be stripped to find the extension; in
    (2) and (3) the filename lives inside the final query argument.  The
    suffix holds whatever was stripped to expose the extension, e.g.:

    1. ``('.../PowerParser_v2.0.7', '.tgz', '?raw=true')``
    2. ``('.../apache-cassandra-1.2.0-rc2-bin', '.tar.gz', None)``
    3. ``('.../repository/archive', '.tar.bz2', '?ref=v7.0.0')``
    """
    prefix, ext, suffix = path, '', ''

    # SourceForge appends a '/download' component after the real archive
    # name; peel it off first so the extension logic sees the filename.
    # e.g. https://sourceforge.net/projects/glew/files/glew/2.0.0/glew-2.0.0.tgz/download
    sf_match = re.search(r'(.*(?:sourceforge\.net|sf\.net)/.*)(/download)$', path)
    if sf_match:
        prefix, suffix = sf_match.groups()

    ext = comp.extension(prefix)
    if ext is not None:
        prefix = comp.strip_extension(prefix)
    else:
        # No recognizable extension: a query string may be in the way.
        prefix, stripped = strip_query_and_fragment(prefix)
        ext = comp.extension(prefix)
        prefix = comp.strip_extension(prefix)
        suffix = stripped + suffix
        if ext is None:
            ext = ''

    return prefix, ext, suffix
def determine_url_file_extension(path):
    """Return the archive type that a URL refers to.

    This is sometimes confusing because of URLs like:

        (1) https://github.com/petdance/ack/tarball/1.93_02

    where the URL doesn't actually contain the filename.  We need to know
    the type so files can be named appropriately in mirrors.
    """
    # GitHub's {zip,tar}ball URLs carry no filename at all, so map the
    # URL style directly to an archive type.
    tarball = re.search(r'github.com/.+/(zip|tar)ball/', path)
    if tarball:
        return {'zip': 'zip', 'tar': 'tar.gz'}[tarball.group(1)]

    # Otherwise take the extension reported by split_url_extension().
    return split_url_extension(path)[1]
def parse_version_offset(path):
    """Try to extract a version string from a filename or URL.

    Args:
        path (str): The filename or URL for the package

    Returns:
        tuple of (Version, int, int, int, str): A tuple containing:
            version of the package,
            first index of version,
            length of version string,
            the index of the matching regex
            the matching regex

    Raises:
        UndetectableVersionError: If the URL does not match any regexes
    """
    original_path = path

    # path:   The prefix of the URL, everything before the ext and suffix
    # ext:    The file extension
    # suffix: Any kind of query string that begins with a '?'
    path, ext, suffix = split_url_extension(path)

    # stem:   Everything from path after the final '/'
    original_stem = os.path.basename(path)

    # Try to strip off anything after the version number
    stem = strip_version_suffixes(original_stem)

    # Assumptions:
    #
    # 1. version always comes after the name
    # 2. separators include '-', '_', and '.'
    # 3. names can contain A-Z, a-z, 0-9, '+', separators
    # 4. versions can contain A-Z, a-z, 0-9, separators
    # 5. versions always start with a digit
    # 6. versions are often prefixed by a 'v' character
    # 7. separators are most reliable to determine name/version boundaries

    # List of the following format:
    #
    # [
    #     (regex, string),
    #     ...
    # ]
    #
    # The first regex that matches string will be used to determine
    # the version of the package. Thefore, hyperspecific regexes should
    # come first while generic, catch-all regexes should come last.
    # With that said, regular expressions are slow, so if possible, put
    # ones that only catch one or two URLs at the bottom.
    version_regexes = [
        # 1st Pass: Simplest case
        # Assume name contains no digits and version contains no letters
        # e.g. libpng-1.6.27
        (r'^[a-zA-Z+._-]+[._-]v?(\d[\d._-]*)$', stem),

        # 2nd Pass: Version only
        # Assume version contains no letters

        # ver
        # e.g. 3.2.7, 7.0.2-7, v3.3.0, v1_6_3
        (r'^v?(\d[\d._-]*)$', stem),

        # 3rd Pass: No separator characters are used
        # Assume name contains no digits

        # namever
        # e.g. turbolinux702, nauty26r7
        (r'^[a-zA-Z+]*(\d[\da-zA-Z]*)$', stem),

        # 4th Pass: A single separator character is used
        # Assume name contains no digits

        # name-name-ver-ver
        # e.g. panda-2016-03-07, gts-snapshot-121130, cdd-061a
        (r'^[a-zA-Z+-]*(\d[\da-zA-Z-]*)$', stem),

        # name_name_ver_ver
        # e.g. tinyxml_2_6_2, boost_1_55_0, tbb2017_20161128, v1_6_3
        (r'^[a-zA-Z+_]*(\d[\da-zA-Z_]*)$', stem),

        # name.name.ver.ver
        # e.g. prank.source.150803, jpegsrc.v9b, atlas3.11.34, geant4.10.01.p03
        (r'^[a-zA-Z+.]*(\d[\da-zA-Z.]*)$', stem),

        # 5th Pass: Two separator characters are used
        # Name may contain digits, version may contain letters

        # name-name-ver.ver
        # e.g. m4-1.4.17, gmp-6.0.0a, launchmon-v1.0.2
        (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),

        # name-name-ver_ver
        # e.g. icu4c-57_1
        (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z_]*)$', stem),

        # name_name_ver.ver
        # e.g. superlu_dist_4.1, pexsi_v0.9.0
        (r'^[a-zA-Z\d+_]+_v?(\d[\da-zA-Z.]*)$', stem),

        # name_name.ver.ver
        # e.g. fer_source.v696
        (r'^[a-zA-Z\d+_]+\.v?(\d[\da-zA-Z.]*)$', stem),

        # name-name-ver.ver-ver.ver
        # e.g. sowing-1.1.23-p1, bib2xhtml-v3.0-15-gf506, 4.6.3-alpha04
        (r'^(?:[a-zA-Z\d+-]+-)?v?(\d[\da-zA-Z.-]*)$', stem),

        # namever.ver-ver.ver
        # e.g. go1.4-bootstrap-20161024
        (r'^[a-zA-Z+]+v?(\d[\da-zA-Z.-]*)$', stem),

        # 6th Pass: All three separator characters are used
        # Name may contain digits, version may contain letters

        # name_name-ver.ver
        # e.g. the_silver_searcher-0.32.0, sphinx_rtd_theme-0.1.10a0
        (r'^[a-zA-Z\d+_]+-v?(\d[\da-zA-Z.]*)$', stem),

        # name.name_ver.ver-ver.ver
        # e.g. TH.data_1.0-8, XML_3.98-1.4
        (r'^[a-zA-Z\d+.]+_v?(\d[\da-zA-Z.-]*)$', stem),

        # name-name-ver.ver_ver.ver
        # e.g. pypar-2.1.5_108
        (r'^[a-zA-Z\d+-]+-v?(\d[\da-zA-Z._]*)$', stem),

        # name.name_name-ver.ver
        # e.g. tap.py-1.6, backports.ssl_match_hostname-3.5.0.1
        (r'^[a-zA-Z\d+._]+-v?(\d[\da-zA-Z.]*)$', stem),

        # name-namever.ver_ver.ver
        # e.g. STAR-CCM+11.06.010_02
        (r'^[a-zA-Z+-]+(\d[\da-zA-Z._]*)$', stem),

        # 7th Pass: Specific VCS

        # bazaar
        # e.g. libvterm-0+bzr681
        (r'bzr(\d[\da-zA-Z._-]*)$', stem),

        # 8th Pass: Version in path

        # github.com/repo/name/releases/download/vver/name
        # e.g. https://github.com/nextflow-io/nextflow/releases/download/v0.20.1/nextflow
        (r'github\.com/[^/]+/[^/]+/releases/download/[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)/', path),  # noqa

        # 9th Pass: Query strings

        # e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
        (r'\?ref=[a-zA-Z+._-]*v?(\d[\da-zA-Z._-]*)$', suffix),

        # e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
        (r'\?version=v?(\d[\da-zA-Z._-]*)$', suffix),

        # e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
        (r'\?filename=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),

        # e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
        (r'\?package=[a-zA-Z\d+-]+&get=[a-zA-Z\d+-]+-v?(\d[\da-zA-Z.]*)$', stem),  # noqa
    ]

    for i, version_regex in enumerate(version_regexes):
        regex, match_string = version_regex
        match = re.search(regex, match_string)
        if match and match.group(1) is not None:
            version = match.group(1)
            start = match.start(1)

            # If we matched from the stem or suffix, we need to add offset
            # so that `start` indexes into the original full path.
            offset = 0
            if match_string is stem:
                offset = len(path) - len(original_stem)
            elif match_string is suffix:
                offset = len(path)
                if ext:
                    offset += len(ext) + 1  # .tar.gz is converted to tar.gz
            start += offset

            return version, start, len(version), i, regex

    raise UndetectableVersionError(original_path)
def parse_version(path):
    """Try to extract a version string from a filename or URL.

    Args:
        path (str): The filename or URL for the package

    Returns:
        spack.version.Version: The version of the package

    Raises:
        UndetectableVersionError: If the URL does not match any regexes
    """
    # Only the version string itself is needed here; the offset details
    # returned by parse_version_offset() are used elsewhere.
    return Version(parse_version_offset(path)[0])
def parse_name_offset(path, v=None):
    """Try to determine the name of a package from its filename or URL.

    Args:
        path (str): The filename or URL for the package
        v (str): The version of the package

    Returns:
        tuple of (str, int, int, int, str): A tuple containing:
            name of the package,
            first index of name,
            length of name,
            the index of the matching regex,
            the matching regex

    Raises:
        UndetectableNameError: If the URL does not match any regexes
    """
    original_path = path

    # Knowing the version up front helps avoid confusing parts of the
    # version with parts of the name.
    if v is None:
        try:
            v = parse_version(path)
        except UndetectableVersionError:
            # Not every URL carries a version; a name may still be found.
            v = 'unknown'

    # path:   everything before the extension and query suffix
    # ext:    the file extension
    # suffix: any query string beginning with '?'
    path, ext, suffix = split_url_extension(path)

    # stem: the final path component, with trailing junk stripped off
    original_stem = os.path.basename(path)
    stem = strip_name_suffixes(original_stem, v)

    # (regex, candidate string) pairs; the first regex that matches its
    # candidate determines the name.  Therefore hyper-specific patterns
    # come first and generic catch-alls come last.  Regexes are slow, so
    # patterns that only catch one or two URLs sit at the bottom.
    name_regexes = [
        # 1st Pass: Common repositories

        # GitHub: github.com/repo/name/
        # e.g. https://github.com/nco/nco/archive/4.6.2.tar.gz
        (r'github\.com/[^/]+/([^/]+)', path),

        # GitLab: gitlab.*/repo/name/
        # e.g. http://gitlab.cosma.dur.ac.uk/swift/swiftsim/repository/archive.tar.gz?ref=v0.3.0
        (r'gitlab[^/]+/[^/]+/([^/]+)', path),

        # Bitbucket: bitbucket.org/repo/name/
        # e.g. https://bitbucket.org/glotzer/hoomd-blue/get/v1.3.3.tar.bz2
        (r'bitbucket\.org/[^/]+/([^/]+)', path),

        # PyPI: pypi.(python.org|io)/packages/source/first-letter/name/
        # e.g. https://pypi.python.org/packages/source/m/mpmath/mpmath-all-0.19.tar.gz
        # e.g. https://pypi.io/packages/source/b/backports.ssl_match_hostname/backports.ssl_match_hostname-3.5.0.1.tar.gz
        (r'pypi\.(?:python\.org|io)/packages/source/[A-Za-z\d]/([^/]+)', path),

        # 2nd Pass: Query strings

        # ?filename=name-ver.ver
        # e.g. http://slepc.upv.es/download/download.php?filename=slepc-3.6.2.tar.gz
        (r'\?filename=([A-Za-z\d+-]+)$', stem),

        # ?package=name
        # e.g. http://wwwpub.zih.tu-dresden.de/%7Emlieber/dcount/dcount.php?package=otf&get=OTF-1.12.5salmon.tar.gz
        (r'\?package=([A-Za-z\d+-]+)', stem),

        # download.php
        # e.g. http://apps.fz-juelich.de/jsc/sionlib/download.php?version=1.7.1
        (r'([^/]+)/download.php$', path),

        # 3rd Pass: Name followed by version in archive
        (r'^([A-Za-z\d+\._-]+)$', stem),
    ]

    for idx, (regex, candidate) in enumerate(name_regexes):
        m = re.search(regex, candidate)
        if not m:
            continue
        name = m.group(1)
        start = m.start(1)

        # Offsets from stem/suffix matches are relative to the candidate
        # string; translate them back into coordinates of the full path.
        if candidate is stem:
            start += len(path) - len(original_stem)
        elif candidate is suffix:
            start += len(path)
            if ext:
                start += len(ext) + 1   # .tar.gz was converted to tar.gz

        return name, start, len(name), idx, regex

    raise UndetectableNameError(original_path)
def parse_name(path, ver=None):
    """Try to determine the name of a package from its filename or URL.

    Args:
        path (str): The filename or URL for the package
        ver (str): The version of the package

    Returns:
        str: The name of the package

    Raises:
        UndetectableNameError: If the URL does not match any regexes
    """
    # Only the name itself is needed; discard the offset information.
    return parse_name_offset(path, ver)[0]
def parse_name_and_version(path):
    """Determine both the name and the version of a package from its
    filename or URL.

    Args:
        path (str): The filename or URL for the package

    Returns:
        tuple of (str, Version): A tuple containing:
            The name of the package
            The version of the package

    Raises:
        UndetectableVersionError: If the URL does not match any regexes
        UndetectableNameError: If the URL does not match any regexes
    """
    # Parse the version first: knowing it improves name detection.
    version = parse_version(path)
    return (parse_name(path, version), version)
def insensitize(string):
"""Change upper and lowercase letters to be case insensitive in
the provided string. e.g., 'a' becomes '[Aa]', 'B' becomes
'[bB]', etc. Use for building regexes."""
def to_ins(match):
char = match.group(1)
return '[%s%s]' % (char.lower(), char.upper())
return re.sub(r'([a-zA-Z])', to_ins, string)
def cumsum(elts, init=0, fn=lambda x: x):
    """Return the exclusive cumulative sum of ``fn(e)`` over ``elts``.

    The i-th entry of the result is ``init + sum(fn(e) for e in elts[:i])``;
    the grand total over all elements is NOT included, so the result has
    exactly ``len(elts)`` entries.

    Args:
        elts: iterable of elements
        init: starting value of the running sum
        fn: transform applied to each element before summing

    Returns:
        list: the running sums, one per element of ``elts``
    """
    sums = []
    s = init
    # The original enumerate() index was never used; iterate values directly.
    for e in elts:
        sums.append(s)
        s += fn(e)
    return sums
def find_all(substring, string):
    """Return the indices of every non-overlapping occurrence of
    *substring* in *string*, in increasing order."""
    occurrences = []
    pos = string.find(substring)
    while pos != -1:
        occurrences.append(pos)
        # Resume searching just past this occurrence (non-overlapping).
        pos = string.find(substring, pos + len(substring))
    return occurrences
def substitution_offsets(path):
    """Compute the offsets of the name and version within *path*, for use
    by :func:`substitute_version`.

    Returns an 8-tuple ``(name, ns, nl, name_offsets, ver, vs, vl,
    ver_offsets)``; whichever piece could not be detected is reported as
    ``(None, -1, -1, ())``.
    """
    try:
        ver, vs, vl, vi, vregex = parse_version_offset(path)
    except UndetectableVersionError:
        # No version found — a name might still be detectable.
        try:
            name, ns, nl, ni, nregex = parse_name_offset(path)
        except UndetectableNameError:
            return (None, -1, -1, (), None, -1, -1, ())
        return (name, ns, nl, (ns,), None, -1, -1, ())

    try:
        name, ns, nl, ni, nregex = parse_name_offset(path, ver)
    except UndetectableNameError:
        # Version found but no name: report only the parsed version offset.
        return (None, -1, -1, (), ver, vs, vl, (vs,))

    # Every occurrence of the name/version in the path is a candidate
    # substitution site.
    return (name, ns, nl, find_all(name, path),
            ver, vs, vl, find_all(ver, path))
def wildcard_version(path):
    """Find the version in the supplied path, and return a regular
    expression that will match this path with any version in its place.

    Args:
        path (str): The filename or URL for the package

    Raises:
        UndetectableVersionError: If no version is found in *path*.
    """
    # Get the version so we can replace it with a wildcard.
    version = parse_version(path)

    # Split the path around every occurrence of the version.
    vparts = path.split(str(version))

    # Escape the literal pieces (so they are not interpreted as regex) and
    # join them with a capture group matching any version.  Use a raw
    # string so ``\d`` is a regex digit class, not an invalid string escape.
    return r'(\d.*)'.join(re.escape(vp) for vp in vparts)
def substitute_version(path, new_version):
    """Given a URL or archive name, find the version in the path and
    substitute the new version for it.  Replace all occurrences of
    the version *if* they don't overlap with the package name.

    Simple example:

    .. code-block:: python

       substitute_version('http://www.mr511.de/software/libelf-0.8.13.tar.gz', '2.9.3')
       >>> 'http://www.mr511.de/software/libelf-2.9.3.tar.gz'

    Complex example:

    .. code-block:: python

       substitute_version('https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.12/src/hdf-4.2.12.tar.gz', '2.3')
       >>> 'https://www.hdfgroup.org/ftp/HDF/releases/HDF2.3/src/hdf-2.3.tar.gz'
    """
    (name, ns, nl, noffs,
     ver, vs, vl, voffs) = substitution_offsets(path)

    # Rebuild the path piecewise: literal text between version sites,
    # the new version at each site.
    pieces = []
    prev_end = 0
    for offset in voffs:
        pieces.append(path[prev_end:offset])
        pieces.append(str(new_version))
        prev_end = offset + vl
    pieces.append(path[prev_end:])
    return ''.join(pieces)
def color_url(path, **kwargs):
    """Color the parts of the url according to Spack's parsing.

    Colors are:
       | Cyan: The version found by :func:`parse_version_offset`.
       | Red: The name found by :func:`parse_name_offset`.

       | Green: Instances of version string from :func:`substitute_version`.
       | Magenta: Instances of the name (protected from substitution).

    Args:
        path (str): The filename or URL for the package
        errors (bool): Append parse errors at end of string.
        subs (bool): Color substitutions as well as parsed name/version.
    """
    errors = kwargs.get('errors', False)
    subs = kwargs.get('subs', False)
    (name, ns, nl, noffs,
     ver, vs, vl, voffs) = substitution_offsets(path)
    # End indices (inclusive) of every substitution occurrence.
    nends = [no + nl - 1 for no in noffs]
    vends = [vo + vl - 1 for vo in voffs]
    # nerr/verr count how many of the open/close color markers fired for
    # the parsed name/version; 0 => not found, 1 => opened but not closed.
    nerr = verr = 0
    out = StringIO()
    # Walk the path one character at a time, emitting color-open markers
    # before and color-close markers after the regions of interest.
    for i in range(len(path)):
        if i == vs:
            out.write('@c')
            verr += 1
        elif i == ns:
            out.write('@r')
            nerr += 1
        elif subs:
            if i in voffs:
                out.write('@g')
            elif i in noffs:
                out.write('@m')
        out.write(path[i])
        if i == vs + vl - 1:
            out.write('@.')
            verr += 1
        elif i == ns + nl - 1:
            out.write('@.')
            nerr += 1
        elif subs:
            if i in vends or i in nends:
                out.write('@.')
    if errors:
        if nerr == 0:
            out.write(" @r{[no name]}")
        if verr == 0:
            out.write(" @r{[no version]}")
        if nerr == 1:
            out.write(" @r{[incomplete name]}")
        if verr == 1:
            out.write(" @r{[incomplete version]}")
    return colorize(out.getvalue())
class UrlParseError(spack.error.SpackError):
    """Raised when the URL module can't parse something correctly."""

    def __init__(self, msg, path):
        super(UrlParseError, self).__init__(msg)
        # Keep the offending path so callers can include it in reports.
        self.path = path
class UndetectableVersionError(UrlParseError):
    """Raised when we can't parse a version from a string."""

    def __init__(self, path):
        # The message embeds the path; the path is also stored separately
        # by the UrlParseError base class.
        super(UndetectableVersionError, self).__init__(
            "Couldn't detect version in: " + path, path)
class UndetectableNameError(UrlParseError):
    """Raised when we can't parse a package name from a string."""

    def __init__(self, path):
        # The message embeds the path; the path is also stored separately
        # by the UrlParseError base class.
        super(UndetectableNameError, self).__init__(
            "Couldn't parse package name in: " + path, path)
|
skosukhin/spack
|
lib/spack/spack/url.py
|
Python
|
lgpl-2.1
| 29,938
|
[
"HOOMD-blue",
"VTK"
] |
aaae56526771c58d1659ab4042bd52cb6775715cf2593e4f186965fb04661b45
|
from __future__ import division, print_function
from CompyledFunc import CompyledFunc
from copy import deepcopy
from frozendict import frozendict
from HelpyFuncs.SymPy import is_non_atomic_sympy_expr, sympy_xreplace
from HelpyFuncs.Dicts import combine_dict_and_kwargs, merge_dicts_ignoring_dup_keys_and_none_values
from HelpyFuncs.zzz import shift_time_subscripts
from itertools import product
from MathDict import exp as exp_math_dict, MathDict
from MathFunc import MathFunc
from numpy.linalg import inv, slogdet
from pprint import pprint
from scipy.stats import uniform, multivariate_normal
from sympy import exp, log, pi, pprint as sympy_print
from sympy.matrices import BlockMatrix, det, Matrix
# Reusable default callbacks.  PEP 8 (E731) prefers ``def`` over a lambda
# assigned to a name: the functions get proper names in tracebacks.
def DO_NOTHING_FUNC(*args, **kwargs):
    """Accept any arguments and return None (placeholder callback)."""
    return None


def SELF_FUNC(x):
    """Identity function: return the argument unchanged."""
    return x


def ZERO_FUNC(*args, **kwargs):
    """Accept any arguments and return 0.0."""
    return .0
class PDF(MathFunc):
    """Probability density / mass function.

    Wraps a MathFunc whose mapping is the density ``exp(-neg_log_dens)``
    (a MathDict of masses for discrete-finite families, a sympy expression
    otherwise) and stores family-specific callbacks for normalization,
    maximization, marginalization, conditioning and sampling.
    """
    def __init__(self, family='', var_names_and_syms={}, param={}, cond={}, scope={},
                 neg_log_dens_func=DO_NOTHING_FUNC, norm_func=DO_NOTHING_FUNC, max_func=DO_NOTHING_FUNC,
                 marg_func=DO_NOTHING_FUNC, cond_func=DO_NOTHING_FUNC, sample_func=DO_NOTHING_FUNC, compile=False):
        # NOTE(review): the ``{}`` defaults are shared mutable objects;
        # this is safe only if no code path mutates them — TODO confirm.
        self.Family = family
        self.Param = param
        if hasattr(self, 'Mapping'):
            # A subclass (or a copy) has already supplied the density mapping.
            dens = self.Mapping
        else:
            self.NegLogDensFunc = neg_log_dens_func
            neg_log_dens = neg_log_dens_func(self, var_names_and_syms)
            if self.is_discrete_finite():
                # Discrete-finite: mapping is a MathDict of point masses.
                dens = exp_math_dict(-neg_log_dens)
            else:
                # Continuous: mapping is a symbolic density expression.
                dens = exp(-neg_log_dens)
        MathFunc.__init__(self, var_names_and_syms=var_names_and_syms, mapping=dens, param=param,
                          cond=cond, scope=scope, compile=compile)
        self.NormFunc = norm_func
        self.MaxFunc = max_func
        self.MargFunc = marg_func
        self.CondFunc = cond_func
        self.SampleFunc = sample_func

    def is_discrete_finite(self):
        """Whether this is a PMF over finitely many points."""
        return self.Family == 'DiscreteFinite'

    def is_one(self):
        """Whether this is the constant-one (multiplicatively neutral) PDF."""
        return self.Family == 'One'

    def is_uniform(self):
        """Whether this is a uniform PDF."""
        return self.Family == 'Uniform'

    def is_gaussian(self):
        """Whether this is a Gaussian PDF."""
        return self.Family == 'Gaussian'

    def at(self, var_and_param_names_and_values={}, **kw_var_and_param_names_and_values):
        """Instantiate this PDF at the given variable/parameter values.

        Returns a new DiscreteFinitePMF (discrete case) or a copy of this
        PDF (continuous case) with the values substituted into the
        condition, scope, parameters and mapping.
        """
        var_and_param_names_and_values =\
            combine_dict_and_kwargs(var_and_param_names_and_values, kw_var_and_param_names_and_values)
        cond = deepcopy(self.Cond)   # just to be careful
        scope = deepcopy(self.Scope)   # just to be careful
        param = self.Param.copy()
        # Map sympy symbols -> concrete values for xreplace below.
        syms_and_values = {}
        for var, value in var_and_param_names_and_values.items():
            if var in self.Vars:
                if var in cond:
                    cond[var] = value
                if var in scope:
                    scope[var] = value
                syms_and_values[self.Vars[var]] = value
            if var in param:
                try:
                    syms_and_values[param[var]] = value
                # NOTE(review): bare except — presumably guards against
                # unhashable parameter values; consider narrowing to
                # TypeError.  TODO confirm what can actually raise here.
                except:
                    pass
                param[var] = value
        cond = sympy_xreplace(cond, syms_and_values)
        scope = sympy_xreplace(scope, syms_and_values)
        # Invalidate the compiled version: many attributes may be changing.
        self.CompyledFunc = None
        if self.is_discrete_finite():
            neg_log_p = {}
            s = set(var_and_param_names_and_values)
            s_items = set(var_and_param_names_and_values.items())
            for var_values___frozen_dict, mapping_value in param['NegLogP'].items():
                other_items___dict = dict(set(var_values___frozen_dict.items()) - s_items)
                # Keep only entries that do not contradict the fixed values.
                if not (set(other_items___dict) & s):
                    neg_log_p[frozendict(set(var_values___frozen_dict.items()) - set(cond.items()))] =\
                        sympy_xreplace(mapping_value, syms_and_values)
            return DiscreteFinitePMF(var_names_and_syms=self.Vars.copy(), p_or_neg_log_p=neg_log_p, p=False,
                                     cond=cond, scope=scope)
        else:
            pdf = self.copy()
            pdf.Cond = cond
            pdf.Scope = scope
            pdf.Param = sympy_xreplace(param, syms_and_values)
            pdf.Mapping = sympy_xreplace(pdf.Mapping, syms_and_values)
            return pdf

    def norm(self):
        """Normalize via the family-specific callback."""
        return self.NormFunc(self)

    def max(self, **kwargs):
        """Maximize via the family-specific callback."""
        return self.MaxFunc(self, **kwargs)

    def marg(self, *marginalized_vars):
        """Marginalize out the given variables via the family callback."""
        return self.MargFunc(self, *marginalized_vars)

    def cond(self, cond={}, **kw_cond):
        """Condition on the given variable values via the family callback."""
        cond = combine_dict_and_kwargs(cond, kw_cond)
        return self.CondFunc(self, cond)

    def sample(self, num_samples=1):
        """Draw samples via the family-specific callback."""
        return self.SampleFunc(self, num_samples)

    def __mul__(self, probability_density_function_to_multiply):
        return product_of_2_PDFs(self, probability_density_function_to_multiply)

    def __rmul__(self, probability_density_function_to_multiply):
        return product_of_2_PDFs(probability_density_function_to_multiply, self)

    def multiply(self, *probability_density_functions_to_multiply):
        """Multiply this PDF by each of the given PDFs in turn."""
        pdf = self.copy()
        for pdf_to_multiply in probability_density_functions_to_multiply:
            pdf = pdf.__mul__(pdf_to_multiply)
        return pdf

    def pprint(self):
        """Pretty-print family, variables, condition, scope and density."""
        discrete = self.is_discrete_finite()
        if discrete:
            print('DISCRETE FINITE MASS FUNCTION')
            print('_____________________________')
        else:
            print('CONTINUOUS DENSITY FUNCTION')
            print('___________________________')
        print('FAMILY:', self.Family)
        print("VARIABLES' SYMBOLS:")
        pprint(self.Vars)
        print('CONDITION:')
        pprint(self.Cond)
        print('SCOPE:')
        pprint(self.Scope)
        if not discrete:
            print('PARAMETERS:')
            pprint(self.Param)
            print('DENSITY:')
        else:
            print('MASS:')
        d = self()
        sympy_print(d)
        if discrete:
            # Sanity check: a normalized PMF sums to 1.
            print(' sum =', sum(d.values()))

    def shift_time_subscripts(self, t):
        """Return a copy with all time subscripts shifted by *t*."""
        pdf = self.copy()
        pdf.Vars = shift_time_subscripts(pdf.Vars, t)
        pdf.Cond = shift_time_subscripts(pdf.Cond, t)
        pdf.Scope = shift_time_subscripts(pdf.Scope, t)
        pdf.Param = shift_time_subscripts(pdf.Param, t)
        return pdf
def p_from_neg_log_p(expr_or_dict):
    """Convert negative log probabilities to probabilities.

    Accepts either a single expression or a dict-like mapping; returns
    exp(-x) applied elementwise (as a MathDict) or to the expression.
    """
    if not hasattr(expr_or_dict, 'keys'):
        return exp(-expr_or_dict)
    probabilities = MathDict()
    for key, neg_log_value in expr_or_dict.items():
        probabilities[key] = exp(-neg_log_value)
    return probabilities
def product_of_2_PDFs(pdf0, pdf1):
    """Dispatch the product of two PDFs to the family-specific routine.

    Discrete-finite factors absorb the other factor; 'One' is neutral
    against a Gaussian; two Gaussians multiply as Gaussians.
    """
    if pdf0.is_discrete_finite():
        if pdf1.is_discrete_finite():
            return product_of_2_DiscreteFinitePMFs(pdf0, pdf1)
        return product_of_DiscreteFinitePMF_and_continuousPDF(pdf0, pdf1)
    if pdf1.is_discrete_finite():
        return product_of_DiscreteFinitePMF_and_continuousPDF(pdf1, pdf0)
    families = (pdf0.Family, pdf1.Family)
    if families == ('One', 'Gaussian'):
        return product_of_OnePDF_and_GaussPDF(pdf0, pdf1)
    if families == ('Gaussian', 'One'):
        return product_of_OnePDF_and_GaussPDF(pdf1, pdf0)
    if families == ('Gaussian', 'Gaussian'):
        return product_of_2_GaussPDFs(pdf0, pdf1)
    # Unsupported combination: fall through returning None (as before).
class DiscreteFinitePMF(PDF):
    """PMF over a finite set of variable-value combinations.

    The mass table is stored as ``Param['NegLogP']``: a MathDict mapping
    ``frozendict(var -> value)`` to negative log probability.
    """
    def __init__(self, var_names_and_syms={}, p_or_neg_log_p={}, p=True, cond={}, scope={}):
        # Scope entries with value None are placeholders, not restrictions.
        non_none_scope = {var: value for var, value in scope.items() if value is not None}
        if p:
            # Entries are probabilities: convert to negative logs.
            f = lambda x: -log(x)
        else:
            # Entries are already negative logs: keep as-is.
            f = lambda x: x
        # Keep only entries consistent with the (non-None) scope values.
        p_or_neg_log_p = MathDict({var_values___frozen_dict: f(func_value)
                                   for var_values___frozen_dict, func_value in p_or_neg_log_p.items()
                                   if set(var_values___frozen_dict.items()) >= set(non_none_scope.items())})
        PDF.__init__(self, family='DiscreteFinite', var_names_and_syms=var_names_and_syms,
                     param=dict(NegLogP=p_or_neg_log_p), cond=cond, scope=non_none_scope,
                     neg_log_dens_func=discrete_finite_neg_log_mass, norm_func=discrete_finite_norm,
                     max_func=discrete_finite_max, marg_func=discrete_finite_marg, cond_func=discrete_finite_cond,
                     sample_func=DO_NOTHING_FUNC)

    def allclose(self, *PMFs, **kwargs):
        """Whether self and all given PMFs share vars/cond/scope and have
        numerically close NegLogP tables (kwargs forwarded to MathDict.allclose)."""
        for pmf in PMFs:
            if not ((self.Vars == pmf.Vars) and (self.Cond == pmf.Cond) and (self.Scope == pmf.Scope) and
                    self.Param['NegLogP'].allclose(pmf.Param['NegLogP'], **kwargs)):
                return False
        return True
def discrete_finite_neg_log_mass(pmf, var_names_and_values={}):
    """Return the negative-log-mass entries of *pmf* that are compatible
    with the given concrete variable values, with symbols substituted."""
    # Keep only concrete bindings: drop Nones and non-atomic sympy exprs.
    fixed = {var: value for var, value in var_names_and_values.items()
             if value is not None and not is_non_atomic_sympy_expr(value)}
    fixed_items = set(fixed.items())
    result = MathDict(())
    for var_names_and_values___frozen_dict, func_value in pmf.Param['NegLogP'].items():
        # Fixed bindings not matched verbatim by this entry...
        spare = dict(fixed_items - set(var_names_and_values___frozen_dict.items()))
        # ...must not clash with a variable the entry assigns differently.
        if not (set(spare) & set(var_names_and_values___frozen_dict)):
            result[var_names_and_values___frozen_dict] = sympy_xreplace(func_value, var_names_and_values)
    return result
def discrete_finite_norm(pmf):
    """Return a copy of *pmf* normalized within each condition instance
    (adds log of the per-condition total mass to each neg-log entry)."""
    normalized = pmf.copy()
    normalized.Param['NegLogP'] = normalized.Param['NegLogP'].copy()
    neg_log_p = normalized.Param['NegLogP']
    # Accumulate the (unnormalized) total mass per condition instance.
    totals = {}
    for var_values___frozen_dict, func_value in neg_log_p.items():
        key = normalized.CondInstances[var_values___frozen_dict]
        if key in totals:
            totals[key] += exp(-func_value)
        else:
            totals[key] = exp(-func_value)
    # Dividing the mass by the total == adding log(total) to the neg-log.
    for var_values___frozen_dict in neg_log_p:
        neg_log_p[var_values___frozen_dict] +=\
            log(totals[normalized.CondInstances[var_values___frozen_dict]])
    return normalized
def discrete_finite_max(pmf, leave_unoptimized=None):
    """Return a PMF keeping only the maximum-probability entries.

    Entries are compared within groups ("comparison bases"): by default the
    condition instance of each entry; if *leave_unoptimized* names extra
    variables, those are held fixed too, so the max is taken only over the
    remaining (optimized) variables.
    """
    neg_log_p = pmf.Param['NegLogP']
    if leave_unoptimized:
        # Build a comparison basis per entry: its values on the conditioned
        # and deliberately-unoptimized variables.
        comparison_bases = {}
        conditioned_and_unoptimized_vars = set(pmf.Cond) | set(leave_unoptimized)
        for var_names_and_values___frozen_dict in neg_log_p:
            comparison_basis = {}
            for var in (set(var_names_and_values___frozen_dict) & conditioned_and_unoptimized_vars):
                comparison_basis[var] = var_names_and_values___frozen_dict[var]
            comparison_bases[var_names_and_values___frozen_dict] = frozendict(comparison_basis)
    else:
        # Default: group entries by their condition instance.
        comparison_bases = pmf.CondInstances
    # Minimum neg-log (i.e. maximum probability) within each group.
    neg_log_mins = {}
    for var_names_and_values___frozen_dict, func_value in neg_log_p.items():
        comparison_basis = comparison_bases[var_names_and_values___frozen_dict]
        if comparison_basis in neg_log_mins:
            neg_log_mins[comparison_basis] = min(neg_log_mins[comparison_basis], func_value)
        else:
            neg_log_mins[comparison_basis] = func_value
    # Keep every entry attaining its group's minimum (ties are all kept).
    optims = {}
    for var_names_and_values___frozen_dict, func_value in neg_log_p.items():
        if func_value <= neg_log_mins[comparison_bases[var_names_and_values___frozen_dict]]:
            optims[var_names_and_values___frozen_dict] = func_value
    return DiscreteFinitePMF(var_names_and_syms=pmf.Vars.copy(), p_or_neg_log_p=optims, p=False,
                             cond=pmf.Cond.copy(), scope=pmf.Scope.copy())
def discrete_finite_marg(pmf, *marginalized_vars):
    """Marginalize the given variables out of a discrete-finite PMF by
    summing masses over each marginalized variable in turn."""
    remaining_vars = pmf.Vars.copy()
    neg_log_p = pmf.Param['NegLogP'].copy()
    for var in marginalized_vars:
        del remaining_vars[var]
        summed = {}
        for var_values___frozen_dict, neg_log_value in neg_log_p.items():
            value_of_var = var_values___frozen_dict[var]
            # Key with the marginalized variable's binding removed.
            reduced_key = frozendict(
                set(var_values___frozen_dict.items()) - {(var, value_of_var)})
            if reduced_key in summed:
                summed[reduced_key] += exp(-neg_log_value)
            else:
                summed[reduced_key] = exp(-neg_log_value)
        # Back to negative-log form for the next round.
        neg_log_p = {key: -log(total) for key, total in summed.items()}
    return DiscreteFinitePMF(remaining_vars, neg_log_p,
                             cond=deepcopy(pmf.Cond),
                             scope=deepcopy(pmf.Scope), p=False)
def discrete_finite_cond(pmf, cond={}, **kw_cond):
    """Condition a discrete-finite PMF on the given variable values,
    keeping only entries that agree with them (unnormalized)."""
    cond = combine_dict_and_kwargs(cond, kw_cond)
    cond_items = set(cond.items())
    restricted = {}
    for var_values___frozen_dict, neg_log_value in pmf.Param['NegLogP'].items():
        entry_items = set(var_values___frozen_dict.items())
        # Keep entries that contain all conditioned bindings, with the
        # conditioned bindings stripped from the key.
        if entry_items >= cond_items:
            restricted[frozendict(entry_items - cond_items)] = neg_log_value
    new_cond = deepcopy(pmf.Cond)
    new_cond.update(cond)
    new_scope = deepcopy(pmf.Scope)
    for var in cond:
        del new_scope[var]
    return DiscreteFinitePMF(pmf.Vars.copy(), restricted,
                             cond=new_cond, scope=new_scope, p=False)
class OnePMF(DiscreteFinitePMF):
    """PMF assigning equal (unnormalized) probability exp(-1) to every
    listed variable-value combination."""
    def __init__(self, var_names_and_syms={}, var_names_and_values=set(), cond={}):
        DiscreteFinitePMF.__init__(self, var_names_and_syms=var_names_and_syms,
                                   p_or_neg_log_p={item: 1. for item in var_names_and_values}, p=True, cond=cond)
class OnePDF(PDF):
    """Constant-one PDF: the multiplicative identity among PDFs.

    Its neg-log density is 0 everywhere, and normalization, maximization
    and marginalization all return the PDF itself.
    """
    def __init__(self, cond={}):
        PDF.__init__(self, family='One', var_names_and_syms={}, param={}, cond=cond, scope={},
                     neg_log_dens_func=ZERO_FUNC, norm_func=SELF_FUNC, max_func=SELF_FUNC,
                     marg_func=SELF_FUNC, cond_func=DO_NOTHING_FUNC, sample_func=DO_NOTHING_FUNC)
def uniform_density_function(var_symbols, parameters, cond={}, scope={}):
    """Construct a uniform PDF.

    Bug fix: the previous positional call did not line up with
    ``PDF.__init__``'s signature (family, var_names_and_syms, param, cond,
    scope, neg_log_dens_func, ...), so the ``uniform_*`` callbacks landed
    in the ``cond``/``scope``/... slots.  Binding every argument by keyword
    puts each one where it was intended.
    """
    return PDF(family='Uniform',
               var_names_and_syms=deepcopy(var_symbols),
               param=deepcopy(parameters),
               cond=deepcopy(cond),
               scope=deepcopy(scope),
               neg_log_dens_func=uniform_density,
               norm_func=uniform_normalization,
               max_func=DO_NOTHING_FUNC,
               marg_func=uniform_marginalization,
               cond_func=uniform_conditioning,
               sample_func=uniform_sampling)
def uniform_density(var_symbols, parameters):
    """Constant (unnormalized) uniform density: 1 everywhere.

    Both arguments are accepted for callback-signature compatibility and
    are ignored.
    """
    return 1.
def uniform_normalization():
    # Placeholder: normalization for the uniform family is not implemented.
    return 0
def uniform_marginalization():
    # Placeholder: marginalization for the uniform family is not implemented.
    return 0
def uniform_conditioning():
    # Placeholder: conditioning for the uniform family is not implemented.
    return 0
def uniform_sampling():
    # Placeholder: sampling for the uniform family is not implemented.
    return 0
class GaussPDF(PDF):
    """Multivariate Gaussian PDF over named (block) variables.

    Parameters are stored in ``param`` under keys ``('Mean', var)``,
    ``('Cov', var)`` and ``('Cov', var_i, var_j)``; preprocess() assembles
    them into BlockMatrix form and, when the covariance is numeric,
    precomputes its log-determinant and inverse.
    """
    def __init__(self, var_names_and_syms={}, param={}, cond={}, scope={}, compile=False):
        self.Vars = var_names_and_syms
        self.Param = param
        # Matrix quantities below are filled in lazily by preprocess().
        self.PreProcessed = False
        self.VarList = None
        self.NumVars = None
        self.VarVector = None
        self.NumDims = None
        self.Mean = None
        self.DemeanedVarVector = None
        self.Cov = None
        self.LogDetCov = None
        self.InvCov = None
        if compile:
            self.preprocess()
        PDF.__init__(self, family='Gaussian', var_names_and_syms=var_names_and_syms, param=param,
                     cond=cond, scope=scope, neg_log_dens_func=gauss_neg_log_dens, norm_func=DO_NOTHING_FUNC,
                     max_func=gauss_max, marg_func=gauss_marg, cond_func=gauss_cond, sample_func=gauss_sample,
                     compile=compile)

    def preprocess(self):
        """Assemble the mean vector and covariance BlockMatrix from Param,
        and precompute numeric log-det / inverse when possible."""
        self.VarList = tuple(self.Vars)
        self.NumVars = len(self.VarList)
        self.VarVector = BlockMatrix((tuple(self.Vars[var] for var in self.VarList),))
        self.NumDims = self.VarVector.shape[1]
        self.Mean = BlockMatrix((tuple(self.Param[('Mean', var)] for var in self.VarList),))
        self.DemeanedVarVector = self.VarVector - self.Mean
        cov = [self.NumVars * [None] for _ in range(self.NumVars)]   # careful not to create same mutable object
        for i in range(self.NumVars):
            for j in range(i):
                # Cross-covariance may be stored under either key order;
                # the missing direction is the transpose.
                if ('Cov', self.VarList[i], self.VarList[j]) in self.Param:
                    cov[i][j] = self.Param[('Cov', self.VarList[i], self.VarList[j])]
                    cov[j][i] = cov[i][j].T
                else:
                    cov[j][i] = self.Param[('Cov', self.VarList[j], self.VarList[i])]
                    cov[i][j] = cov[j][i].T
            cov[i][i] = self.Param[('Cov', self.VarList[i])]
        self.Cov = BlockMatrix(cov)
        try:
            cov = CompyledFunc(var_names_and_syms={}, dict_or_expr=self.Cov)()
            sign, self.LogDetCov = slogdet(cov)
            self.LogDetCov *= sign
            self.InvCov = inv(cov)
        # Fix: was a bare ``except:`` (which also swallows SystemExit /
        # KeyboardInterrupt).  A still-symbolic or singular covariance
        # leaves LogDetCov/InvCov unset so gauss_neg_log_dens falls back
        # to the symbolic path.
        except Exception:
            pass
        self.PreProcessed = True
def gauss_neg_log_dens(pdf, var_and_param_names_and_values={}, **kw_var_and_param_names_and_values):
    """Negative log density of a Gaussian PDF:
    (k*log(2*pi) + log|S| + (x-m) S^-1 (x-m)^T) / 2,
    with the given variable/parameter substitutions applied."""
    var_and_param_names_and_values = combine_dict_and_kwargs(var_and_param_names_and_values,
                                                             kw_var_and_param_names_and_values)
    if not pdf.PreProcessed:
        pdf.preprocess()
    if pdf.LogDetCov is None:
        # Covariance still symbolic: use sympy determinant and inverse.
        # det() of the 1x1 quadratic-form matrix extracts its scalar entry.
        neg_log_dens = (pdf.NumDims * log(2 * pi) + log(det(pdf.Cov)) +
                        det(pdf.DemeanedVarVector * pdf.Cov.inverse() * pdf.DemeanedVarVector.T)) / 2
    else:
        # Numeric covariance: reuse the precomputed log-det and inverse.
        neg_log_dens = (pdf.NumDims * log(2 * pi) + pdf.LogDetCov +
                        det(pdf.DemeanedVarVector * Matrix(pdf.InvCov) * pdf.DemeanedVarVector.T)) / 2
    return sympy_xreplace(neg_log_dens, var_and_param_names_and_values)
def gauss_max(pdf):
    """Return a copy whose unset scope values are set to the Gaussian
    means: the mode of a Gaussian is its mean."""
    maximized = pdf.copy()
    for var in maximized.Scope:
        if maximized.Scope[var] is None:
            maximized.Scope[var] = maximized.Param[('Mean', var)]
    return maximized
def gauss_marg(pdf, *marginalized_vars):
    """Marginalize variables out of a Gaussian PDF.

    For Gaussians this just drops the marginalized variables' blocks from
    the variable map, the scope and the parameters.  If nothing remains in
    scope, the result is the constant OnePDF.
    """
    remaining_vars = pdf.Vars.copy()
    remaining_scope = pdf.Scope.copy()
    param = pdf.Param.copy()
    for var in marginalized_vars:
        del remaining_vars[var]
        del remaining_scope[var]
        # Drop every mean/covariance parameter mentioning this variable.
        for key in tuple(param):
            if var in key:
                del param[key]
    if remaining_scope:
        return GaussPDF(var_names_and_syms=remaining_vars, param=param,
                        cond=pdf.Cond.copy(), scope=remaining_scope)
    return OnePDF(cond=pdf.Cond.copy())
def gauss_cond(pdf, cond={}, **kw_cond):
    """Condition a Gaussian PDF on the given variables.

    Uses the standard Gaussian conditioning formulas:
    m_s|c = m_s + (x_c - m_c) S_c^-1 S_cs  and
    S_s|c = S_s - S_sc S_c^-1 S_cs,
    then repackages the resulting mean/covariance blocks per scope variable.
    """
    cond = combine_dict_and_kwargs(cond, kw_cond)
    new_cond = pdf.Cond.copy()
    new_cond.update(cond)
    scope = pdf.Scope.copy()
    for var in cond:
        del scope[var]
    # Symbol -> concrete value for conditioned vars (None = symbolic cond).
    point_cond = {}
    for var, value in cond.items():
        if value is not None:
            point_cond[pdf.Vars[var]] = value
    cond_vars = tuple(cond)
    num_cond_vars = len(cond_vars)
    # NOTE(review): 'VarsList' is not set anywhere in GaussPDF (preprocess
    # defines 'VarList'); presumably the MathFunc base class provides
    # 'VarsList' — TODO confirm, otherwise this is an AttributeError.
    scope_vars = tuple(set(pdf.VarsList) - set(cond))
    num_scope_vars = len(scope_vars)
    # x_c: conditioned variable vector; m_c/m_s: conditioned/scope means.
    x_c = BlockMatrix((tuple(pdf.Vars[cond_var] for cond_var in cond_vars),))
    m_c = BlockMatrix((tuple(pdf.Param[('Mean', cond_var)] for cond_var in cond_vars),))
    m_s = BlockMatrix((tuple(pdf.Param[('Mean', scope_var)] for scope_var in scope_vars),))
    # S_c: covariance among conditioned vars.  Cross-blocks may be stored
    # under either key order; the missing direction is the transpose.
    S_c = [num_cond_vars * [None] for _ in range(num_cond_vars)]   # careful not to create same mutable object
    for i in range(num_cond_vars):
        for j in range(i):
            if ('Cov', cond_vars[i], cond_vars[j]) in pdf.Param:
                S_c[i][j] = pdf.Param[('Cov', cond_vars[i], cond_vars[j])]
                S_c[j][i] = S_c[i][j].T
            else:
                S_c[j][i] = pdf.Param[('Cov', cond_vars[j], cond_vars[i])]
                S_c[i][j] = S_c[j][i].T
        S_c[i][i] = pdf.Param[('Cov', cond_vars[i])]
    S_c = BlockMatrix(S_c)
    # S_s: covariance among scope vars.
    S_s = [num_scope_vars * [None] for _ in range(num_scope_vars)]   # careful not to create same mutable object
    for i in range(num_scope_vars):
        for j in range(i):
            if ('Cov', scope_vars[i], scope_vars[j]) in pdf.Param:
                S_s[i][j] = pdf.Param[('Cov', scope_vars[i], scope_vars[j])]
                S_s[j][i] = S_s[i][j].T
            else:
                S_s[j][i] = pdf.Param[('Cov', scope_vars[j], scope_vars[i])]
                S_s[i][j] = S_s[j][i].T
        S_s[i][i] = pdf.Param[('Cov', scope_vars[i])]
    S_s = BlockMatrix(S_s)
    # S_cs: cross-covariance between conditioned and scope vars.
    S_cs = [num_scope_vars * [None] for _ in range(num_cond_vars)]   # careful not to create same mutable object
    for i, j in product(range(num_cond_vars), range(num_scope_vars)):
        if ('Cov', cond_vars[i], scope_vars[j]) in pdf.Param:
            S_cs[i][j] = pdf.Param[('Cov', cond_vars[i], scope_vars[j])]
        else:
            S_cs[i][j] = pdf.Param[('Cov', scope_vars[j], cond_vars[i])].T
    S_cs = BlockMatrix(S_cs)
    S_sc = S_cs.T
    # Conditional mean and covariance of the scope variables.
    m = (m_s + (x_c - m_c) * S_c.inverse() * S_cs).xreplace(point_cond)
    S = S_s - S_sc * S_c.inverse() * S_cs
    # Slice m and S back into per-variable mean and covariance blocks.
    param = {}
    index_ranges_from = []
    index_ranges_to = []
    k = 0
    for i in range(num_scope_vars):
        l = k + pdf.Vars[scope_vars[i]].shape[1]
        index_ranges_from += [k]
        index_ranges_to += [l]
        param[('Mean', scope_vars[i])] = m[0, index_ranges_from[i]:index_ranges_to[i]]
        for j in range(i):
            param[('Cov', scope_vars[j], scope_vars[i])] =\
                S[index_ranges_from[j]:index_ranges_to[j], index_ranges_from[i]:index_ranges_to[i]]
        param[('Cov', scope_vars[i])] =\
            S[index_ranges_from[i]:index_ranges_to[i], index_ranges_from[i]:index_ranges_to[i]]
        k = l
    return GaussPDF(var_names_and_syms=pdf.Vars.copy(), param=param, cond=new_cond, scope=scope)
def gauss_sample(gaussian_pdf, num_samples):
    """Sampling from a Gaussian PDF is not implemented yet.

    The intended implementation (assemble the mean vector and covariance
    matrix from the per-variable BlockMatrix parameters, then draw via
    scipy's ``multivariate_normal`` and wrap the samples in a
    discrete-finite mass function) was left unfinished; until then this
    stub ignores both arguments and returns 0.
    """
    return 0
def product_of_2_DiscreteFinitePMFs(pmf0, pmf1):
    """Pointwise product of two discrete-finite PMFs.

    Entries combine only when they agree on every shared variable; the
    combined entry's neg-log probability is the sum of the factors'.
    """
    cond = merge_dicts_ignoring_dup_keys_and_none_values(pmf0.Cond, pmf1.Cond)
    scope = merge_dicts_ignoring_dup_keys_and_none_values(pmf0.Scope, pmf1.Scope)
    # Scoped variables take precedence over conditioned ones.
    for var in (set(cond) & set(scope)):
        del cond[var]
    var_names_and_syms = merge_dicts_ignoring_dup_keys_and_none_values(pmf0.Vars, pmf1.Vars)
    neg_log_p = {}
    for (values0, neg_log0), (values1, neg_log1) in product(
            pmf0.Param['NegLogP'].items(), pmf1.Param['NegLogP'].items()):
        shared_vars = set(values0) & set(values1)
        if all(values0[var] == values1[var] for var in shared_vars):
            combined_key = frozendict(set(values0.items()) | set(values1.items()))
            neg_log_p[combined_key] = neg_log0 + neg_log1
    return DiscreteFinitePMF(var_names_and_syms=var_names_and_syms, p_or_neg_log_p=neg_log_p, p=False,
                             cond=cond, scope=scope)
def product_of_DiscreteFinitePMF_and_continuousPDF(pmf, pdf):
    """Multiply a discrete-finite PMF by a continuous PDF entrywise:
    neg-log of the product = neg-log mass - log(density)."""
    cond = merge_dicts_ignoring_dup_keys_and_none_values(pmf.Cond, pdf.Cond)
    scope = merge_dicts_ignoring_dup_keys_and_none_values(pmf.Scope, pdf.Scope)
    # Scoped variables take precedence over conditioned ones.
    for var in (set(cond) & set(scope)):
        del cond[var]
    merged_vars = merge_dicts_ignoring_dup_keys_and_none_values(pmf.Vars, pdf.Vars)
    log_density = log(pdf.Mapping)
    neg_log_p = {values: neg_log_mass - log_density
                 for values, neg_log_mass in pmf.Param['NegLogP'].items()}
    return DiscreteFinitePMF(var_names_and_syms=merged_vars, p_or_neg_log_p=neg_log_p, p=False,
                             cond=cond, scope=scope)
def product_of_OnePDF_and_GaussPDF(one_pdf, gauss_pdf):
    """Multiply the constant-one PDF by a Gaussian: the result is the
    Gaussian itself, with merged condition/scope/variables."""
    cond = merge_dicts_ignoring_dup_keys_and_none_values(gauss_pdf.Cond, one_pdf.Cond)
    scope = merge_dicts_ignoring_dup_keys_and_none_values(gauss_pdf.Scope, one_pdf.Scope)
    # Scoped variables take precedence over conditioned ones.
    for var in (set(cond) & set(scope)):
        del cond[var]
    merged_vars = merge_dicts_ignoring_dup_keys_and_none_values(gauss_pdf.Vars, one_pdf.Vars)
    return GaussPDF(var_names_and_syms=merged_vars, param=gauss_pdf.Param.copy(),
                    cond=cond, scope=scope)
def product_of_2_GaussPDFs(pdf0, pdf1):
    """Multiply two Gaussian PDFs.

    NOTE(review): the product's parameters are left empty here, so the
    resulting GaussPDF carries no mean/covariance — this looks unfinished;
    confirm before relying on it.
    """
    cond = merge_dicts_ignoring_dup_keys_and_none_values(pdf0.Cond, pdf1.Cond)
    scope = merge_dicts_ignoring_dup_keys_and_none_values(pdf0.Scope, pdf1.Scope)
    # Scoped variables take precedence over conditioned ones.
    for var in (set(cond) & set(scope)):
        del cond[var]
    merged_vars = merge_dicts_ignoring_dup_keys_and_none_values(pdf0.Vars, pdf1.Vars)
    return GaussPDF(var_names_and_syms=merged_vars, param={}, cond=cond, scope=scope)
|
MBALearnsToCode/ProbabPy
|
ProbabPy/__init__.py
|
Python
|
mit
| 26,081
|
[
"Gaussian"
] |
16ce3701790d69aff34e339a9c33064cdd2524568599bb8eba0a9effe2f969dd
|
# -*- coding: utf-8 -*-
#
#Created on Mon Apr 10 11:34:50 2017
#
#author: Elina Thibeau-Sutre
#
from .base import BaseMixture
from .base import _log_normal_matrix
from .base import _full_covariance_matrices
from .base import _spherical_covariance_matrices
from .initializations import initialize_log_assignements,initialize_mcw
import numpy as np
from scipy.misc import logsumexp
class GaussianMixture(BaseMixture):
    """
    Gaussian Mixture Model

    Representation of a Gaussian mixture model probability distribution.
    This class allows to estimate the parameters of a Gaussian mixture
    distribution.

    Parameters
    ----------
    n_components : int, defaults to 1.
        Number of clusters used.

    init : str, defaults to 'kmeans'.
        Method used in order to perform the initialization,
        must be in ['random', 'plus', 'AF_KMC', 'kmeans'].

    reg_covar : float, defaults to 1e-6
        In order to avoid null covariances this float is added to the diagonal
        of covariance matrices.

    type_init : str, defaults to 'resp'.
        The algorithm is initialized using this data (responsibilities if 'resp'
        or means, covariances and weights if 'mcw').

    Attributes
    ----------
    name : str
        The name of the method : 'GMM'

    cov : array of floats (n_components,dim,dim)
        Contains the computed covariance matrices of the mixture.

    means : array of floats (n_components,dim)
        Contains the computed means of the mixture.

    log_weights : array of floats (n_components,)
        Contains the logarithm of the mixing coefficient of each cluster.

    iter : int
        The number of iterations computed with the method fit()

    convergence_criterion_data : array of floats (iter,)
        Stores the value of the convergence criterion computed with data
        on which the model is fitted.

    convergence_criterion_test : array of floats (iter,) | if _early_stopping only
        Stores the value of the convergence criterion computed with test data
        if it exists.

    _is_initialized : bool
        Ensures that the method _initialize() has been used before using other
        methods such as score() or predict_log_assignements().

    Raises
    ------
    ValueError : if the parameters are inconsistent, for example if the cluster number is negative, init_type is not in ['resp','mcw']...

    References
    ----------
    'Pattern Recognition and Machine Learning', Bishop
    """

    def __init__(self, n_components=1, covariance_type="full", init="kmeans",
                 reg_covar=1e-6, type_init='resp', n_jobs=1):
        super(GaussianMixture, self).__init__()

        self.name = 'GMM'
        self.n_components = n_components
        self.covariance_type = covariance_type
        self.init = init
        self.type_init = type_init
        self.reg_covar = reg_covar
        self.n_jobs = n_jobs

        # Bookkeeping: set by _initialize() / fit().
        self._is_initialized = False
        self.iter = 0
        self.convergence_criterion_data = []
        self.convergence_criterion_test = []

        self._check_common_parameters()
        self._check_parameters()

    def _check_parameters(self):
        """Validate ``init``, ``covariance_type`` and their combination.

        Raises
        ------
        ValueError : if any of the settings is outside its supported set.
        """
        if self.init not in ['random', 'random_sk', 'plus', 'kmeans', 'AF_KMC']:
            raise ValueError("Invalid value for 'init': %s "
                             "'init' should be in "
                             "['random', 'random_sk', 'plus', 'kmeans', 'AF_KMC']"
                             % self.init)

        if self.covariance_type not in ['full', 'spherical']:
            # Bug fix: the message previously blamed 'init' although the
            # offending parameter is 'covariance_type'.
            raise ValueError("Invalid value for 'covariance_type': %s "
                             "'covariance_type' should be in "
                             "['full', 'spherical']"
                             % self.covariance_type)

        if self.init == 'random_sk' and self.type_init == 'mcw':
            raise ValueError("random_sk is only compatible with"
                             "type_init = resp")

    def _initialize(self, points_data, points_test=None):
        """
        This method initializes the Gaussian Mixture by setting the values of
        the means, covariances and weights.

        Parameters
        ----------
        points_data : an array (n_points,dim)
            Data on which the model is fitted.
        points_test: an array (n_points,dim) | Optional
            Data used to do early stopping (avoid overfitting)
        """
        if self.type_init == 'resp':
            # Initial responsibilities; one M step then yields means/cov/weights.
            log_assignements = initialize_log_assignements(self.init, self.n_components, points_data, points_test,
                                                           self.covariance_type)
            self._step_M(points_data, log_assignements)

        elif self.type_init == 'mcw':
            # Directly initialize means, covariances and (log) weights.
            means, cov, log_weights = initialize_mcw(self.init, self.n_components, points_data, points_test,
                                                     self.covariance_type)
            self.means = means
            self.cov = cov
            self.log_weights = log_weights

        elif self.type_init == 'kmeans':
            # NOTE(review): undocumented option — only the covariances are
            # initialized here; confirm means/weights are set elsewhere.
            self._initialize_cov(points_data)

        self._is_initialized = True

    def _step_E(self, points):
        """
        In this step the algorithm evaluates the responsibilities of each points in each cluster

        Parameters
        ----------
        points : an array (n_points,dim)

        Returns
        -------
        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points
        """
        log_normal_matrix = _log_normal_matrix(points, self.means, self.cov, self.covariance_type, self.n_jobs)
        # log N(x|mu_k,Sigma_k) + log pi_k, broadcast over the component axis.
        log_product = log_normal_matrix + self.log_weights[:, np.newaxis].T
        log_prob_norm = logsumexp(log_product, axis=1)
        # Normalize in log space to obtain log responsibilities.
        log_resp = log_product - log_prob_norm[:, np.newaxis]

        return log_prob_norm, log_resp

    def _step_M(self, points, log_assignements):
        """
        In this step the algorithm updates the values of the parameters (means, covariances,
        alpha, beta, nu).

        Parameters
        ----------
        points : an array (n_points,dim)

        log_resp: an array (n_points,n_components)
            an array containing the logarithm of the responsibilities.
        """
        n_points, dim = points.shape
        assignements = np.exp(log_assignements)

        # Phase 1: means.  The epsilon guard avoids division by zero for
        # clusters that received (numerically) no responsibility.
        product = np.dot(assignements.T, points)
        weights = np.sum(assignements, axis=0) + 10 * np.finfo(assignements.dtype).eps

        self.means = product / weights[:, np.newaxis]

        # Phase 2: covariances (full or spherical parametrization).
        if self.covariance_type == "full":
            self.cov = _full_covariance_matrices(points, self.means, weights, assignements, self.reg_covar, self.n_jobs)
        elif self.covariance_type == "spherical":
            self.cov = _spherical_covariance_matrices(points, self.means, weights, assignements, self.reg_covar, self.n_jobs)

        # Phase 3: mixing weights, kept in log space.
        self.log_weights = logsumexp(log_assignements, axis=0) - np.log(n_points)

    def _convergence_criterion_simplified(self, points, _, log_prob_norm):
        """
        Compute the log likelihood.

        Parameters
        ----------
        points : an array (n_points,dim)

        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        Returns
        -------
        result : float
            the log likelihood
        """
        return np.sum(log_prob_norm)

    def _convergence_criterion(self, points, _, log_prob_norm):
        """
        Compute the log likelihood.

        For a plain GMM the simplified and the full criterion coincide.

        Parameters
        ----------
        points : an array (n_points,dim)

        log_prob_norm : an array (n_points,)
            logarithm of the probability of each sample in points

        Returns
        -------
        result : float
            the log likelihood
        """
        return np.sum(log_prob_norm)

    def _get_parameters(self):
        """Return the current (log_weights, means, cov) tuple."""
        return (self.log_weights, self.means, self.cov)

    def _set_parameters(self, params, verbose=True):
        """Restore parameters produced by _get_parameters(); updates
        n_components if the stored model has a different size."""
        self.log_weights, self.means, self.cov = params

        if self.n_components != len(self.means) and verbose:
            print('The number of components changed')
        self.n_components = len(self.means)

    def _limiting_model(self, points):
        """Return the parameters restricted to the clusters that are the
        argmax responsibility of at least one point."""
        n_points, dim = points.shape
        log_resp = self.predict_log_resp(points)
        _, n_components = log_resp.shape

        exist = np.zeros(n_components)

        for i in range(n_points):
            for j in range(n_components):
                if np.argmax(log_resp[i]) == j:
                    exist[j] = 1

        idx_existing = np.where(exist == 1)

        log_weights = self.log_weights[idx_existing]
        means = self.means[idx_existing]
        cov = self.cov[idx_existing]

        params = (log_weights, means, cov)

        return params
|
14thibea/megamix
|
megamix/batch/GMM.py
|
Python
|
apache-2.0
| 9,537
|
[
"Gaussian"
] |
cb0d5d3128b65483c95087aca27fb9732d09fe520a85d15165f68b9c0c31d67d
|
# -*- coding: utf-8 -*-
# Copyright 2007-2020 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import gc
import numpy as np
import pytest
import hyperspy.api as hs
from hyperspy import components1d
from hyperspy.decorators import lazifyTestClass
def teardown_module(module):
    """Force a full garbage-collection cycle after this module's tests
    have run, so freed fixtures do not inflate memory for the rest of
    the test suite."""
    gc.collect()
@lazifyTestClass
class TestRemoveBackground1DGaussian:
    """Removing a Gaussian background from a noise-free Gaussian signal
    should leave numerically zero data behind."""

    def setup_method(self, method):
        # Noise-free Gaussian sampled on a fine axis: the fit can be exact.
        gaussian = components1d.Gaussian()
        gaussian.A.value = 10
        gaussian.centre.value = 10
        gaussian.sigma.value = 1
        self.signal = hs.signals.Signal1D(
            gaussian.function(np.arange(0, 20, 0.02)))
        self.signal.axes_manager[0].scale = 0.01

    @pytest.mark.parametrize('binning', (True, False))
    @pytest.mark.parametrize('fast', [False, True])
    @pytest.mark.parametrize('return_model', [False, True])
    def test_background_remove(self, binning, fast, return_model):
        signal = self.signal
        signal.metadata.Signal.binned = binning
        out = signal.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            fast=fast,
            return_model=return_model)
        if return_model:
            # A (signal, model) tuple is returned; the fit must be exact.
            s1 = out[0]
            model = out[1]
            assert np.allclose(model.chisq.data, 0.0)
            assert np.allclose(model.as_signal().data, signal.data)
        else:
            s1 = out
        assert np.allclose(s1.data, np.zeros_like(s1.data))

    def test_background_remove_navigation(self):
        # Check it calculate the chisq per navigation position.
        s2 = hs.stack([self.signal]*2)
        (s, model) = s2.remove_background(
            signal_range=(None, None),
            background_type='Gaussian',
            fast=True,
            return_model=True)
        assert np.allclose(model.chisq.data, np.array([0.0, 0.0]))
        assert np.allclose(model.as_signal().data, s2.data)
        assert np.allclose(s.data, np.zeros_like(s.data))
@lazifyTestClass
class TestRemoveBackground1DLorentzian:
    """Background removal of a noise-free Lorentzian signal."""

    def setup_method(self, method):
        lorentzian = components1d.Lorentzian()
        lorentzian.A.value = 10
        lorentzian.centre.value = 10
        lorentzian.gamma.value = 1
        self.signal = hs.signals.Signal1D(
            lorentzian.function(np.arange(0, 20, 0.03)))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False

    def test_background_remove_lorentzian(self):
        # Fast is not accurate, hence the loose tolerance.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Lorentzian')
        assert np.allclose(np.zeros(len(s1.data)), s1.data, atol=0.2)

    def test_background_remove_lorentzian_full_fit(self):
        # A full (fast=False) fit should be exact on noise-free data.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Lorentzian',
            fast=False)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))
@lazifyTestClass
class TestRemoveBackground1DPowerLaw:
    """Background removal of a power-law signal, with and without noise,
    integer dtypes and zero_fill."""

    def setup_method(self, method):
        pl = components1d.PowerLaw()
        pl.A.value = 1e10
        pl.r.value = 3
        self.signal = hs.signals.Signal1D(pl.function(np.arange(100, 200)))
        self.signal.axes_manager[0].offset = 100
        self.signal.metadata.Signal.binned = False
        self.signal_noisy = self.signal.deepcopy()
        self.signal_noisy.add_gaussian_noise(1)
        # Tolerances scaled to the signal amplitude (4 % of the maximum).
        self.atol = 0.04 * abs(self.signal.data).max()
        self.atol_zero_fill = 0.04 * abs(self.signal.isig[10:].data).max()

    def test_background_remove_pl(self):
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw')
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)
        assert s1.axes_manager.navigation_dimension == 0

    def test_background_remove_pl_zero(self):
        # zero_fill=True zeroes the channels before the fitted range.
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))

    def test_background_remove_pl_int(self):
        # Same check with integer-typed data.
        self.signal.change_dtype("int")
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='PowerLaw')
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.data, np.zeros(len(s1.data)), atol=self.atol)

    def test_background_remove_pl_int_zero(self):
        self.signal_noisy.change_dtype("int")
        s1 = self.signal_noisy.remove_background(
            signal_range=(110.0, 190.0),
            background_type='PowerLaw',
            zero_fill=True)
        # since we compare to zero, rtol can't be used (see np.allclose doc)
        assert np.allclose(s1.isig[10:], np.zeros(len(s1.data[10:])),
                           atol=self.atol_zero_fill)
        assert np.allclose(s1.data[:10], np.zeros(10))
@lazifyTestClass
class TestRemoveBackground1DSkewNormal:
    """Background removal of a noise-free skew-normal signal."""

    def setup_method(self, method):
        skewnormal = components1d.SkewNormal()
        skewnormal.A.value = 3
        skewnormal.x0.value = 1
        skewnormal.scale.value = 2
        skewnormal.shape.value = 10
        self.signal = hs.signals.Signal1D(
            skewnormal.function(np.arange(0, 10, 0.01)))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False

    def test_background_remove_skewnormal(self):
        # Fast is not accurate, hence the loose tolerance.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='SkewNormal')
        assert np.allclose(np.zeros(len(s1.data)), s1.data, atol=0.2)

    def test_background_remove_skewnormal_full_fit(self):
        # A full (fast=False) fit should be exact on noise-free data.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='SkewNormal',
            fast=False)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))
@lazifyTestClass
class TestRemoveBackground1DVoigt:
    """Background removal of a noise-free Voigt signal.

    NOTE(review): both tests below run the identical call (fast=False) —
    confirm whether one of them was meant to exercise fast=True."""

    def setup_method(self, method):
        voigt = components1d.Voigt(legacy=False)
        voigt.area.value = 5
        voigt.centre.value = 10
        voigt.gamma.value = 0.2
        voigt.sigma.value = 0.5
        self.signal = hs.signals.Signal1D(
            voigt.function(np.arange(0, 20, 0.03)))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False

    def test_background_remove_voigt(self):
        # resort to fast=False as estimator guesses only Gaussian width
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Voigt',
            fast=False)
        assert np.allclose(np.zeros(len(s1.data)), s1.data)

    def test_background_remove_voigt_full_fit(self):
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Voigt',
            fast=False)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))
@lazifyTestClass
class TestRemoveBackground1DExponential:
    """Background removal of a noise-free exponential signal."""

    def setup_method(self, method):
        exponential = components1d.Exponential()
        exponential.A.value = 12500.
        exponential.tau.value = 168.
        self.signal = hs.signals.Signal1D(
            exponential.function(np.arange(100, 200, 0.02)))
        self.signal.axes_manager[0].scale = 0.01
        self.signal.metadata.Signal.binned = False
        # Tolerance scaled to the signal amplitude (4 % of the maximum).
        self.atol = 0.04 * abs(self.signal.data).max()

    def test_background_remove_exponential(self):
        # Fast is not accurate, hence the scaled tolerance.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Exponential')
        assert np.allclose(np.zeros(len(s1.data)), s1.data, atol=self.atol)

    def test_background_remove_exponential_full_fit(self):
        # A full (fast=False) fit should be exact on noise-free data.
        s1 = self.signal.remove_background(
            signal_range=(None, None),
            background_type='Exponential',
            fast=False)
        assert np.allclose(s1.data, np.zeros(len(s1.data)))
def compare_axes_manager_metadata(s0, s1):
    """Assert that two signals share data shape, per-axis calibration
    (name, units, scale, offset) and title."""
    assert s0.data.shape == s1.data.shape
    assert s0.axes_manager.shape == s1.axes_manager.shape
    n_axes = len(s0.axes_manager._axes)
    for idx in range(n_axes):
        axis_a = s0.axes_manager[idx]
        axis_b = s1.axes_manager[idx]
        for attr in ("name", "units", "scale", "offset"):
            assert getattr(axis_a, attr) == getattr(axis_b, attr)
    assert s0.metadata.General.title == s1.metadata.General.title
@pytest.mark.parametrize('nav_dim', [0, 1])
@pytest.mark.parametrize('fast', [True, False])
@pytest.mark.parametrize('zero_fill', [True, False])
@pytest.mark.parametrize('show_progressbar', [True, False])
@pytest.mark.parametrize('plot_remainder', [True, False])
@pytest.mark.parametrize('background_type',
                         ['Doniach', 'Gaussian', 'Lorentzian', 'Polynomial',
                          'Power Law', 'Offset', 'SkewNormal', 'SplitVoigt',
                          'Voigt'])
def test_remove_background_metadata_axes_manager_copy(nav_dim,
                                                      fast,
                                                      zero_fill,
                                                      show_progressbar,
                                                      plot_remainder,
                                                      background_type):
    """remove_background must carry axis calibration and metadata over to
    the returned signal, for every background type and option combination."""
    if nav_dim == 0:
        if background_type == ('Voigt'):  # speeds up the test
            s = hs.signals.Signal1D(np.hstack((np.arange(10, 50),
                                               np.arange(10, 50)[::-1])))
        else:
            s = hs.signals.Signal1D(np.arange(10, 100)[::-1])
    else:
        if background_type == ('Voigt'):  # avoids warning
            s = hs.signals.Signal1D(
                np.tile(np.exp(np.arange(0, 100)[::-1]), (2, 1)))
        else:
            s = hs.signals.Signal1D(np.arange(10, 210)[::-1].reshape(2, 100))
    # Distinctive calibration/title values so any loss during copying shows.
    s.axes_manager[0].name = 'axis0'
    s.axes_manager[0].units = 'units0'
    s.axes_manager[0].scale = 0.9
    s.axes_manager[0].offset = 1.
    s.metadata.General.title = "atitle"
    s_r = s.remove_background(signal_range=(2, 50),
                              fast=fast,
                              zero_fill=zero_fill,
                              show_progressbar=show_progressbar,
                              plot_remainder=plot_remainder,
                              background_type=background_type)
    compare_axes_manager_metadata(s, s_r)
    assert s_r.data.shape == s.data.shape
|
dnjohnstone/hyperspy
|
hyperspy/tests/signal/test_remove_background.py
|
Python
|
gpl-3.0
| 11,665
|
[
"Gaussian"
] |
3967bd158e6a833c6263b0aec293dbe1318eaabfd4d53eba418d618f31aae6ef
|
from basesynapse import BaseSynapse
import numpy as np
import pycuda.gpuarray as garray
from pycuda.tools import dtype_to_ctype
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
cuda_src = """
__global__ void alpha_synapse(
int num,
%(type)s dt,
int *spike,
int *Pre,
%(type)s *Ar,
%(type)s *Ad,
%(type)s *Gmax,
%(type)s *a0,
%(type)s *a1,
%(type)s *a2,
%(type)s *cond )
{
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int tot_threads = gridDim.x * blockDim.x;
int pre;
%(type)s ar,ad,gmax;
%(type)s old_a[3];
%(type)s new_a[3];
for( int i=tid; i<num; i+=tot_threads ){
// copy data from global memory to register
ar = Ar[i];
ad = Ad[i];
pre = Pre[i];
gmax = Gmax[i];
old_a[0] = a0[i];
old_a[1] = a1[i];
old_a[2] = a2[i];
// update the alpha function
new_a[0] = fmax( 0., old_a[0] + dt*old_a[1] );
new_a[1] = old_a[1] + dt*old_a[2];
if( spike[pre] )
new_a[1] += ar*ad;
new_a[2] = -( ar+ad )*old_a[1] - ar*ad*old_a[0];
// copy data from register to the global memory
a0[i] = new_a[0];
a1[i] = new_a[1];
a2[i] = new_a[2];
cond[i] = new_a[0]*gmax;
}
return;
}
"""
class AlphaSynapse(BaseSynapse):
    """GPU alpha-function synapse.

    Integrates, for every synapse, the second-order alpha-function ODE on
    the GPU (see ``cuda_src``) and writes the resulting conductance,
    scaled by ``gmax``, into the shared ``synapse_state`` array.
    """

    def __init__(self, s_dict, synapse_state, dt, debug=False):
        """
        Parameters
        ----------
        s_dict : dict
            Per-synapse parameter arrays keyed by 'id', 'pre', 'ar', 'ad',
            'gmax' (all of equal length).
        synapse_state : pycuda gpudata
            Output buffer receiving the conductance of each synapse.
        dt : float
            Integration time step.
        debug : bool
            Kept on the instance; not used in this class.
        """
        self.debug = debug
        self.dt = dt
        self.num = len(s_dict['id'])

        # Per-synapse parameters, copied once to the GPU.
        self.pre = garray.to_gpu(np.asarray(s_dict['pre'], dtype=np.int32))
        self.ar = garray.to_gpu(np.asarray(s_dict['ar'], dtype=np.float64))
        self.ad = garray.to_gpu(np.asarray(s_dict['ad'], dtype=np.float64))
        self.gmax = garray.to_gpu(np.asarray(s_dict['gmax'], dtype=np.float64))

        # State: a0 (conductance profile) and its first two derivatives.
        self.a0 = garray.zeros((self.num,), dtype=np.float64)
        self.a1 = garray.zeros((self.num,), dtype=np.float64)
        self.a2 = garray.zeros((self.num,), dtype=np.float64)
        self.cond = synapse_state

        self.update = self.get_gpu_kernel()

    @property
    def synapse_class(self):
        # Class identifier expected by the LPU framework.
        return int(0)

    def update_state(self, buffer, st=None):
        """Launch one integration step; reads spikes from ``buffer`` on
        stream ``st`` (asynchronous)."""
        self.update.prepared_async_call(
            self.gpu_grid,
            self.gpu_block,
            st,
            self.num,
            self.dt,
            buffer.spike_buffer.gpudata,
            self.pre.gpudata,
            self.ar.gpudata,
            self.ad.gpudata,
            self.gmax.gpudata,
            self.a0.gpudata,
            self.a1.gpudata,
            self.a2.gpudata,
            self.cond)

    def get_gpu_kernel(self):
        """Compile the alpha-synapse kernel and choose the launch config."""
        self.gpu_block = (128, 1, 1)
        # Bug fix: use floor division so the grid dimension stays an int
        # under Python 3 (true division yields a float, which PyCUDA
        # rejects as a grid dimension); '//' is identical for ints in py2.
        self.gpu_grid = (min(6 * cuda.Context.get_device().MULTIPROCESSOR_COUNT,
                             (self.num - 1) // self.gpu_block[0] + 1), 1)
        # cuda_src = open('./alpha_synapse.cu','r')
        mod = SourceModule(
            cuda_src % {"type": dtype_to_ctype(np.float64)},
            options=["--ptxas-options=-v"])
        func = mod.get_function("alpha_synapse")
        # Signature: int num, double dt, then 9 device pointers
        # (spike, pre, ar, ad, gmax, a0, a1, a2, cond).
        func.prepare('idPPPPPPPPP')
        return func
|
cerrno/neurokernel
|
neurokernel/LPU/synapses/AlphaSynapse.py
|
Python
|
bsd-3-clause
| 3,696
|
[
"NEURON"
] |
4156b595165fb7611a63949244068a182205ba37d45b39ed12f9471caf38957c
|
## \file
## \ingroup tutorial_roofit
## \notebook
## Multidimensional models: complete example with use of conditional pdf with per-event errors
##
## \macro_code
##
## \date February 2018
## \authors Clemens Lange, Wouter Verkerke (C++ version)
import ROOT

# B-physics pdf with per-event Gaussian resolution
# ----------------------------------------------------------------------------------------------

# Observables: decay time dt and its per-event uncertainty dterr.
dt = ROOT.RooRealVar("dt", "dt", -10, 10)
dterr = ROOT.RooRealVar("dterr", "per-event error on dt", 0.01, 10)

# Build a gaussian resolution model scaled by the per-error =
# gauss(dt,bias,sigma*dterr)
bias = ROOT.RooRealVar("bias", "bias", 0, -10, 10)
sigma = ROOT.RooRealVar(
    "sigma", "per-event error scale factor", 1, 0.1, 10)
gm = ROOT.RooGaussModel(
    "gm1", "gauss model scaled bt per-event error", dt, bias, sigma, dterr)

# Construct decay(dt) (x) gauss1(dt|dterr)
tau = ROOT.RooRealVar("tau", "tau", 1.548)
decay_gm = ROOT.RooDecay("decay_gm", "decay", dt,
                         tau, gm, ROOT.RooDecay.DoubleSided)

# Construct fake 'external' data with per-event error
# ------------------------------------------------------------------------------------------------------

# Use landau pdf to get somewhat realistic distribution with long tail
pdfDtErr = ROOT.RooLandau("pdfDtErr", "pdfDtErr", dterr, ROOT.RooFit.RooConst(
    1), ROOT.RooFit.RooConst(0.25))
expDataDterr = pdfDtErr.generate(ROOT.RooArgSet(dterr), 10000)

# Sample data from conditional decay_gm(dt|dterr)
# ---------------------------------------------------------------------------------------------

# Specify external dataset with dterr values to use decay_dm as
# conditional pdf
data = decay_gm.generate(ROOT.RooArgSet(
    dt), ROOT.RooFit.ProtoData(expDataDterr))

# Fit conditional decay_dm(dt|dterr)
# ---------------------------------------------------------------------

# Specify dterr as conditional observable
decay_gm.fitTo(data, ROOT.RooFit.ConditionalObservables(
    ROOT.RooArgSet(dterr)))

# Plot conditional decay_dm(dt|dterr)
# ---------------------------------------------------------------------

# Make two-dimensional plot of conditional pdf in (dt,dterr)
hh_decay = decay_gm.createHistogram("hh_decay", dt, ROOT.RooFit.Binning(
    50), ROOT.RooFit.YVar(dterr, ROOT.RooFit.Binning(50)))
hh_decay.SetLineColor(ROOT.kBlue)

# Plot decay_gm(dt|dterr) at various values of dterr
frame = dt.frame(ROOT.RooFit.Title(
    "Slices of decay(dt|dterr) at various dterr"))
for ibin in range(0, 100, 20):
    dterr.setBin(ibin)
    decay_gm.plotOn(frame, ROOT.RooFit.Normalization(5.))

# Make projection of data on dt
frame2 = dt.frame(ROOT.RooFit.Title("Projection of decay(dt|dterr) on dt"))
data.plotOn(frame2)

# Make projection of decay(dt|dterr) on dt.
#
# Instead of integrating out dterr, a weighted average of curves
# at values dterr_i as given in the external dataset.
# (The kTRUE argument bins the data before projection to speed up the process)
decay_gm.plotOn(frame2, ROOT.RooFit.ProjWData(expDataDterr, ROOT.kTRUE))

# Draw all frames on canvas
c = ROOT.TCanvas("rf306_condpereventerrors",
                 "rf306_condperventerrors", 1200, 400)
c.Divide(3)
c.cd(1)
ROOT.gPad.SetLeftMargin(0.20)
hh_decay.GetZaxis().SetTitleOffset(2.5)
hh_decay.Draw("surf")
c.cd(2)
ROOT.gPad.SetLeftMargin(0.15)
frame.GetYaxis().SetTitleOffset(1.6)
frame.Draw()
c.cd(3)
ROOT.gPad.SetLeftMargin(0.15)
frame2.GetYaxis().SetTitleOffset(1.6)
frame2.Draw()

c.SaveAs("rf306_condpereventerrors.png")
|
root-mirror/root
|
tutorials/roofit/rf306_condpereventerrors.py
|
Python
|
lgpl-2.1
| 3,522
|
[
"Gaussian"
] |
ee1c2935a3336882d9f9a79c35e379e78541b85efe64df67c9070d75b84bd594
|
#
# Copyright (C) 2013-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import numpy as np
import espressomd
# Dihedral interaction needs more rigorous tests.
# The geometry checked here is rather simple and special.
# I also found that as the dihedral angle approaches to 0, the simulation
# values deviate from the analytic values by roughly 10%.
def rotate_vector(v, k, phi):
    """Rotate vector ``v`` about the unit axis ``k`` by angle ``phi``.

    Uses Rodrigues' rotation formula:
    v' = v cos(phi) + (k x v) sin(phi) + k (k.v)(1 - cos(phi)).
    """
    cos_phi = np.cos(phi)
    sin_phi = np.sin(phi)
    axial = k * np.dot(k, v)
    return v * cos_phi + np.cross(k, v) * sin_phi + axial * (1.0 - cos_phi)
def dihedral_potential(k, phi, n, phase):
    """Cosine dihedral potential ``k * (1 - cos(n*phi - phase))``.

    ``phi == -1`` is the sentinel for an undefined dihedral angle
    (collinear bonds) and contributes zero energy.
    """
    return 0 if phi == -1 else k * (1 - np.cos(n * phi - phase))
def dihedral_force(k, n, phase, p1, p2, p3, p4):
    """Analytic forces on particles 1, 2 and 3 for the cosine dihedral
    potential defined by particle positions ``p1``..``p4``.

    Returns the scalar triple ``(0, 0, 0)`` when the dihedral angle is
    undefined (either bond cross product vanishes).
    """
    b1, b2, b3 = p2 - p1, p3 - p2, p4 - p3

    plane_a = np.cross(b1, b2)
    len_a = np.linalg.norm(plane_a)
    plane_b = np.cross(b2, b3)
    len_b = np.linalg.norm(plane_b)

    # Degenerate geometry (collinear bonds): no force is defined.
    if len_a <= 1e-8 or len_b <= 1e-8:
        return 0, 0, 0

    cosphi = np.abs(np.dot(plane_a, plane_b)) / (len_a * len_b)
    phi = np.arccos(cosphi)

    f1 = (plane_b - cosphi * plane_a) / len_a
    f4 = (plane_a - cosphi * plane_b) / len_b

    v23Xf1 = np.cross(b2, f1)
    v23Xf4 = np.cross(b2, f4)
    v34Xf4 = np.cross(b3, f4)
    v12Xf1 = np.cross(b1, f1)

    coeff = -k * n * np.sin(n * phi - phase) / np.sin(phi)

    force1 = coeff * v23Xf1
    force2 = coeff * (v34Xf4 - v12Xf1 - v23Xf1)
    force3 = coeff * (v12Xf1 - v23Xf4 - v34Xf4)
    return force1, force2, force3
class InteractionsBondedTest(ut.TestCase):
    """Checks the Dihedral and TabulatedDihedral bonded interactions of
    ESPResSo against the analytic reference energies and forces above."""

    system = espressomd.System(box_l=[1.0, 1.0, 1.0])
    np.random.seed(seed=42)

    box_l = 10.

    start_pos = [5., 5., 5.]
    # Rotation axis used to sweep the dihedral angle (normalized).
    axis = np.array([1., 0., 0.])
    axis /= np.linalg.norm(axis)
    rel_pos_1 = np.array([0., 1., 0.])
    rel_pos_2 = np.array([0., 0., 1.])

    def setUp(self):
        # Four particles, all starting at the box centre; tests move them.
        self.system.box_l = [self.box_l] * 3
        self.system.cell_system.skin = 0.4
        self.system.time_step = .1

        self.system.part.add(pos=4 * [self.start_pos], type=4 * [0])

    def tearDown(self):
        self.system.part.clear()

    # Analytical Expression
    def dihedral_angle(self, p1, p2, p3, p4):
        """
        Calculate the dihedral angle phi based on particles' position p1, p2, p3, p4.
        """
        v12 = p2 - p1
        v23 = p3 - p2
        v34 = p4 - p3

        v12Xv23 = np.cross(v12, v23)
        l_v12Xv23 = np.linalg.norm(v12Xv23)
        v23Xv34 = np.cross(v23, v34)
        l_v23Xv34 = np.linalg.norm(v23Xv34)

        # if dihedral angle is not defined, phi := -1.
        if l_v12Xv23 <= 1e-8 or l_v23Xv34 <= 1e-8:
            return -1
        else:
            cosphi = np.abs(np.dot(v12Xv23, v23Xv34)) / (
                l_v12Xv23 * l_v23Xv34)
            return np.arccos(cosphi)

    # Test Dihedral Angle
    def test_dihedral(self):
        p0, p1, p2, p3 = self.system.part.all()
        dh_k = 1
        dh_phase = np.pi / 6
        dh_n = 1
        dh = espressomd.interactions.Dihedral(
            bend=dh_k, mult=dh_n, phase=dh_phase)
        self.system.bonded_inter.add(dh)
        p1.add_bond((dh, p0, p2, p3))
        p2.pos = p1.pos + [1, 0, 0]

        N = 111
        d_phi = np.pi / (N * 4)
        # Sweep the dihedral angle by counter-rotating the two outer
        # particles around the central bond.
        for i in range(N):
            p0.pos = p1.pos + \
                rotate_vector(self.rel_pos_1, self.axis, i * d_phi)
            p3.pos = p2.pos + \
                rotate_vector(self.rel_pos_2, self.axis, -i * d_phi)
            # steps=0 with recalc_forces: evaluate without moving particles.
            self.system.integrator.run(recalc_forces=True, steps=0)

            # Calculate energies
            E_sim = self.system.analysis.energy()["bonded"]
            phi = self.dihedral_angle(p0.pos, p1.pos, p2.pos, p3.pos)
            E_ref = dihedral_potential(dh_k, phi, dh_n, dh_phase)

            # Calculate forces
            f2_sim = p1.f
            _, f2_ref, _ = dihedral_force(dh_k, dh_n, dh_phase,
                                          p0.pos, p1.pos, p2.pos, p3.pos)

            # Check that energies match, ...
            np.testing.assert_almost_equal(E_sim, E_ref)
            # and has correct value.
            f2_sim_copy = np.copy(f2_sim)
            np.testing.assert_almost_equal(f2_sim_copy, f2_ref)

    # Test Tabulated Dihedral Angle
    @utx.skipIfMissingFeatures(["TABULATED"])
    def test_tabulated_dihedral(self):
        p0, p1, p2, p3 = self.system.part.all()
        N = 111
        d_phi = 2 * np.pi / N
        # tabulated values for the range [0, 2*pi]
        tab_energy = [np.cos(i * d_phi) for i in range(N + 1)]
        tab_force = [np.cos(i * d_phi) for i in range(N + 1)]
        dihedral_tabulated = espressomd.interactions.TabulatedDihedral(
            energy=tab_energy, force=tab_force)
        self.system.bonded_inter.add(dihedral_tabulated)
        p1.add_bond((dihedral_tabulated, p0, p2, p3))
        p2.pos = p1.pos + [1, 0, 0]

        # check stored parameters
        interaction_id = len(self.system.bonded_inter) - 1
        tabulated = self.system.bonded_inter[interaction_id]
        np.testing.assert_allclose(tabulated.params['force'], tab_force)
        np.testing.assert_allclose(tabulated.params['energy'], tab_energy)
        np.testing.assert_almost_equal(tabulated.params['min'], 0.)
        np.testing.assert_almost_equal(tabulated.params['max'], 2 * np.pi)

        # measure at half the angular resolution to observe interpolation
        for i in range(2 * N - 1):
            # increase dihedral angle by d_phi (phi ~ 0 at i = 0)
            p0.pos = p1.pos + \
                rotate_vector(self.rel_pos_1, self.axis, -i * d_phi / 4)
            p3.pos = p2.pos + \
                rotate_vector(self.rel_pos_1, self.axis, i * d_phi / 4)
            self.system.integrator.run(recalc_forces=True, steps=0)

            # Calculate energies
            E_sim = self.system.analysis.energy()["bonded"]

            # Get tabulated values: even steps hit table nodes exactly,
            # odd steps fall midway and should be linearly interpolated.
            j = i // 2
            if i % 2 == 0:
                E_ref = tab_energy[j]
            else:
                E_ref = (tab_energy[j] + tab_energy[j + 1]) / 2.0

            # Check that energies match, ...
            np.testing.assert_almost_equal(E_sim, E_ref)
# Run the bonded-interaction tests when executed as a script.
if __name__ == '__main__':
    ut.main()
|
espressomd/espresso
|
testsuite/python/interactions_dihedral.py
|
Python
|
gpl-3.0
| 7,112
|
[
"ESPResSo"
] |
13acd50dff9eb943ce122dc0d69c5d9c12ab840cd51373e4e3490dbd337b12de
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""
| Database (Truhlar) of non-hydrogen-transfer barrier height reactions.
| Geometries and Reaction energies from Truhlar and coworkers at site http://t1.chem.umn.edu/misc/database_group/database_therm_bh/non_H.htm.
- **cp** ``'off'``
- **rlxd** ``'off'``
- **subset**
- ``'small'``
- ``'large'``
"""
import re
import qcdb
# <<< NHTBH Database Module >>>
dbse = 'NHTBH'
isOS = 'true'

# <<< Database Members >>>
HRXN = range(1, 39)
HRXN_SM = [3, 4, 31, 32]
HRXN_LG = [36]

# <<< Chemical Systems Involved >>>
# Each reaction is an ordered tuple of reagent names.  The final name in
# every tuple is the transition-state structure (stoichiometric weight +1);
# all preceding names are the consumed species (weight -1 each).
_RXN_REAGENTS = {
    1:  ('H', 'N2O', 'N2OHts'),
    2:  ('OH', 'N2', 'N2OHts'),
    3:  ('H', 'HF', 'HFHts'),
    4:  ('H', 'HF', 'HFHts'),
    5:  ('H', 'HCl', 'HClHts'),
    6:  ('H', 'HCl', 'HClHts'),
    7:  ('H', 'CH3F', 'HFCH3ts'),
    8:  ('HF', 'CH3', 'HFCH3ts'),
    9:  ('H', 'F2', 'HF2ts'),
    10: ('HF', 'F', 'HF2ts'),
    11: ('CH3', 'ClF', 'CH3FClts'),
    12: ('CH3F', 'Cl', 'CH3FClts'),
    13: ('F_anion', 'CH3F', 'FCH3Fts'),
    14: ('F_anion', 'CH3F', 'FCH3Fts'),
    15: ('FCH3Fcomp', 'FCH3Fts'),
    16: ('FCH3Fcomp', 'FCH3Fts'),
    17: ('Cl_anion', 'CH3Cl', 'ClCH3Clts'),
    18: ('Cl_anion', 'CH3Cl', 'ClCH3Clts'),
    19: ('ClCH3Clcomp', 'ClCH3Clts'),
    20: ('ClCH3Clcomp', 'ClCH3Clts'),
    21: ('F_anion', 'CH3Cl', 'FCH3Clts'),
    22: ('CH3F', 'Cl_anion', 'FCH3Clts'),
    23: ('FCH3Clcomp1', 'FCH3Clts'),
    24: ('FCH3Clcomp2', 'FCH3Clts'),
    25: ('OH_anion', 'CH3F', 'HOCH3Fts'),
    26: ('CH3OH', 'F_anion', 'HOCH3Fts'),
    27: ('HOCH3Fcomp2', 'HOCH3Fts'),
    28: ('HOCH3Fcomp1', 'HOCH3Fts'),
    29: ('H', 'N2', 'HN2ts'),
    30: ('HN2', 'HN2ts'),
    31: ('H', 'CO', 'HCOts'),
    32: ('HCO', 'HCOts'),
    33: ('H', 'C2H4', 'C2H5ts'),
    34: ('C2H5', 'C2H5ts'),
    35: ('CH3', 'C2H4', 'C3H7ts'),
    36: ('C3H7', 'C3H7ts'),
    37: ('HCN', 'HCNts'),
    38: ('HNC', 'HCNts'),
}

RXNM = {}  # reaction matrix of reagent contributions per reaction
ACTV = {}  # order of active reagents per reaction
for _rxn, _reagents in _RXN_REAGENTS.items():
    _key = '%s-%s' % (dbse, _rxn)
    ACTV[_key] = ['%s-%s-reagent' % (dbse, _rgt) for _rgt in _reagents]
    # consumed species carry -1; the closing transition state carries +1
    RXNM[_key] = dict(zip(ACTV[_key], [-1] * (len(_reagents) - 1) + [+1]))

# <<< Reference Values >>>
# Benchmark barrier heights (kcal/mol) for reactions 1-38, listed in order.
_BARRIERS = [
    18.14, 83.22, 42.18, 42.18, 18.00, 18.00, 30.38, 57.02,
    2.27, 106.18, 7.43, 60.17, -0.34, -0.34, 13.38, 13.38,
    3.10, 3.10, 13.61, 13.61, -12.54, 20.11, 2.89, 29.62,
    -2.78, 17.33, 10.96, 47.20, 14.69, 10.72, 3.17, 22.68,
    1.72, 41.75, 6.85, 32.97, 48.16, 33.11,
]
BIND = {}
for _idx, _bh in enumerate(_BARRIERS, start=1):
    BIND['%s-%s' % (dbse, _idx)] = _bh
# <<< Comment Lines >>>
# TAGL maps each reaction index and each reagent label to a human-readable
# description.  For reactions, the braces { } enclose the half-reaction whose
# barrier (relative to the bracketed transition state) the BIND value reports.
TAGL = {}
TAGL['%s-%s' % (dbse, 1)] = '{ H + N2O <-- [HN2O] } --> OH + N2'
TAGL['%s-%s' % (dbse, 2)] = 'H + N2O <-- { [HN2O] --> OH + N2 }'
TAGL['%s-%s' % (dbse, 3)] = '{ H + FH <-- [HFH] } --> HF + H'
TAGL['%s-%s' % (dbse, 4)] = 'H + FH <-- { [HFH] --> HF + H }'
TAGL['%s-%s' % (dbse, 5)] = '{ H + ClH <-- [HClH] } --> HCl + H'
TAGL['%s-%s' % (dbse, 6)] = 'H + ClH <-- { [HClH] --> HCl + H }'
TAGL['%s-%s' % (dbse, 7)] = '{ H + FCH3 <-- [HFCH3] } --> HF + CH3'
TAGL['%s-%s' % (dbse, 8)] = 'H + FCH3 <-- { [HFCH3] --> HF + CH3 }'
TAGL['%s-%s' % (dbse, 9)] = '{ H + F2 <-- [HF2] } --> HF + F'
TAGL['%s-%s' % (dbse, 10)] = 'H + F2 <-- { [HF2] --> HF + F }'
TAGL['%s-%s' % (dbse, 11)] = '{ CH3 + FCl <-- [CH3FCl] } --> CH3F + Cl'
TAGL['%s-%s' % (dbse, 12)] = 'CH3 + FCl <-- { [CH3FCl] --> CH3F + Cl }'
TAGL['%s-%s' % (dbse, 13)] = '{ F- + CH3F <-- [FCH3F-] } --> FCH3 + F-'
TAGL['%s-%s' % (dbse, 14)] = 'F- + CH3F <-- { [FCH3F-] --> FCH3 + F- }'
TAGL['%s-%s' % (dbse, 15)] = '{ F- ... CH3F <-- [FCH3F-] } --> FCH3 ... F-'
TAGL['%s-%s' % (dbse, 16)] = 'F- ... CH3F <-- { [FCH3F-] --> FCH3 ... F- }'
TAGL['%s-%s' % (dbse, 17)] = '{ Cl- + CH3Cl <-- [ClCH3Cl-] } --> ClCH3 + Cl-'
TAGL['%s-%s' % (dbse, 18)] = 'Cl- + CH3Cl <-- { [ClCH3Cl-] --> ClCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 19)] = '{ Cl- ... CH3Cl <-- [ClCH3Cl-] } --> ClCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 20)] = 'Cl- ... CH3Cl <-- { [ClCH3Cl-] --> ClCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 21)] = '{ F- + CH3Cl <-- [FCH3Cl-] } --> FCH3 + Cl-'
TAGL['%s-%s' % (dbse, 22)] = 'F- + CH3Cl <-- { [FCH3Cl-] --> FCH3 + Cl- }'
TAGL['%s-%s' % (dbse, 23)] = '{ F- ... CH3Cl <-- [FCH3Cl-] } --> FCH3 ... Cl-'
TAGL['%s-%s' % (dbse, 24)] = 'F- ... CH3Cl <-- { [FCH3Cl-] --> FCH3 ... Cl- }'
TAGL['%s-%s' % (dbse, 25)] = '{ OH- + CH3F <-- [OHCH3F-] } --> HOCH3 + F-'
TAGL['%s-%s' % (dbse, 26)] = 'OH- + CH3F <-- { [OHCH3F-] --> HOCH3 + F- }'
TAGL['%s-%s' % (dbse, 27)] = '{ OH- ... CH3F <-- [OHCH3F-] } --> HOCH3 ... F-'
TAGL['%s-%s' % (dbse, 28)] = 'OH- ... CH3F <-- { [OHCH3F-] --> HOCH3 ... F- }'
TAGL['%s-%s' % (dbse, 29)] = '{ H + N2 <-- [HN2] } --> HN2'
TAGL['%s-%s' % (dbse, 30)] = 'H + N2 <-- { [HN2] --> HN2 }'
TAGL['%s-%s' % (dbse, 31)] = '{ H + CO <-- [HCO] } --> HCO'
TAGL['%s-%s' % (dbse, 32)] = 'H + CO <-- { [HCO] --> HCO }'
TAGL['%s-%s' % (dbse, 33)] = '{ H + C2H4 <-- [HC2H4] } --> CH3CH2'
TAGL['%s-%s' % (dbse, 34)] = 'H + C2H4 <-- { [HC2H4] --> CH3CH2 }'
TAGL['%s-%s' % (dbse, 35)] = '{ CH3 + C2H4 <-- [CH3C2H4] } --> CH3CH2CH2'
TAGL['%s-%s' % (dbse, 36)] = 'CH3 + C2H4 <-- { [CH3C2H4] --> CH3CH2CH2 }'
TAGL['%s-%s' % (dbse, 37)] = '{ HCN <-- [HCN] } --> HNC'
TAGL['%s-%s' % (dbse, 38)] = 'HCN <-- { [HCN] --> HNC }'
# Labels for the individual reagent structures (minima and transition states).
TAGL['%s-%s-reagent' % (dbse, 'C2H4' )] = 'Ethene'
TAGL['%s-%s-reagent' % (dbse, 'C2H5ts' )] = 'Transition State of H + C2H4 <--> CH3CH2'
TAGL['%s-%s-reagent' % (dbse, 'C2H5' )] = 'C2H5'
TAGL['%s-%s-reagent' % (dbse, 'C3H7ts' )] = 'Transition State of CH3 + C2H4 <--> CH3CH2CH2'
TAGL['%s-%s-reagent' % (dbse, 'C3H7' )] = 'C3H7'
TAGL['%s-%s-reagent' % (dbse, 'CH3Cl' )] = 'CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3FClts' )] = 'Transition State of CH3 + FCL <--> CH3F + Cl'
TAGL['%s-%s-reagent' % (dbse, 'CH3F' )] = 'CH3F'
TAGL['%s-%s-reagent' % (dbse, 'CH3OH' )] = 'Methanol'
TAGL['%s-%s-reagent' % (dbse, 'CH3' )] = 'CH3'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = 'Complex of Cl- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'ClCH3Clts' )] = 'Transition State of Cl- + CH3Cl <--> ClCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'ClF' )] = 'ClF'
TAGL['%s-%s-reagent' % (dbse, 'Cl_anion' )] = 'Chloride Anion'
TAGL['%s-%s-reagent' % (dbse, 'Cl' )] = 'Chlorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'CO' )] = 'Carbon Monoxide'
TAGL['%s-%s-reagent' % (dbse, 'F2' )] = 'Fluorine Molecule'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = 'Complex of F- + CH3Cl'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = 'Complex of FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Clts' )] = 'Transition State of F- + CH3Cl <--> FCH3 + Cl-'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fcomp' )] = 'Complex of F- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'FCH3Fts' )] = 'Transition State of F- CH3F <--> FCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'F_anion' )] = 'Fluoride Anion'
TAGL['%s-%s-reagent' % (dbse, 'F' )] = 'Fluorine Atom'
TAGL['%s-%s-reagent' % (dbse, 'HClHts' )] = 'Transition State of H + ClH <--> HCl + H'
TAGL['%s-%s-reagent' % (dbse, 'HCl' )] = 'Hydrogen Chloride'
TAGL['%s-%s-reagent' % (dbse, 'HCNts' )] = 'Transition State of HCN <--> HNC'
TAGL['%s-%s-reagent' % (dbse, 'HCN' )] = 'Hydrogen Cyanide'
TAGL['%s-%s-reagent' % (dbse, 'HCOts' )] = 'Transition State of H + CO <--> HCO'
TAGL['%s-%s-reagent' % (dbse, 'HCO' )] = 'HCO'
TAGL['%s-%s-reagent' % (dbse, 'HF2ts' )] = 'Transition State of H + F2 <--> HF + F'
TAGL['%s-%s-reagent' % (dbse, 'HFCH3ts' )] = 'Transition State of H + FCH3 <--> HF + CH3'
TAGL['%s-%s-reagent' % (dbse, 'HFHts' )] = 'Transition State of H + FH <--> HF + H'
TAGL['%s-%s-reagent' % (dbse, 'HF' )] = 'Hydrogen Fluoride'
TAGL['%s-%s-reagent' % (dbse, 'HN2ts' )] = 'Transition State of H + N2 <--> HN2'
TAGL['%s-%s-reagent' % (dbse, 'HN2' )] = 'HN2'
TAGL['%s-%s-reagent' % (dbse, 'HNC' )] = 'HNC'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = 'Complex of HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = 'Complex of OH- + CH3F'
TAGL['%s-%s-reagent' % (dbse, 'HOCH3Fts' )] = 'Transition State of OH- + CH3F <--> HOCH3 + F-'
TAGL['%s-%s-reagent' % (dbse, 'H' )] = 'Hydrogen Atom'
TAGL['%s-%s-reagent' % (dbse, 'N2OHts' )] = 'Transition State of H + N2O <--> OH + N2'
TAGL['%s-%s-reagent' % (dbse, 'N2O' )] = 'N2O'
TAGL['%s-%s-reagent' % (dbse, 'N2' )] = 'Nitrogen Molecule'
TAGL['%s-%s-reagent' % (dbse, 'OH_anion' )] = 'Hydroxide Anion'
TAGL['%s-%s-reagent' % (dbse, 'OH' )] = 'OH'
# <<< Geometry Specification Strings >>>
# Cartesian geometry for every reagent, wrapped as a qcdb.Molecule.  The first
# line of each string is "charge multiplicity"; coordinates are in angstroms.
# NOTE: the geometry strings are runtime data and must not be reformatted.
GEOS = {}
GEOS['%s-%s-reagent' % (dbse, 'C2H4')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66559300
C 0.00000000 -0.00000000 -0.66559300
H 0.00000000 0.92149500 1.23166800
H 0.00000000 -0.92149500 1.23166800
H 0.00000000 0.92149500 -1.23166800
H 0.00000000 -0.92149500 -1.23166800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5ts')] = qcdb.Molecule("""
0 2
C -0.56787700 0.00005100 -0.21895800
C 0.75113900 -0.00003600 0.04193200
H -1.49388400 -0.00048800 1.53176500
H -1.10169100 0.92065100 -0.40862600
H -1.10202200 -0.92023400 -0.40911000
H 1.29912800 -0.92234400 0.17376300
H 1.29889900 0.92232500 0.17436300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C2H5')] = qcdb.Molecule("""
0 2
C -0.25871900 -0.81682900 0.00000000
C -0.25098700 0.67419100 0.00000000
H 0.75883000 -1.22593900 0.00000000
H -0.75883000 -1.21386600 0.88341900
H -0.75883000 -1.21386600 -0.88341900
H -0.17002100 1.22593900 -0.92432000
H -0.17002100 1.22593900 0.92432000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7ts')] = qcdb.Molecule("""
0 2
C -0.47213200 0.64593300 -0.00004300
C -1.38261700 -0.36388500 -0.00000200
H -0.23204400 1.16457500 -0.91726400
H -0.23234200 1.16475900 0.91716900
H -1.72712800 -0.80981000 0.92251900
H -1.72693600 -0.81013100 -0.92243500
C 1.61201500 -0.24218900 0.00003500
H 2.19518200 0.66867100 -0.00126900
H 1.58942300 -0.80961900 -0.91863200
H 1.59024500 -0.80759800 0.91996900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'C3H7')] = qcdb.Molecule("""
0 2
C 1.20844000 -0.28718900 0.00005700
C -0.06535900 0.57613200 -0.00005700
C -1.31478700 -0.23951800 -0.00001100
H 1.24136900 -0.92839500 0.88123400
H 1.24139400 -0.92858600 -0.88098000
H 2.10187100 0.33872700 0.00000000
H -0.04821800 1.22685100 -0.87708900
H -0.04827200 1.22703700 0.87683400
H -1.72914600 -0.61577100 0.92443500
H -1.72876300 -0.61641500 -0.92436900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3Cl')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -1.12588600
Cl 0.00000000 0.00000000 0.65683000
H 0.00000000 1.02799300 -1.47026400
H 0.89026800 -0.51399700 -1.47026400
H -0.89026800 -0.51399700 -1.47026400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3FClts')] = qcdb.Molecule("""
0 2
Cl 1.45474900 -0.00123700 -0.00004000
F -0.32358700 0.00463100 0.00012400
C -2.38741800 -0.00214700 -0.00007300
H -2.49508600 -0.85536100 -0.64940400
H -2.49731300 -0.13867300 1.06313900
H -2.50153700 0.98626900 -0.41373400
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3F')] = qcdb.Molecule("""
0 1
C -0.63207400 0.00000100 -0.00000000
F 0.74911700 0.00000200 -0.00000200
H -0.98318200 -0.33848900 0.97262500
H -0.98322200 1.01155300 -0.19317200
H -0.98320300 -0.67308400 -0.77943700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3OH')] = qcdb.Molecule("""
0 1
C -0.04642300 0.66306900 0.00000000
O -0.04642300 -0.75506300 0.00000000
H -1.08695600 0.97593800 0.00000000
H 0.86059200 -1.05703900 0.00000000
H 0.43814500 1.07159400 0.88953900
H 0.43814500 1.07159400 -0.88953900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CH3')] = qcdb.Molecule("""
0 2
C 0.00000000 0.00000000 0.00000000
H 1.07731727 0.00000000 0.00000000
H -0.53865863 0.93298412 0.00000000
H -0.53865863 -0.93298412 -0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clcomp')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 -2.38473500
C 0.00000000 0.00000000 -0.56633100
H 0.00000000 1.02506600 -0.22437900
H -0.88773400 -0.51253300 -0.22437900
H 0.88773400 -0.51253300 -0.22437900
Cl 0.00000000 0.00000000 2.62421300
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClCH3Clts')] = qcdb.Molecule("""
-1 1
Cl 2.32258100 -0.00013200 0.00014000
C -0.00008500 0.00049100 -0.00050900
H 0.00007700 -0.74429000 -0.76760500
H -0.00032000 -0.29144300 1.02802100
H 0.00008100 1.03721800 -0.26195900
Cl -2.32254200 -0.00012900 0.00013000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'ClF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
Cl 1.63033021 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl_anion')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'Cl')] = qcdb.Molecule("""
0 2
Cl 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'CO')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 0.00000000
C 1.12960815 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F2')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
F 1.39520410 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp1')] = qcdb.Molecule("""
-1 1
Cl 0.00000000 0.00000000 1.62313800
C 0.00000000 0.00000000 -0.22735800
H 0.00000000 1.02632100 -0.55514100
H 0.88882000 -0.51316000 -0.55514100
H -0.88882000 -0.51316000 -0.55514100
F 0.00000000 0.00000000 -2.72930800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clcomp2')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.64853900
C 0.00000000 0.00000000 -1.24017000
H 0.00000000 1.02471900 -0.88640600
H -0.88743200 -0.51235900 -0.88640600
H 0.88743200 -0.51235900 -0.88640600
Cl 0.00000000 0.00000000 1.99629900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Clts')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -2.53792900
C 0.00000000 0.00000000 -0.48837200
H 0.00000000 1.06208700 -0.61497200
H -0.91979500 -0.53104400 -0.61497200
H 0.91979500 -0.53104400 -0.61497200
Cl 0.00000000 0.00000000 1.62450100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fcomp')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 -1.84762600
C 0.00000000 0.00000000 -0.42187300
H 0.00000000 1.02358100 -0.07384300
H -0.88644700 -0.51179100 -0.07384300
H 0.88644700 -0.51179100 -0.07384300
F 0.00000000 0.00000000 2.15348900
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'FCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.00309800 -0.01889200 -0.01545600
C -0.00014900 -0.00014000 1.80785700
H 1.06944900 0.00170800 1.80976100
H -0.53660700 0.92513300 1.79693500
H -0.53260100 -0.92778300 1.81705800
F -0.00319100 0.01997400 3.63184500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F_anion')] = qcdb.Molecule("""
-1 1
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'F')] = qcdb.Molecule("""
0 2
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HClHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.48580000
Cl 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.48580000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCl')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 1.27444789 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCNts')] = qcdb.Molecule("""
0 1
C 0.08031900 0.62025800 0.00000000
N 0.08031900 -0.56809500 0.00000000
H -1.04414800 0.25512100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCN')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.50036500
N 0.00000000 0.00000000 0.65264000
H 0.00000000 0.00000000 -1.56629100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCOts')] = qcdb.Molecule("""
0 2
H -1.52086400 1.38882900 0.00000000
C 0.10863300 0.54932900 0.00000000
O 0.10863300 -0.58560100 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HCO')] = qcdb.Molecule("""
0 2
H -0.00905700 0.00000000 -0.00708600
C -0.00703500 0.00000000 1.10967800
O 0.95604000 0.00000000 1.78565600
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF2ts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 -2.23127300
F 0.00000000 0.00000000 -0.61621800
F 0.00000000 0.00000000 0.86413800
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFCH3ts')] = qcdb.Molecule("""
0 2
H -0.03976400 0.00000000 0.04410600
F -0.04932100 0.00000000 1.28255400
C -0.06154400 0.00000000 2.95115700
H 0.99049700 0.00000000 3.19427500
H -0.59007000 0.91235500 3.18348100
H -0.59007000 -0.91235500 3.18348100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HFHts')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 1.13721700
F 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 -1.13721700
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HF')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.91538107 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2ts')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.12281100 0.00000000 0.00000000
H 1.78433286 1.26844651 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HN2')] = qcdb.Molecule("""
0 2
N 0.00000000 0.00000000 0.00000000
N 1.17820000 0.00000000 0.00000000
H 1.64496947 0.93663681 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HNC')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.73724800
N 0.00000000 0.00000000 0.43208900
H 0.00000000 0.00000000 1.42696000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp1')] = qcdb.Molecule("""
-1 1
C -1.29799700 -0.38951800 -0.00003400
O -0.47722300 0.72802100 0.00005400
H -2.35192200 -0.08023200 -0.00863900
H -1.14085300 -1.03582100 -0.87810100
H -1.15317800 -1.02751300 0.88635900
H 0.51058000 0.37116000 0.00024300
F 1.74901600 -0.19051700 -0.00001000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fcomp2')] = qcdb.Molecule("""
-1 1
F 0.00037100 -2.46834000 0.02139000
C -0.27664200 -1.07441800 -0.00269000
H 0.64929000 -0.51650000 -0.00901600
H -0.84198900 -0.84711900 -0.89707500
H -0.85102800 -0.82658900 0.88141700
O -0.30171300 1.58252400 -0.20654400
H -0.60511200 2.49243400 -0.16430500
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'HOCH3Fts')] = qcdb.Molecule("""
-1 1
F 0.02253600 -0.00745300 0.00552900
C -0.01842000 0.00503700 1.76492500
H 1.04805000 0.00524000 1.85414600
H -0.54781900 0.93470700 1.79222400
H -0.54895500 -0.92343300 1.80576200
O 0.00126500 0.01920000 3.75059900
H -0.92676300 0.03161500 3.99758100
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'H')] = qcdb.Molecule("""
0 2
H 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2OHts')] = qcdb.Molecule("""
0 2
H -0.30328600 -1.93071200 0.00000000
O -0.86100600 -0.62152600 0.00000000
N 0.00000000 0.25702700 0.00000000
N 1.02733300 0.72910400 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2O')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.12056262 0.00000000 0.00000000
O 2.30761092 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'N2')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.00000000
N 1.09710935 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH_anion')] = qcdb.Molecule("""
-1 1
O 0.00000000 0.00000000 0.00000000
H 0.96204317 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-reagent' % (dbse, 'OH')] = qcdb.Molecule("""
0 2
O 0.00000000 0.00000000 0.00000000
H 0.96889819 0.00000000 0.00000000
units angstrom
""")
#########################################################################

# <<< Supplementary Quantum Chemical Results >>>
# Precomputed nuclear repulsion energies (hartree) for every reagent geometry,
# stored under DATA['NUCLEAR REPULSION ENERGY'] with keys of the form
# 'NHTBH-<reagent>-reagent'.
_NRE_BY_REAGENT = [
    ('H', 0.00000000),
    ('N2O', 60.94607766),
    ('N2OHts', 65.68644495),
    ('OH', 4.36931115),
    ('N2', 23.63454766),
    ('HF', 5.20285489),
    ('HFHts', 8.60854029),
    ('HCl', 7.05875275),
    ('HClHts', 12.28739648),
    ('CH3F', 37.42304655),
    ('HFCH3ts', 38.79779200),
    ('CH3', 9.69236444),
    ('F2', 30.72192369),
    ('HF2ts', 33.44223409),
    ('F', 0.00000000),
    ('ClF', 49.66117442),
    ('CH3FClts', 95.59999471),
    ('Cl', 0.00000000),
    ('F_anion', 0.00000000),
    ('FCH3Fts', 66.36618410),
    ('FCH3Fcomp', 64.36230187),
    ('Cl_anion', 0.00000000),
    ('CH3Cl', 51.37857642),
    ('ClCH3Clts', 110.27962403),
    ('ClCH3Clcomp', 107.04230687),
    ('FCH3Clts', 86.10066616),
    ('FCH3Clcomp1', 86.07639241),
    ('FCH3Clcomp2', 79.90981772),
    ('OH_anion', 4.40044460),
    ('HOCH3Fts', 69.00558005),
    ('CH3OH', 40.39337431),
    ('HOCH3Fcomp2', 67.43072234),
    ('HOCH3Fcomp1', 73.17394204),
    ('HN2ts', 27.37488066),
    ('HN2', 27.50439999),
    ('CO', 22.48612142),
    ('HCOts', 25.76648888),
    ('HCO', 26.50985233),
    ('C2H4', 33.42351838),
    ('C2H5ts', 36.85248528),
    ('C2H5', 36.97781691),
    ('C3H7ts', 70.26842595),
    ('C3H7', 75.86161869),
    ('HCN', 23.92417344),
    ('HCNts', 24.04634812),
    ('HNC', 24.19729155),
]
DATA = {}
DATA['NUCLEAR REPULSION ENERGY'] = dict(
    ('NHTBH-%s-reagent' % _name, _val) for _name, _val in _NRE_BY_REAGENT)
|
rmcgibbo/psi4public
|
psi4/share/psi4/databases/NHTBH.py
|
Python
|
lgpl-3.0
| 36,605
|
[
"Psi4"
] |
287a4fa46aa04a735eee98a06d12711cb09f256461c9d4adc5107cd32dc722d8
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that all files contain proper licensing information."""
import optparse
import os.path
import subprocess
import sys
def PrintUsage():
  """Print command-line usage help for checklicenses.py to stdout."""
  # NOTE: Python 2 print statement; the triple-quoted string is user-visible
  # output and must not be reworded.
  print """Usage: python checklicenses.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checklicenses".
--ignore-suppressions Ignores path-specific license whitelist. Useful when
trying to remove a suppression/whitelist entry.
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything.
Examples:
python checklicenses.py
python checklicenses.py --root ~/chromium/src third_party"""
# License strings (exactly as emitted by licensecheck.pl) that are acceptable
# anywhere in the tree.
WHITELISTED_LICENSES = [
    'Apache (v2.0)',
    'Apache (v2.0) BSD (2 clause)',
    'Apache (v2.0) GPL (v2)',
    'Apple MIT',  # https://fedoraproject.org/wiki/Licensing/Apple_MIT_License
    'APSL (v2)',
    'APSL (v2) BSD (4 clause)',
    'BSD',
    'BSD (2 clause)',
    'BSD (2 clause) MIT/X11 (BSD like)',
    'BSD (3 clause)',
    'BSD (3 clause) ISC',
    'BSD (3 clause) LGPL (v2 or later)',
    'BSD (3 clause) LGPL (v2.1 or later)',
    'BSD (3 clause) MIT/X11 (BSD like)',
    'BSD (4 clause)',
    'BSD-like',
    # TODO(phajdan.jr): Make licensecheck not print BSD-like twice.
    'BSD-like MIT/X11 (BSD like)',
    'BSL (v1.0)',
    'GPL (v2 or later) with Bison parser exception',
    'GPL (v2 or later) with libtool exception',
    'GPL (v3 or later) with Bison parser exception',
    'GPL with Bison parser exception',
    'ISC',
    'LGPL',
    'LGPL (v2)',
    'LGPL (v2 or later)',
    'LGPL (v2.1)',
    'LGPL (v3 or later)',
    # TODO(phajdan.jr): Make licensecheck convert that comma to a dot.
    'LGPL (v2,1 or later)',
    'LGPL (v2.1 or later)',
    'MPL (v1.0) LGPL (v2 or later)',
    'MPL (v1.1)',
    'MPL (v1.1) BSD-like',
    'MPL (v1.1) BSD-like GPL (unversioned/unknown version)',
    'MPL (v1.1,) BSD (3 clause) GPL (unversioned/unknown version) '
    'LGPL (v2.1 or later)',
    'MPL (v1.1) GPL (unversioned/unknown version)',
    'MPL (v2.0)',
    # TODO(phajdan.jr): Make licensecheck not print the comma after 1.1.
    'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2 or later)',
    'MPL (v1.1,) GPL (unversioned/unknown version) LGPL (v2.1 or later)',
    'MIT/X11 (BSD like)',
    'Ms-PL',
    'Public domain',
    'Public domain BSD',
    'Public domain BSD (3 clause)',
    'Public domain BSD-like',
    'Public domain LGPL (v2.1 or later)',
    'libpng',
    'zlib/libpng',
    'SGI Free Software License B',
    'University of Illinois/NCSA Open Source License (BSD like)',
]
# Path prefixes (relative to the repository root) where the listed
# non-whitelisted license strings are tolerated.  Most entries track a
# crbug.com cleanup issue; 'UNKNOWN' means licensecheck could not classify
# the file.
PATH_SPECIFIC_WHITELISTED_LICENSES = {
    'base/hash.cc': [  # http://crbug.com/98100
        'UNKNOWN',
    ],
    'base/third_party/icu': [  # http://crbug.com/98087
        'UNKNOWN',
    ],
    # http://code.google.com/p/google-breakpad/issues/detail?id=450
    'breakpad/src': [
        'UNKNOWN',
    ],
    'chrome/common/extensions/docs/examples': [  # http://crbug.com/98092
        'UNKNOWN',
    ],
    'chrome/test/data/gpu/vt': [
        'UNKNOWN',
    ],
    'chrome/test/data/layout_tests/LayoutTests': [
        'UNKNOWN',
    ],
    'courgette/third_party/bsdiff_create.cc': [  # http://crbug.com/98095
        'UNKNOWN',
    ],
    'data/mozilla_js_tests': [
        'UNKNOWN',
    ],
    'data/page_cycler': [
        'UNKNOWN',
        'GPL (v2 or later)',
    ],
    'data/tab_switching': [
        'UNKNOWN',
    ],
    'googleurl': [  # http://code.google.com/p/google-url/issues/detail?id=15
        'UNKNOWN',
    ],
    'native_client': [  # http://crbug.com/98099
        'UNKNOWN',
    ],
    'native_client/toolchain': [
        'BSD GPL (v2 or later)',
        'BSD (2 clause) GPL (v2 or later)',
        'BSD (3 clause) GPL (v2 or later)',
        'BSL (v1.0) GPL',
        'BSL (v1.0) GPL (v3.1)',
        'GPL',
        'GPL (unversioned/unknown version)',
        'GPL (v2)',
        'GPL (v2 or later)',
        'GPL (v3.1)',
        'GPL (v3 or later)',
    ],
    'net/tools/spdyshark': [
        'GPL (v2 or later)',
        'UNKNOWN',
    ],
    # http://crbug.com/98107
    'ppapi/c/documentation/check.sh': [
        'UNKNOWN',
    ],
    'ppapi/cpp/documentation/check.sh': [
        'UNKNOWN',
    ],
    'ppapi/lib/gl/include': [
        'UNKNOWN',
    ],
    'ppapi/native_client/tests/earth/earth_image.inc': [
        'UNKNOWN',
    ],
    'third_party/WebKit': [
        'UNKNOWN',
    ],
    'third_party/WebKit/Websites/webkit.org/blog/wp-content/plugins/'
    'akismet/akismet.php': [
        'GPL (v2 or later)'
    ],
    'third_party/WebKit/Source/JavaScriptCore/tests/mozilla': [
        'GPL',
        'GPL (v2 or later)',
        'GPL (unversioned/unknown version)',
    ],
    'third_party/active_doc': [  # http://crbug.com/98113
        'UNKNOWN',
    ],
    # http://code.google.com/p/angleproject/issues/detail?id=217
    'third_party/angle': [
        'UNKNOWN',
    ],
    'third_party/bsdiff/mbsdiff.cc': [
        'UNKNOWN',
    ],
    'third_party/bzip2': [
        'UNKNOWN',
    ],
    'third_party/cld/encodings/compact_lang_det': [  # http://crbug.com/98120
        'UNKNOWN',
    ],
    # Not used. http://crbug.com/156020
    # Using third_party/cros_dbus_cplusplus/cros_dbus_cplusplus.gyp instead.
    'third_party/cros_dbus_cplusplus/source/autogen.sh': [
        'UNKNOWN',
    ],
    # Included in the source tree but not built. http://crbug.com/156020
    'third_party/cros_dbus_cplusplus/source/examples': [
        'UNKNOWN',
    ],
    'third_party/devscripts': [
        'GPL (v2 or later)',
    ],
    'third_party/expat/files/lib': [  # http://crbug.com/98121
        'UNKNOWN',
    ],
    'third_party/ffmpeg': [
        'GPL',
        'GPL (v2)',
        'GPL (v2 or later)',
        'UNKNOWN',  # http://crbug.com/98123
    ],
    'third_party/findbugs/doc': [  # http://crbug.com/157206
        'UNKNOWN',
    ],
    'third_party/freetype2': [  # http://crbug.com/177319
        'UNKNOWN',
    ],
    'third_party/gles2_book': [  # http://crbug.com/98130
        'UNKNOWN',
    ],
    'third_party/gles2_conform/GTF_ES': [  # http://crbug.com/98131
        'UNKNOWN',
    ],
    'third_party/harfbuzz': [  # http://crbug.com/98133
        'UNKNOWN',
    ],
    'third_party/hunspell': [  # http://crbug.com/98134
        'UNKNOWN',
    ],
    'third_party/hyphen/hyphen.tex': [  # http://crbug.com/157375
        'UNKNOWN',
    ],
    'third_party/iccjpeg': [  # http://crbug.com/98137
        'UNKNOWN',
    ],
    'third_party/icu': [  # http://crbug.com/98301
        'UNKNOWN',
    ],
    'third_party/jemalloc': [  # http://crbug.com/98302
        'UNKNOWN',
    ],
    'third_party/lcov': [  # http://crbug.com/98304
        'UNKNOWN',
    ],
    'third_party/lcov/contrib/galaxy/genflat.pl': [
        'GPL (v2 or later)',
    ],
    'third_party/lcov-1.9/contrib/galaxy/genflat.pl': [
        'GPL (v2 or later)',
    ],
    'third_party/libevent': [  # http://crbug.com/98309
        'UNKNOWN',
    ],
    'third_party/libjingle/source/talk': [  # http://crbug.com/98310
        'UNKNOWN',
    ],
    'third_party/libjingle/source_internal/talk': [  # http://crbug.com/98310
        'UNKNOWN',
    ],
    'third_party/libjpeg': [  # http://crbug.com/98313
        'UNKNOWN',
    ],
    'third_party/libjpeg_turbo': [  # http://crbug.com/98314
        'UNKNOWN',
    ],
    'third_party/libpng': [  # http://crbug.com/98318
        'UNKNOWN',
    ],
    # The following files lack license headers, but are trivial.
    'third_party/libusb/src/libusb/os/poll_posix.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/libusb/version.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/autogen.sh': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/config.h': [
        'UNKNOWN',
    ],
    'third_party/libusb/src/msvc/config.h': [
        'UNKNOWN',
    ],
    'third_party/libvpx/source': [  # http://crbug.com/98319
        'UNKNOWN',
    ],
    'third_party/libvpx/source/libvpx/examples/includes': [
        'GPL (v2 or later)',
    ],
    'third_party/libwebp': [  # http://crbug.com/98448
        'UNKNOWN',
    ],
    'third_party/libxml': [
        'UNKNOWN',
    ],
    'third_party/libxslt': [
        'UNKNOWN',
    ],
    'third_party/lzma_sdk': [
        'UNKNOWN',
    ],
    'third_party/mesa/MesaLib': [
        'GPL (v2)',
        'GPL (v3 or later)',
        'MIT/X11 (BSD like) GPL (v3 or later) with Bison parser exception',
        'UNKNOWN',  # http://crbug.com/98450
    ],
    'third_party/modp_b64': [
        'UNKNOWN',
    ],
    'third_party/npapi/npspy/extern/java': [
        'GPL (unversioned/unknown version)',
    ],
    'third_party/openssl': [  # http://crbug.com/98451
        'UNKNOWN',
    ],
    'third_party/ots/tools/ttf-checksum.py': [  # http://code.google.com/p/ots/issues/detail?id=2
        'UNKNOWN',
    ],
    'third_party/molokocacao': [  # http://crbug.com/98453
        'UNKNOWN',
    ],
    'third_party/npapi/npspy': [
        'UNKNOWN',
    ],
    'third_party/ocmock/OCMock': [  # http://crbug.com/98454
        'UNKNOWN',
    ],
    'third_party/opus/src': [  # http://crbug.com/156738
        'UNKNOWN',
    ],
    'third_party/ply/__init__.py': [
        'UNKNOWN',
    ],
    'third_party/protobuf': [  # http://crbug.com/98455
        'UNKNOWN',
    ],
    'third_party/pylib': [
        'UNKNOWN',
    ],
    'third_party/scons-2.0.1/engine/SCons': [  # http://crbug.com/98462
        'UNKNOWN',
    ],
    'third_party/simplejson': [
        'UNKNOWN',
    ],
    'third_party/skia': [  # http://crbug.com/98463
        'UNKNOWN',
    ],
    'third_party/snappy/src': [  # http://crbug.com/98464
        'UNKNOWN',
    ],
    'third_party/smhasher/src': [  # http://crbug.com/98465
        'UNKNOWN',
    ],
    'third_party/sqlite': [
        'UNKNOWN',
    ],
    'third_party/swig/Lib/linkruntime.c': [  # http://crbug.com/98585
        'UNKNOWN',
    ],
    'third_party/talloc': [
        'GPL (v3 or later)',
        'UNKNOWN',  # http://crbug.com/98588
    ],
    'third_party/tcmalloc': [
        'UNKNOWN',  # http://crbug.com/98589
    ],
    'third_party/tlslite': [
        'UNKNOWN',
    ],
    'third_party/webdriver': [  # http://crbug.com/98590
        'UNKNOWN',
    ],
    'third_party/webrtc': [  # http://crbug.com/98592
        'UNKNOWN',
    ],
    'third_party/xdg-utils': [  # http://crbug.com/98593
        'UNKNOWN',
    ],
    'third_party/yasm/source': [  # http://crbug.com/98594
        'UNKNOWN',
    ],
    'third_party/zlib/contrib/minizip': [
        'UNKNOWN',
    ],
    'third_party/zlib/trees.h': [
        'UNKNOWN',
    ],
    'tools/dromaeo_benchmark_runner/dromaeo_benchmark_runner.py': [
        'UNKNOWN',
    ],
    'tools/emacs': [  # http://crbug.com/98595
        'UNKNOWN',
    ],
    'tools/grit/grit/node/custom/__init__.py': [
        'UNKNOWN',
    ],
    'tools/gyp/test': [
        'UNKNOWN',
    ],
    'tools/histograms': [
        'UNKNOWN',
    ],
    'tools/memory_watcher': [
        'UNKNOWN',
    ],
    'tools/playback_benchmark': [
        'UNKNOWN',
    ],
    'tools/python/google/__init__.py': [
        'UNKNOWN',
    ],
    'tools/site_compare': [
        'UNKNOWN',
    ],
    'tools/stats_viewer/Properties/AssemblyInfo.cs': [
        'UNKNOWN',
    ],
    'tools/symsrc/pefile.py': [
        'UNKNOWN',
    ],
    'v8/test/cctest': [  # http://crbug.com/98597
        'UNKNOWN',
    ],
    'webkit/data/ico_decoder': [
        'UNKNOWN',
    ],
}
def check_licenses(options, args):
  """Run licensecheck.pl over the tree and report non-whitelisted licenses.

  options: parsed optparse options (base_directory, verbose,
           ignore_suppressions).
  args:    at most one directory, relative to the base directory, to scan.
  Returns 0 on success, 1 on usage error, licensecheck failure, or when a
  non-whitelisted license is found.
  """
  # Figure out which directory we have to check.
  if len(args) == 0:
    # No directory to check specified, use the repository root.
    start_dir = options.base_directory
  elif len(args) == 1:
    # Directory specified. Start here. It's supposed to be relative to the
    # base directory.
    start_dir = os.path.abspath(os.path.join(options.base_directory, args[0]))
  else:
    # More than one argument, we don't handle this.
    PrintUsage()
    return 1
  print "Using base directory:", options.base_directory
  print "Checking:", start_dir
  print
  licensecheck_path = os.path.abspath(os.path.join(options.base_directory,
                                                   'third_party',
                                                   'devscripts',
                                                   'licensecheck.pl'))
  # -l 100: scan the first 100 lines of each file; -r: recurse.
  licensecheck = subprocess.Popen([licensecheck_path,
                                   '-l', '100',
                                   '-r', start_dir],
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
  stdout, stderr = licensecheck.communicate()
  if options.verbose:
    print '----------- licensecheck stdout -----------'
    print stdout
    print '--------- end licensecheck stdout ---------'
  # Any stderr output is treated as a failure of the checker itself.
  if licensecheck.returncode != 0 or stderr:
    print '----------- licensecheck stderr -----------'
    print stderr
    print '--------- end licensecheck stderr ---------'
    print "\nFAILED\n"
    return 1
  success = True
  # Each output line has the form "<path>: <license classification>".
  for line in stdout.splitlines():
    filename, license = line.split(':', 1)
    filename = os.path.relpath(filename.strip(), options.base_directory)
    # All files in the build output directory are generated one way or another.
    # There's no need to check them.
    if filename.startswith('out/') or filename.startswith('sconsbuild/'):
      continue
    # For now we're just interested in the license.
    license = license.replace('*No copyright*', '').strip()
    # Skip generated files.
    if 'GENERATED FILE' in license:
      continue
    if license in WHITELISTED_LICENSES:
      continue
    # Path-specific suppressions, unless explicitly disabled.
    if not options.ignore_suppressions:
      found_path_specific = False
      for prefix in PATH_SPECIFIC_WHITELISTED_LICENSES:
        if (filename.startswith(prefix) and
            license in PATH_SPECIFIC_WHITELISTED_LICENSES[prefix]):
          found_path_specific = True
          break
      if found_path_specific:
        continue
    print "'%s' has non-whitelisted license '%s'" % (filename, license)
    success = False
  if success:
    print "\nSUCCESS\n"
    return 0
  else:
    print "\nFAILED\n"
    print "Please read",
    print "http://www.chromium.org/developers/adding-3rd-party-libraries"
    print "for more info how to handle the failure."
    print
    print "Please respect OWNERS of checklicenses.py. Changes violating"
    print "this requirement may be reverted."
    return 1
def main():
  """Parse command-line options and delegate to check_licenses()."""
  # "../.." relative to this script, i.e. the repository root for the
  # canonical <root>/tools/checklicenses location.
  repo_root = os.path.abspath(
      os.path.join(os.path.dirname(__file__), '..', '..'))
  parser = optparse.OptionParser()
  parser.add_option('--root', default=repo_root,
                    dest='base_directory',
                    help='Specifies the repository root. This defaults '
                         'to "../.." relative to the script file, which '
                         'will normally be the repository root.')
  parser.add_option('-v', '--verbose', action='store_true',
                    default=False, help='Print debug logging')
  parser.add_option('--ignore-suppressions',
                    action='store_true',
                    default=False,
                    help='Ignore path-specific license whitelist.')
  opts, leftover = parser.parse_args()
  return check_licenses(opts, leftover)
# Script entry point: propagate check result as the process exit status.
if __name__ == '__main__':
  sys.exit(main())
|
zcbenz/cefode-chromium
|
tools/checklicenses/checklicenses.py
|
Python
|
bsd-3-clause
| 15,813
|
[
"Galaxy"
] |
811fed8698b6ab34ba1d0be5755a23d3e3dbce6a55d08992fd8ea559a0f56b54
|
"""
:copyright: (c) 2014 Building Energy Inc
:license: see LICENSE for more details.
"""
from salad.steps.everything import *
from lettuce import step
from django.core.urlresolvers import reverse
from landing.features.steps import *
@step(u'I visit the home page')
def i_visit_the_home_page(step):
    """Browse to the SEED home page."""
    home_url = django_url(reverse("seed:home"))
    world.browser.visit(home_url)
@step(u'I go to the jasmine unit tests for the SEED')
def given_i_go_to_the_jasmine_unit_tests_for_the_SEED(step):
    """Open the AngularJS/Jasmine test-runner page."""
    tests_url = django_url(reverse("seed:angular_js_tests"))
    world.browser.visit(tests_url)
@step(u'I should see that the tests passed')
def then_i_should_see_that_the_tests_passed(step):
    """Assert that the Jasmine runner shows its green "passing" banner.

    Checks once after a short settle delay; if the banner is not present
    yet, waits much longer and re-checks before failing for real.
    """
    time.sleep(2)
    try:
        assert world.browser.is_element_present_by_css(".passingAlert.bar")
    except Exception:
        # Was a bare 'except:', which also swallowed KeyboardInterrupt and
        # SystemExit; catching Exception keeps Ctrl-C working during the
        # long retry wait.
        time.sleep(50)
        assert len(world.browser.find_by_css(".passingAlert.bar")) > 0
@step(u'When I visit the projects page')
def when_i_visit_the_projects_page(step):
    """Navigate to the #/projects Angular route."""
    projects_url = django_url(reverse("seed:home")) + "#/projects"
    world.browser.visit(projects_url)
@step(u'Then I should see my projects')
def then_i_should_see_my_projects(step):
    """Verify the page shows the Projects heading and the seeded project."""
    for expected_text in ('Projects', 'my project'):
        assert world.browser.is_text_present(expected_text)
@step(u'And I have a project')
def and_i_have_a_project(step):
    """Seed one Project owned by the test user's organization."""
    project_fields = {
        'name': "my project",
        'super_organization_id': world.org.id,
        'owner': world.user,
    }
    Project.objects.create(**project_fields)
@step(u'And I have a dataset')
def and_i_have_a_dataset(step):
    """Seed one ImportRecord ("dataset") for the test organization."""
    record_fields = {
        'name': 'dataset 1',
        'super_organization': world.org,
        'owner': world.user,
    }
    ImportRecord.objects.create(**record_fields)
@step(u'When I visit the dataset page')
def when_i_visit_the_dataset_page(step):
    """Navigate to the #/data Angular route."""
    data_url = django_url(reverse("seed:home")) + "#/data"
    world.browser.visit(data_url)
@step(u'And I delete a dataset')
def and_i_delete_a_dataset(step):
    """Click the dataset delete icon and confirm the browser alert."""
    world.browser.find_by_css('.delete_link').click()
    confirm_dialog = world.browser.get_alert()
    confirm_dialog.accept()
@step(u'Then I should see no datasets')
def then_i_should_see_no_datasets(step):
    """Assert that every dataset row is gone after deletion."""
    # The original issued the identical .import_row query three times in a
    # row and kept only the last result -- a copy/paste artifact with no
    # retry effect (no waiting between calls), so one query is equivalent.
    number_of_datasets = len(world.browser.find_by_css('.import_row'))
    assert number_of_datasets == 0
|
buildingenergy/buildingenergy-platform
|
seed/features/steps.py
|
Python
|
apache-2.0
| 2,282
|
[
"VisIt"
] |
9d2ff3b7eb112457dc3596839bf56e38cec4975d8d493e5163db2de73ba758bf
|
#!/usr/bin/env python3
################################################################
# copyright (c) 2017,2018 by William R. Pearson and The Rector &
# Visitors of the University of Virginia */
################################################################
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under this License is distributed on an "AS
# IS" BASIS, WITHOUT WRRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
################################################################
################################################################
# annot_blast_btop4.py --query query.file --ann_script ann_pfam_www.pl --include_doms blast_tab_btop_file
################################################################
# annot_blast_btop4.py associates domain annotation information and
# subalignment scores with a blast tabular (-outfmt 6 or -outfmt 7)
# file that contains the raw score and the BTOP alignment encoding
# This file can be generated from "blastp/n" or "blast_formatter"
# using the command:
# blast_formatter -archive blast_output.asn -outfmt '7 qseqid sseqid pident length mismatch gapopen qstart qend sstart send evalue bitscore score btop' > blast_output.tab_annot
#
# If the BTOP field or query_file is not available, the script
# produces domain content without sub-alignment scores.
################################################################
## 2-Dec-2019
# added --have_qslen, --raw_score/--no_raw_score
# made more robust to multiple HSPs when using --ann_file
#
################################################################
## 4-Nov-2018
# add --include_doms, which adds a new field with the coordinates of
# the domains in the protein (independent of alignment)
#
################################################################
## 21-July-2018
# include sequence length (actually alignment end) to produce NODOM's (no NODOM's without length).
#
################################################################
## 13-Jan-2017
# modified to provide query/subject coordinates and identities if no
# query sequence -- does not decrement for reverse-complement fastx/blastx DNA
################################################################
## 16-Nov-2015
# modify to allow multi-query blast searches
################################################################
## 19-Dec-2015
# add -q_annot_script to annotate query sequence
#
import argparse
import fileinput
import sys
import re
import shutil
import subprocess
from math import log
# read lines of the form:
# gi|121694|sp|P20432.1|GSTT1_DROME gi|121694|sp|P20432|GSTT1_DROME 100.00 209 0 0 1 209 1 209 6e-156 433 1113 209
# gi|121694|sp|P20432.1|GSTT1_DROME gi|1170090|sp|P04907|GSTF3_MAIZE 26.77 198 123 7 4 185 6 197 2e-08 51.2 121 FL1YG ... 1NKRA1YW1
# gi|121694|sp|P20432.1|GSTT1_DROME gi|81174731|sp|P0ACA5|SSPA_ECO57 39.66 58 32 2 43 100 49 103 8e-06 43.9 102 EDFLLI ... V-I-NEQS3FM
# gi|121694|sp|P20432.1|GSTT1_DROME gi|121695|sp|P12653|GSTF1_MAIZE 27.62 181 107 7 32 203 34 199 9e-05 40.8 94 LI1LF ... N-1AS1CLLM1
# and report the domain content ala -m 8CC
def init_blosum62():
    """Build the BLOSUM62 scoring tables used to re-score BTOP alignments.

    Returns a 4-tuple:
      blosum62      -- substitution matrix as a dict of dicts, blosum62[a][b]
      blosum62_diag -- identity scores, blosum62_diag[a] == blosum62[a][a]
      g_open, g_ext -- affine gap-open (-11) and gap-extend (-1) penalties
    """
    # ncbi_blaa -- NCBI amino-acid alphabet; fixes row/column order
    ncbi_blaa = "A R N D C Q E G H I L K M F P S T W Y V B Z X *".split(' ')
    blosum62 = {}
    blosum62['A'] = dict(zip(ncbi_blaa,[ 4,-1,-2,-2, 0,-1,-1, 0,-2,-1,-1,-1,-1,-2,-1, 1, 0,-3,-2, 0,-2,-1, 0,-4]))
    blosum62['R'] = dict(zip(ncbi_blaa,[-1, 5, 0,-2,-3, 1, 0,-2, 0,-3,-2, 2,-1,-3,-2,-1,-1,-3,-2,-3,-1, 0,-1,-4]))
    blosum62['N'] = dict(zip(ncbi_blaa,[-2, 0, 6, 1,-3, 0, 0, 0, 1,-3,-3, 0,-2,-3,-2, 1, 0,-4,-2,-3, 3, 0,-1,-4]))
    blosum62['D'] = dict(zip(ncbi_blaa,[-2,-2, 1, 6,-3, 0, 2,-1,-1,-3,-4,-1,-3,-3,-1, 0,-1,-4,-3,-3, 4, 1,-1,-4]))
    blosum62['C'] = dict(zip(ncbi_blaa,[ 0,-3,-3,-3, 9,-3,-4,-3,-3,-1,-1,-3,-1,-2,-3,-1,-1,-2,-2,-1,-3,-3,-2,-4]))
    blosum62['Q'] = dict(zip(ncbi_blaa,[-1, 1, 0, 0,-3, 5, 2,-2, 0,-3,-2, 1, 0,-3,-1, 0,-1,-2,-1,-2, 0, 3,-1,-4]))
    blosum62['E'] = dict(zip(ncbi_blaa,[-1, 0, 0, 2,-4, 2, 5,-2, 0,-3,-3, 1,-2,-3,-1, 0,-1,-3,-2,-2, 1, 4,-1,-4]))
    blosum62['G'] = dict(zip(ncbi_blaa,[ 0,-2, 0,-1,-3,-2,-2, 6,-2,-4,-4,-2,-3,-3,-2, 0,-2,-2,-3,-3,-1,-2,-1,-4]))
    blosum62['H'] = dict(zip(ncbi_blaa,[-2, 0, 1,-1,-3, 0, 0,-2, 8,-3,-3,-1,-2,-1,-2,-1,-2,-2, 2,-3, 0, 0,-1,-4]))
    blosum62['I'] = dict(zip(ncbi_blaa,[-1,-3,-3,-3,-1,-3,-3,-4,-3, 4, 2,-3, 1, 0,-3,-2,-1,-3,-1, 3,-3,-3,-1,-4]))
    blosum62['L'] = dict(zip(ncbi_blaa,[-1,-2,-3,-4,-1,-2,-3,-4,-3, 2, 4,-2, 2, 0,-3,-2,-1,-2,-1, 1,-4,-3,-1,-4]))
    blosum62['K'] = dict(zip(ncbi_blaa,[-1, 2, 0,-1,-3, 1, 1,-2,-1,-3,-2, 5,-1,-3,-1, 0,-1,-3,-2,-2, 0, 1,-1,-4]))
    blosum62['M'] = dict(zip(ncbi_blaa,[-1,-1,-2,-3,-1, 0,-2,-3,-2, 1, 2,-1, 5, 0,-2,-1,-1,-1,-1, 1,-3,-1,-1,-4]))
    blosum62['F'] = dict(zip(ncbi_blaa,[-2,-3,-3,-3,-2,-3,-3,-3,-1, 0, 0,-3, 0, 6,-4,-2,-2, 1, 3,-1,-3,-3,-1,-4]))
    blosum62['P'] = dict(zip(ncbi_blaa,[-1,-2,-2,-1,-3,-1,-1,-2,-2,-3,-3,-1,-2,-4, 7,-1,-1,-4,-3,-2,-2,-1,-2,-4]))
    blosum62['S'] = dict(zip(ncbi_blaa,[ 1,-1, 1, 0,-1, 0, 0, 0,-1,-2,-2, 0,-1,-2,-1, 4, 1,-3,-2,-2, 0, 0, 0,-4]))
    blosum62['T'] = dict(zip(ncbi_blaa,[ 0,-1, 0,-1,-1,-1,-1,-2,-2,-1,-1,-1,-1,-2,-1, 1, 5,-2,-2, 0,-1,-1, 0,-4]))
    # BUG FIX: this row read "[-3 -3,-4,..." (missing comma); the first two
    # scores collapsed to a single -6 and the row was one element short, so
    # every blosum62['W'] lookup was shifted by one column.
    blosum62['W'] = dict(zip(ncbi_blaa,[-3,-3,-4,-4,-2,-2,-3,-2,-2,-3,-2,-3,-1, 1,-4,-3,-2,11, 2,-3,-4,-3,-2,-4]))
    blosum62['Y'] = dict(zip(ncbi_blaa,[-2,-2,-2,-3,-2,-1,-2,-3, 2,-1,-1,-2,-1, 3,-3,-2,-2, 2, 7,-1,-3,-2,-1,-4]))
    blosum62['V'] = dict(zip(ncbi_blaa,[ 0,-3,-3,-3,-1,-2,-2,-3,-3, 3, 1,-2, 1,-1,-2,-2, 0,-3,-1, 4,-3,-2,-1,-4]))
    blosum62['B'] = dict(zip(ncbi_blaa,[-2,-1, 3, 4,-3, 0, 1,-1, 0,-3,-4, 0,-3,-3,-2, 0,-1,-4,-3,-3, 4, 1,-1,-4]))
    blosum62['Z'] = dict(zip(ncbi_blaa,[-1, 0, 0, 1,-3, 3, 4,-2, 0,-3,-3, 1,-1,-3,-1, 0,-1,-3,-2,-2, 1, 4,-1,-4]))
    blosum62['X'] = dict(zip(ncbi_blaa,[ 0,-1,-1,-1,-2,-1,-1,-1,-1,-1,-1,-1,-1,-1,-2, 0, 0,-2,-1,-1,-1,-1,-1,-4]))
    blosum62['*'] = dict(zip(ncbi_blaa,[-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4,-4, 1]))
    # sanity check: one matrix row per alphabet symbol
    if (len(blosum62.keys()) != len(ncbi_blaa)):
        sys.stderr.write(" blosum62 length mismatch %d != %d\n" % (len(blosum62), len(ncbi_blaa)))
        print(' '.join(ncbi_blaa), file=sys.stderr)
        print(' '.join(blosum62.keys()), file=sys.stderr)
        exit(1)
    blosum62_diag = {x: blosum62[x][x] for x in ncbi_blaa}
    return (blosum62, blosum62_diag, -11, -1)
################
# read_annots (\@hit_list)
# input: hit_entry['s_seq_id, etc'], target
# output: modified $hit_entry['domains']
# modified $hit_entry['sites']
#
# extend to make robust to multiple hits on the same subject
def read_annots(Reader):
    """Parse an annotation stream into {seq_id: {'domains': [...], 'sites': [...]}}.

    The stream holds '>seq_id ...' headers followed by tab-separated
    annotation lines.  A '-' in the second field marks a domain
    (pos, '-', end, descr); any other value marks a site
    (pos, type, value, descr).  Lines starting with '=' and blank lines
    are skipped.  Closes Reader before returning.
    """
    target_set = {}
    current_domain = ""
    seq_domains = []   # domains collected for the current sequence
    seq_sites = []     # sites collected for the current sequence
    re_df = re.compile(r' :(\d+)$')   # rewrite trailing " :N" as "~N" in descriptions
    for line in Reader:
        line = line.strip("\n")
        if (not line or line[0] == '='):   # blank-line guard added; '=' lines ignored
            continue
        if (line[0] == '>'):
            # flush the annotations of the previous sequence, if any
            if (current_domain):
                if (current_domain not in target_set):
                    target_set[current_domain] = {}
                    target_set[current_domain]['domains'] = [d for d in seq_domains]
                    target_set[current_domain]['sites'] = [s for s in seq_sites]
                else:
                    sys.stderr.write("*** phase error: %s duplicate\n" % (current_domain))
            seq_domains = []
            seq_sites = []
            current_domain = line.split(' ')[0][1:]
        else:
            a_fields = line.split('\t')
            a_fields[0] = int(a_fields[0])
            if (a_fields[1] == '-'):
                # domain entry: pos, '-', end, description
                a_fields[2] = int(a_fields[2])
                annot_info = dict(zip(('d_pos', 'type', 'd_end', 'descr'), a_fields))
                annot_info['descr'] = re_df.sub(r'~\1', annot_info['descr'])
                seq_domains.append(annot_info)
            else:
                # site entry: pos, type, value, description
                # BUG FIX: the original passed one 5-tuple to zip(), i.e.
                # zip((keys..., a_fields)), which made dict() raise on every
                # site line; keys and fields now pair up correctly.
                annot_info = dict(zip(('d_pos', 'type', 'd_val', 'descr'), a_fields))
                annot_info['d_end'] = annot_info['d_pos']   # sites are single positions
                seq_sites.append(annot_info)
    Reader.close()
    # flush the final sequence
    if (current_domain):
        if (current_domain not in target_set):
            target_set[current_domain] = {}
            target_set[current_domain]['domains'] = [d for d in seq_domains]
            target_set[current_domain]['sites'] = [s for s in seq_sites]
    return target_set
################
# merge_annots(hit_r):
#
# take different annotations in hit_r and put them in one list
#
def merge_annots(hit_r):
    """Combine query- and subject-side aligned domains into one list.

    Query annotations are tagged with target=1, subject annotations with
    target=0 (the tag is written into the annotation dicts in place);
    the merged list is ordered by alignment start ('qa_start').
    """
    combined = []
    for side_key, target_flag in (('q_aligned_domains', 1), ('aligned_domains', 0)):
        for entry in hit_r.get(side_key, []):
            entry['target'] = target_flag
            combined.append(entry)
    combined.sort(key=lambda entry: entry['qa_start'])
    return combined
################
# get_file_annots(file_name)
#
def get_file_annots(file_name, hit_list):
    """Read pre-computed annotations from file_name via read_annots().

    hit_list is accepted for interface symmetry with get_script_annots()
    but is not consulted here.
    """
    annot_fd = open(file_name, 'r')
    try:
        return read_annots(annot_fd)
    finally:
        annot_fd.close()   # read_annots() also closes; close() is idempotent
################
# get_script_annots(script_name, hit_list)
#
# set up stdin/stdout pipe to send in hit list info and read results
#
def get_script_annots(script_name, hit_list, key_list):
    """Run an annotation script and parse its output with read_annots().

    Feeds the script one "seq_id\\tseq_len" line per unique subject in
    hit_list; key_list names the two hit-dict keys that hold the id and
    the length.  script_name is run through the shell.
    """
    seq_set = {}   # ids already sent, so each subject is annotated only once
    proc = subprocess.Popen(script_name, stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, encoding='utf-8')
    for hit in hit_list:
        (seq_id, seq_len) = (hit[key_list[0]], hit[key_list[1]])
        if (seq_id not in seq_set):
            proc.stdin.write(("%s\t%s\n" % (seq_id, seq_len)))
            # BUG FIX: seq_set was never updated, so the membership test
            # above was always False and duplicate ids were re-sent.
            seq_set[seq_id] = True
    proc.stdin.close()
    # busy-wait until the script exits so its output is complete
    while (proc.returncode is None):
        proc.poll()
    return read_annots(proc.stdout)
################
#
# link_annots(hit_list, annot_set)
#
# put 'domains' and 'sites' into each hit in the hit list
#
def link_annots(hit_list, annot_set):
    """Attach 'domains'/'sites' lists from annot_set to matching hits.

    Each hit is looked up by its 's_seq_id'; hits with no entry in
    annot_set are left untouched.
    """
    for hit in hit_list:
        annots = annot_set.get(hit['s_seq_id'])
        if annots is None:
            continue
        for field in ('domains', 'sites'):
            if field in annots:
                hit[field] = annots[field]
# input: a blast BTOP string of the form: "1VA160TS7KG10RK27"
# returns a list_ref of tokens: (1, "VA", 60, "TS", 7, "KG, 10, "RK", 27)
#
def decode_btop(btop_str):
    """Split a BLAST BTOP encoding into match-count and residue-pair tokens.

    "1VA160TS7" -> ['1', 'VA', '160', 'TS', '7']: digit runs stay as
    strings, and each two-character substitution/gap pair becomes its own
    token.
    """
    pieces = re.split(r'(\d+)', btop_str)   # capturing split keeps the numbers
    if not pieces[0]:
        pieces = pieces[1:]
    decoded = []
    for piece in pieces:
        if piece.isdigit():
            decoded.append(piece)
        else:
            # a non-numeric run is a concatenation of residue pairs;
            # capturing split on '(..)' yields each pair (empties dropped)
            decoded.extend(pair for pair in re.split(r'(..)', piece) if pair)
    return decoded
def parse_query_lib(query_file):
    """Read a FASTA file into {seq_id: sequence_string}.

    The id is the first whitespace-delimited token of each '>' header;
    residues are upper-cased and non-alphabetic characters are stripped.
    Blank lines are tolerated (the original crashed on them with an
    IndexError).
    """
    query_seqs = {}
    with open(query_file, "r") as qfd:
        header = ''
        seq_data = ''
        for line in qfd:
            line = line.strip("\n")
            if (not line):
                continue
            if (line[0] == '>'):
                if (header):
                    # save the previous sequence
                    query_seqs[header] = seq_data
                # BUG FIX: seq_data was never reset here, so every sequence
                # after the first accumulated all preceding residues.
                seq_data = ''
                header = line[1:].split(' ')[0]
            else:
                line = re.sub(r'[^A-Za-z]', '', line)
                seq_data += line.upper()
        # save last entry
        if (header):
            query_seqs[header] = seq_data
    return query_seqs
# given: (1) a query sequence; (2) an encoded alignment; (3) a scoring matrix
# calculate:
# (1) the overall score
# (2) a per residue dictionary of scores and mappings from query -> subject
# (2) a per residue dictionary of scores and mappings from subject -> query
#
def alignment_score(query_r, hit, matrix_2d, matrix_diag, g_open, g_ext):
    """Re-score a BTOP-encoded alignment with an affine gap model.

    query_r      : query sequence string
    hit          : hit dict with 1-based 'q_start'/'s_start' and 'BTOP';
                   hit['btop_align'] is set to the decoded token list
    matrix_2d    : substitution scores, matrix_2d[res1][res2]
    matrix_diag  : identity scores, matrix_diag[res]
    g_open/g_ext : gap-open and gap-extend penalties (negative values)

    Returns (score, q_map, s_map); each map entry holds the cumulative
    score 's', the 0-based partner index 'y_ix' (-1 when aligned to a
    gap), and the residue 'res'.
    """
    query_start, subj_start = (int(hit['q_start']), int(hit['s_start']))
    btop_align_r = decode_btop(hit['BTOP'])
    hit['btop_align'] = btop_align_r
    q_map = []
    s_map = []
    gap0, gap1 = (False, False)   # currently inside a query-side / subject-side gap?
    q_ix = query_start - 1        # 0-based sequence cursors
    s_ix = subj_start - 1
    score = 0
    for btop in btop_align_r:
        if (re.search(r'^\d+$', btop)):   # run of identities
            # BUG FIX: an identity run ends any open gap, so a later gap
            # must be charged a fresh g_open (affine model).  The original
            # reset these flags only on a mismatch, mis-scoring any gap
            # that followed a gap-identities-gap pattern as an extension.
            gap0 = gap1 = False
            for i in range(0, int(btop)):
                res = query_r[q_ix]
                score += matrix_diag[res]
                q_map.append({'s': score, 'y_ix': s_ix, 'res': res})
                s_map.append({'s': score, 'y_ix': q_ix, 'res': res})
                q_ix += 1
                s_ix += 1
        else:
            seq0, seq1 = (btop[0], btop[1])
            if (re.search(r'\-', btop)):  # gap column
                if (seq0 == '-'):         # gap in the query
                    if gap0:
                        score += g_ext
                    else:
                        score += g_open + g_ext
                        gap0 = True
                    # 'y_ix':-1 indicates alignment to gap
                    s_map.append({'s': score, 'y_ix': -1, 'res': seq1})
                    s_ix += 1
                else:                     # gap in the subject
                    if gap1:
                        score += g_ext
                    else:
                        score += g_open + g_ext
                        gap1 = True
                    # 'y_ix':-1 indicates alignment to gap
                    q_map.append({'s': score, 'y_ix': -1, 'res': seq0})
                    q_ix += 1
            else:                         # substitution (mismatch, not gap)
                score += matrix_2d[seq0][seq1]
                gap1 = gap0 = False
                q_map.append({'s': score, 'y_ix': s_ix, 'res': seq0})
                s_map.append({'s': score, 'y_ix': q_ix, 'res': seq1})
                q_ix += 1
                s_ix += 1
    return score, q_map, s_map
################################################################
# sub_alignment_stats -- calculate stats for ONE domain entry
# given x_map, xa_start, xa_end where x=q/s depending on target
# domain_r : domain boundaries
#
# calculate a score, identity and boundaries in both sequences and return values
#
def one_sub_alignment_stats(domain_r, x_map, y_map, xa_start, xa_end, ya_start, ya_end):
    """Score the part of one annotated domain that overlaps the alignment.

    domain_r holds the domain bounds ('d_pos'/'d_end', same coordinate
    system as xa_start/xa_end); x_map/y_map are the cumulative-score maps
    from alignment_score() for the annotated and partner sequence.

    Returns (score, fraction_identity, x_start, x_end, y_start, y_end) for
    the overlapping region, or the bare int 0 when the domain falls
    entirely outside the alignment -- callers must length-check the result.
    NOTE(review): ya_end is accepted but never used here.
    """
    td_start, td_end = (domain_r['d_pos'],domain_r['d_end'])
    # domain entirely outside the aligned region
    if (td_end < xa_start or td_start > xa_end):
        return 0
    # clip the domain to the aligned region
    if (td_start < xa_start):
        td_start = xa_start
    if (td_end > xa_end):
        td_end = xa_end
    # convert to 0-based offsets into x_map
    td_start -= xa_start
    td_end -= xa_start
    # sub-alignment score = cumulative score at the domain end minus the
    # cumulative score just before the domain start
    left_score = 0
    if (td_start>0) :
        left_score = x_map[td_start-1]['s']
    score = x_map[td_end]['s'] - left_score
    # map[] coordinates are 0-based
    # ya_start = x_map[td_start]['y_ix']+1
    # ya_end = x_map[td_end]['y_ix']+1
    #### identity calculation:
    n_len = 0   # aligned (non-gap) columns in the clipped domain
    n_id = 0    # identical residue pairs among them
    for xi in range(td_start, td_end+1):
        this_x = x_map[xi]
        x_res=this_x['res']
        if (this_x['y_ix'] >= 0):
            n_len += 1
            # NOTE(review): this index arithmetic assumes y_map[0]
            # corresponds to partner position ya_start-1 (0-based) --
            # confirm against alignment_score()'s map construction.
            y_res = y_map[this_x['y_ix']-ya_start+1]['res']
            if (x_res.upper() == y_res.upper()):
                n_id += 1
    # NOTE(review): if every column of the overlap aligns to a gap,
    # n_len stays 0 and this division raises ZeroDivisionError.
    ident = float(n_id)/float(n_len)
    return score, ident, td_start+xa_start-1, td_end+xa_start-1, x_map[td_start]['y_ix'], x_map[td_end]['y_ix']
################
# get domain scores, idents, boundaries for list of domains
#
def do_sub_alignment_stats(domain_list, x_map, y_map, xa_start, xa_end, ya_start, ya_end, keys_str):
    """Run one_sub_alignment_stats() over domain_list and package each
    6-tuple result as a dict keyed by keys_str, carrying over the
    domain's 'type' and 'descr'.  Domains outside the alignment are
    dropped."""
    aligned_doms = []
    for dom in domain_list:
        stats = one_sub_alignment_stats(dom, x_map, y_map, xa_start, xa_end, ya_start, ya_end)
        if (not stats or len(stats) != 6):
            continue
        entry = dict(zip(keys_str, stats))
        entry['type'] = dom['type']
        entry['descr'] = dom['descr']
        aligned_doms.append(entry)
    return (aligned_doms)
####
# print raw domain info:
# |DX:%d-%d;C=dom_info|XD:%d-%d:C=dom_info
#
def format_dom_info(q_dom_r, dom_r):
    """Format alignment-independent domain coordinates.

    Query domains are emitted as |DX:start-end;C=descr, subject domains
    as |XD:start-end;C=descr, concatenated in that order.
    """
    parts = []
    for tag, dom_list in (("DX", q_dom_r), ("XD", dom_r)):
        for dom in dom_list:
            parts.append("|%s:%d-%d;C=%s" % (tag, dom['d_pos'], dom['d_end'], dom['descr']))
    return "".join(parts)
def format_annot_info(annot_list_r, hit):
annot_str = "";
# two types of annotations, domains and sites.
score_scale = hit['score']/hit['raw_score']
for annot_r in (annot_list_r ):
if (annot_r['type'] == '-'):
fsub_score = annot_r['score']/hit['raw_score']
ns_score, s_bit = (int(annot_r['score'] * score_scale + 0.5),
fsub_score * hit['bits'])
qval = 0.0
if (hit['evalue'] == 0.0):
if (s_bit > 50.0):
qval = 3000.0
else:
qval = -10.0 * (2.0*log(400.0) + s_bit)/log(10.0)
else:
qval = -10.0*log(hit['evalue'])*fsub_score/log(10.0)
if qval < 0.0:
qval = 0.0
rx_str = 'XR'
if (annot_r['target']):
rx_str = "RX"
annot_str += ';'.join(("|%s:%d-%d:%d-%d:s=%d"%(rx_str,
annot_r['qa_start']+1,annot_r['qa_end']+1,
annot_r['sa_start']+1,annot_r['sa_end']+1,ns_score),
"b=%.1f"%(s_bit),"I=%.3f"%(annot_r['ident']),
"Q=%.1f"%(qval),"C=%s"%(annot_r['descr'])))
else: # site annotation
ann_type = annot_r['type'];
site_str = "|%cX"%(ann_type)
if (annot_r['target'] == 1):
site_str = "|X%c"%(ann_type)
elif (annot_r['target'] == 2):
site_str = "|%c%c"%(ann_type, ann_type)
annot_str += "%s:"%(site_str)
annot_str += "%d%s%s%d%s"%(annot_r['qa_pos'], annot_r['q_res'], annot_r['m_symb'],
annot_r['sa_pos'], annot_r['s_res'])
return annot_str
def main(args):
    """Annotate BLAST tabular (BTOP) output with domain/site information.

    Reads one tabular result block from *args.files* (or stdin), recomputes
    raw alignment scores from the BTOP string and the query sequences,
    attaches query/subject annotations from files or scripts, and writes
    the augmented tabular rows to stdout.
    """
    blosum62, blosum62_diag, g_open, g_ext = init_blosum62()

    # query sequences are required to rescore alignments from BTOP strings
    if (args.query_file):
        # query_lib_r has a set of query sequences
        query_lib_r = parse_query_lib(args.query_file)
    else:
        sys.stderr.write("--query required\n")
        exit(1)

    tab_fields = "q_seqid s_seqid percid alen mismatch gopen q_start q_end s_start s_end evalue bits BTOP".split(' ')
    int_fields = "alen mismatch gopen q_start q_end s_start s_end".split(' ')
    float_fields = "percid evalue bits score".split(' ')

    if (args.have_qslen):
        # input also carries query/subject sequence lengths
        tab_fields = "q_seqid q_len s_seqid s_len percid alen mismatch gopen q_start q_end s_start s_end evalue bits BTOP".split(' ')
        int_fields = "q_len s_len alen mismatch gopen q_start q_end s_start s_end".split(' ')

    # the fields that are displayed are listed here. By default, all fields except score and BTOP are displayed.
    out_tab_fields = tab_fields[0:-1]
    in_tab_fields = tab_fields[0:-1]

    if (args.raw_out):
        out_tab_fields.append("raw_score")
    if (args.raw_in):
        in_tab_fields.append("score")

    ## always add BTOP
    in_tab_fields.append("BTOP")
    tab_fields = in_tab_fields

    if (args.out_fields):
        # BUG FIX: was "out_fields.split(...)" (a NameError) -- the option
        # value lives on the args namespace.
        out_tab_fields = args.out_fields.split(" ")

    header_lines = []
    next_line = ""
    have_data = False
    hit_list = []
    q_hit_list = []

    # collect the '#' header lines and the tabular rows of the first result
    # block; stop at the '#' line that starts the next block
    for line in fileinput.input(args.files):
        if (line[0] == '#'):
            if (have_data):
                next_line = line
                have_data = False
                break
            else:
                header_lines.append(line)
            continue
        have_data = True
        line = line.strip('\n')
        if (line):
            this_data = dict(zip(tab_fields, line.split("\t")))
            # convert numeric columns in place
            for k in this_data.keys():
                if (k in int_fields):
                    this_data[k] = int(this_data[k])
                if (k in float_fields):
                    this_data[k] = float(this_data[k])
            hit_list.append(this_data)

    # get the query annotations
    q_hit_list = []
    if (args.q_ann_file):
        q_seqid = hit_list[0]['q_seqid']
        q_hit_list.append({'s_seq_id':q_seqid, 's_end':len(query_lib_r[q_seqid])})
        q_annots = get_file_annots(args.q_ann_file, q_hit_list)
        link_annots(q_hit_list, q_annots)
    elif (args.q_ann_script):
        # '+' protects spaces in the script command line
        args.q_ann_script = re.sub(r'\+',' ',args.q_ann_script)
        if (args.q_ann_script and shutil.which(args.q_ann_script.split(" ")[0])):
            q_seqid = hit_list[0]['q_seqid']
            q_hit_list.append({'s_seq_id':q_seqid, 's_end':len(query_lib_r[q_seqid])})
            q_annots = get_script_annots(args.q_ann_script, q_hit_list, ['s_seq_id','s_end'])
            link_annots(q_hit_list, q_annots)

    # get the subject annotations
    # first set up the list with sequence lengths
    if (args.ann_file or args.ann_script):
        s_len = 100000  # fallback subject length when lengths are not in the input
        for hit in hit_list:
            hit['s_seq_id'] = hit['s_seqid']
            if (not args.have_qslen):
                hit['s_end'] = s_len
        if (args.ann_file):
            s_annots = get_file_annots(args.ann_file, hit_list)
            link_annots(hit_list, s_annots)
        elif (args.ann_script):
            args.ann_script = re.sub(r'\+',' ',args.ann_script)
            if (shutil.which(args.ann_script.split(" ")[0])):
                s_annots = get_script_annots(args.ann_script, hit_list,['s_seq_id','s_end'])
                link_annots(hit_list, s_annots)

    # echo the header of the processed block; hold the break-off line
    # (start of the next block) for the trailing output below
    for line in header_lines:
        print(line, end='')
    header_lines = [next_line]

    for hit in hit_list:
        list_covered = []
        # If I have an encoded alignment {BTOP} and a query sequence,
        # then I can calculate sub-alignment scores
        if ('BTOP' in hit and query_lib_r and hit['q_seqid'] in query_lib_r):
            # calculate raw_score and coordinate mappings
            hit['raw_score'], q_map, s_map = alignment_score(query_lib_r[hit['q_seqid']],
                                                             hit, blosum62, blosum62_diag, g_open, g_ext)
            if ('score' not in hit):
                hit['score'] = hit['raw_score']
            # calculate sub-alignment scores in subject/library coordinates
            if ('domains' in hit and len(hit['domains'])>0):
                hit['aligned_domains'] = do_sub_alignment_stats(
                    hit['domains'], s_map, q_map,
                    hit['s_start'],hit['s_end'],hit['q_start'],hit['q_end'],
                    ('score','ident','sa_start', 'sa_end', 'qa_start', 'qa_end'))
            # calculate sub-alignment scores in query coordinates
            if (len(q_hit_list) > 0 and 'domains' in q_hit_list[0] and len(q_hit_list[0]['domains'])>0):
                hit['q_aligned_domains'] = do_sub_alignment_stats(
                    q_hit_list[0]['domains'], q_map, s_map,
                    hit['q_start'],hit['q_end'],hit['s_start'],hit['s_end'],
                    ('score','ident','qa_start', 'qa_end', 'sa_start', 'sa_end'))

        ################
        ## final output display
        print("\t".join([str(hit[x]) for x in out_tab_fields]),end='') # show fields from original blast tabular file
        merged_annots_r = merge_annots(hit) # merge the four possible annotation lists into one.
        if (len(merged_annots_r)>0):
            print("\t"+format_annot_info(merged_annots_r, hit),end='')
            if (args.dom_info):
                if (len(q_hit_list) > 0 and 'domains' in q_hit_list[0]):
                    print("\t"+format_dom_info(q_hit_list[0]['domains'], hit['domains']),end='')
                else:
                    print("\t"+format_dom_info([], hit['domains']),end='')
        elif (len(list_covered)>0):
            print("\t" + ";".join(list_covered))
            if (args.dom_info):
                print("\t"+format_dom_info(q_hit_list[0]['domains'], hit['domains']),end='')
        print()

    # emit the deferred '#' line that terminated this block
    for line in header_lines:
        print(line,end="")
if __name__ == '__main__':
    # Echo the full command line into the output as a '#' comment so the
    # provenance of the annotated output is recorded in the stream itself.
    print('# ' + ' '.join(sys.argv))

    parser=argparse.ArgumentParser(description='annot_blast_btop4.py : annotate blast tabular format with BTOP ')
    # not implemented
    # parser.add_argument('--matrix', help='scoring matrix',dest='matrix',action='store',default='BL62')
    parser.add_argument('--ann_script', help='script for subject annotations',dest='ann_script',action='store')
    parser.add_argument('--q_ann_script', help='script for query annotations',dest='q_ann_script',action='store')
    parser.add_argument('--ann_file', help='subject annotation file',dest='ann_file',action='store')
    parser.add_argument('--q_ann_file', help='query annotation file',dest='q_ann_file',action='store')
    parser.add_argument('--have_qslen', help='query/subject lenghts in tab file',dest='have_qslen',action='store_true',default=False)
    parser.add_argument('--dom_info', help='show unaligned domain coordinates',dest='dom_info',action='store_true',default=False)
    parser.add_argument('--sub2query', help='get query annots from self-subject',dest='sub_query',action='store_true',default=False)
    parser.add_argument('--query', help='file of query sequences',dest='query_file',action='store')
    parser.add_argument('--out_fields', help='names/order of output fields',dest='out_fields',action='store')
    # raw score output is ON by default; both '--no_raw_score' and
    # '--no-raw_score' spellings are registered to disable it
    parser.add_argument('--raw_score', help='raw score after bit score',dest='raw_in',action='store_true',default=True)
    parser.add_argument('--no_raw_score', help='raw score after bit score',dest='raw_in',action='store_false', default=True)
    parser.add_argument('--no-raw_score', help='raw score after bit score',dest='raw_in',action='store_false', default=True)
    parser.add_argument('--raw_score_out', help='display raw score',dest='raw_out',action='store_true',default=False)
    parser.add_argument('files', metavar='FILE', help='Blast tabular BTOP files to read', nargs='*')

    args=parser.parse_args()
    main(args)
|
uwbmrb/BMRB-API
|
server/wsgi/bmrbapi/submodules/fasta36/scripts/annot_blast_btop4.py
|
Python
|
gpl-3.0
| 27,071
|
[
"BLAST"
] |
78e5d1991c2303bc1067fc5c896e76e3e50d6f19f5722097c4f8d1994a90cd5c
|
#!/usr/bin/python
#This is a program for count Gene on Ensemble data "process subsection",
#Author: Maurizio Polano, mauriziopolano@blu.it
#Last revision: 03/06/2014
import subprocess
import os,os.path
import sys
import time
import re
from optparse import OptionParser
def main():
    """Write _count.ense.sh, a shell script of samtools|htseq-count commands.

    Sample IDs come either from a comma-separated ``-i`` list or from a
    file of IDs given with ``-l``; the Count/ and de/ output directories
    are created if missing.  Note: ``-l`` overwrites any script written
    for ``-i`` (both open the same file in 'w' mode, as before).
    """
    # Fixes vs. the original: the parser was built twice (first instance
    # discarded), parse_args() was called twice, and the "Count" existence
    # check was duplicated.  'print >> fileh' (Python-2-only) is replaced
    # by the equivalent fileh.write(), keeping output byte-identical.
    parser = OptionParser(usage="usage: %prog -i [files ID,ID,ID] -d [directory ] -l list file id -h help ",
                          version="%prog 1.0")
    parser.add_option("-d", "--dir", dest="PosDir",type="string",help=" write input file: %prg -i: Please insert the name the Path of the results files [REQUIRED]")
    parser.add_option("-i", "--ID", dest="id_name",type="string",help=" write input file: %prg -i: Please insert the ID files [REQUIRED]")
    parser.add_option("-l", "--list", dest="file_id",type="string",help=" write input file: %prg -i: Please insert the file contain ID")
    (options, args) = parser.parse_args()

    # create the output directories once
    for outdir in ("Count", "de"):
        if not os.path.exists(outdir):
            os.makedirs(outdir)

    if options.id_name:
        ID = options.id_name.split(",")
        fileh = open("_count.ense.sh","w")
        for i in ID:
            ctr = options.PosDir+i+"_ens_sort.bam"
            cmd01 = 'samtools sort -no %s de/%s.subset.tmp |samtools view -|/illumina/software/PY276/bin/htseq-count --mode=intersection-nonempty --stranded=yes --type=exon --idattr=gene_id - /home/sbsuser/databases/bowtie2_ens/Homo_sapiens.GRCh37.72.gtf > Count/%s.count.out'%(ctr,i,i)
            fileh.write(cmd01 + "\n")
        fileh.close()

    if options.file_id:
        fileh = open("_count.ense.sh","w")
        with open(options.file_id) as p:
            for i in p:
                lines = i.strip("\n")
                ctr = options.PosDir+lines+"_ens_sort.bam"
                cmd01 = 'samtools sort -no %s de/%s.subset.tmp |samtools view -|/illumina/software/PY276/bin/htseq-count --mode=intersection-nonempty --stranded=yes --type=exon --idattr=gene_id - /home/sbsuser/databases/bowtie2_ens/Homo_sapiens.GRCh37.72.gtf > Count/%s.count.out'%(ctr,lines,lines)
                fileh.write(cmd01 + "\n")
        fileh.close()
# Standard script entry point.
if __name__ == "__main__":
    main()
|
bioinfo-dirty-jobs/Scriptechunks
|
Count_ensemble.py
|
Python
|
mit
| 2,237
|
[
"HTSeq"
] |
53a17a38b4c244a1964cf5308fdb56e381cbb6091892b8b30febd06793a0cb97
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Plot AMIGO style Gene Ontology graph using Graphviz.
"""
import os
import os.path as op
import sys
sys.path.insert(0, op.join(op.dirname(__file__), ".."))
from goatools.obo_parser import GODag, GraphEngines
if __name__ == "__main__":
    import optparse

    # Command-line interface: plot the lineage (parents/children) of a GO
    # term from an OBO file, optionally writing descriptions or GML output.
    p = optparse.OptionParser("%prog [obo_file]", description=__doc__)
    p.add_option(
        "--description",
        dest="desc",
        help="Write term descriptions to stdout from the obo file specified in args",
        action="store_true",
    )
    p.add_option(
        "--term",
        dest="term",
        help="Write the parents and children of the query term",
        action="store",
        type="string",
        default=None,
    )
    p.add_option(
        "--engine",
        default="pygraphviz",
        choices=GraphEngines,
        help="Graph plot engine, must be one of {} [default: %default]".format(
            "|".join(GraphEngines)
        ),
    )
    p.add_option(
        "--gml",
        action="store_true",
        help="Write GML output (for Cytoscape) [default: %default]",
    )
    p.add_option(
        "--disable-draw-parents",
        action="store_false",
        dest="draw_parents",
        help="Do not draw parents of the query term",
    )
    p.add_option(
        "--disable-draw-children",
        action="store_false",
        dest="draw_children",
        help="Do not draw children of the query term",
    )
    p.add_option(
        "--output",
        "-o",
        default="GO_lineage.pdf",
        help="Output filename, suffix is image format, common formats e.g. pdf|svg|png|jpg|... [default: %default]",
    )
    p.add_option(
        "--dpi",
        default=96,
        type="int",
        help="Output figure dpi, ignored by vector image formats like svg and pdf [default: %default]",
    )
    # both directions are drawn unless explicitly disabled above
    p.set_defaults(draw_parents=True)
    p.set_defaults(draw_children=True)

    opts, args = p.parse_args()

    # default to the standard basic ontology file if none was given
    if not args:
        obo_file = "go-basic.obo"
    else:
        obo_file = args[0]
    assert os.path.exists(obo_file), "file %s not found!" % obo_file

    g = GODag(obo_file)

    if opts.desc:
        g.write_dag()

    # run a test case
    if opts.term is not None:
        rec = g.query_term(opts.term, verbose=True)
        g.draw_lineage(
            [rec],
            dpi=opts.dpi,
            engine=opts.engine,
            gml=opts.gml,
            output=opts.output,
            draw_parents=opts.draw_parents,
            draw_children=opts.draw_children,
        )
|
tanghaibao/goatools
|
scripts/plot_go_term.py
|
Python
|
bsd-2-clause
| 2,575
|
[
"Cytoscape"
] |
4a413f38c420d16335ee5584ece7af38ea1e6457c184a4e6f2953c3991c44ff8
|
import numpy as np
from ase import Atom, Atoms
from ase.parallel import rank
from gpaw import GPAW
from gpaw.test import equal
try:
    # Restart from a previous calculation if the .gpw file already exists.
    calc = GPAW('NaCl.gpw')
    NaCl = calc.get_atoms()
    e = NaCl.get_potential_energy()
    niter = None
except IOError:
    # No restart file: set up the NaCl dimer and run the SCF calculation.
    h = 0.21  # gridspacing
    a = [6.5, 6.5, 7.7]  # unit cell
    d = 2.3608  # experimental bond length
    NaCl = Atoms([Atom('Na', [0, 0, 0]),
                  Atom('Cl', [0, 0, d])],
                 pbc=False, cell=a)
    NaCl.center()
    calc = GPAW(h=h, xc='LDA', nbands=5, lmax=0,
                setups={'Na': '1'},
                convergence={'eigenstates': 1e-6}, spinpol=1)
    NaCl.set_calculator(calc)
    e = NaCl.get_potential_energy()
    niter = calc.get_number_of_iterations()
    calc.write('NaCl.gpw')

# volume element of one grid point
dv = NaCl.get_volume() / calc.get_number_of_grid_points().prod()

# the integrated pseudo density must be independent of grid refinement
nt1 = calc.get_pseudo_density(gridrefinement=1)
Zt1 = nt1.sum() * dv
nt2 = calc.get_pseudo_density(gridrefinement=2)
Zt2 = nt2.sum() * dv / 8  # refined grid has 2**3 times as many points
print 'Integral of pseudo density:', Zt1, Zt2
equal(Zt1, Zt2, 1e-12)

# the all-electron density must integrate to 28 electrons (Na 11 + Cl 17)
for gridrefinement in [1, 2, 4]:
    n = calc.get_all_electron_density(gridrefinement=gridrefinement)
    Z = n.sum() * dv / gridrefinement**3
    print 'Integral of all-electron density:', Z
    equal(Z, 28, 1e-5)

energy_tolerance = 0.0004
niter_tolerance = 0
equal(e, -4.908677, energy_tolerance)
|
robwarm/gpaw-symm
|
gpaw/test/aedensity.py
|
Python
|
gpl-3.0
| 1,366
|
[
"ASE",
"GPAW"
] |
9402e9c1722ae167d0960472214aa499984e76891187183abe38847e995e5c75
|
"""
Module for reading Gaussian cube files, which have become one of the standard file formats
for volumetric data in quantum chemistry and solid state physics software packages
(VASP being an exception).
Some basic info about cube files
(abridged info from http://paulbourke.net/dataformats/cube/ by Paul Bourke)
The file consists of a header which includes the atom information and the size as well
as orientation of the volumetric data. The first two lines of the header are comments. The
third line has the number of atoms included in the file followed by the position of the
origin of the volumetric data. The next three lines give the number of voxels along each axis
(x, y, z) followed by the axis vector. The last section in the header is one line for each
atom consisting of 5 numbers, the first is the atom number, the second is the charge, and
the last three are the x,y,z coordinates of the atom center. The volumetric data is straightforward,
one floating point number for each volumetric element.
Example
In the following example the volumetric data is a 40 by 40 by 40 grid, each voxel is 0.283459 units
wide and the volume is aligned with the coordinate axis. There are three atoms.
CPMD CUBE FILE.
OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z
3 0.000000 0.000000 0.000000
40 0.283459 0.000000 0.000000
40 0.000000 0.283459 0.000000
40 0.000000 0.000000 0.283459
8 0.000000 5.570575 5.669178 5.593517
1 0.000000 5.562867 5.669178 7.428055
1 0.000000 7.340606 5.669178 5.111259
-0.25568E-04 0.59213E-05 0.81068E-05 0.10868E-04 0.11313E-04 0.35999E-05
: : : : : :
: : : : : :
: : : : : :
In this case there will be 40 x 40 x 40 floating point values
: : : : : :
: : : : : :
: : : : : :
"""
import numpy as np
from monty.io import zopen
from pymatgen.core.sites import Site
from pymatgen.core.structure import Structure
from pymatgen.core.units import bohr_to_angstrom
# TODO: can multiprocessing be incorporated without causing issues during drone assimilation?
class Cube:
    """
    Class to read Gaussian cube file formats for volumetric data.
    Cube files are, by default, written in atomic units, and this
    class assumes that convention.
    """

    def __init__(self, fname):
        """
        Initialize the cube object and store the data as self.data

        Args:
            fname (str): filename of the cube to read
        """
        f = zopen(fname, "rt")

        # skip the two comment lines at the top of the header
        for i in range(2):
            f.readline()

        # number of atoms followed by the position of the origin of the volumetric data
        line = f.readline().split()
        self.natoms = int(line[0])
        self.origin = np.array(list(map(float, line[1:])))

        # The number of voxels along each axis (x, y, z) followed by the axis
        # vector; vectors are converted from Bohr to Angstrom.
        line = f.readline().split()
        self.NX = int(line[0])
        self.X = np.array([bohr_to_angstrom * float(val) for val in line[1:]])
        self.dX = np.linalg.norm(self.X)

        line = f.readline().split()
        self.NY = int(line[0])
        self.Y = np.array([bohr_to_angstrom * float(val) for val in line[1:]])
        self.dY = np.linalg.norm(self.Y)

        line = f.readline().split()
        self.NZ = int(line[0])
        self.Z = np.array([bohr_to_angstrom * float(val) for val in line[1:]])
        self.dZ = np.linalg.norm(self.Z)

        self.voxel_volume = abs(np.dot(np.cross(self.X, self.Y), self.Z))
        # Cell volume = |(NX*X) . ((NY*Y) x (NZ*Z))|.
        # BUG FIX: the first factor previously scaled X by self.NZ instead of
        # self.NX, giving a wrong volume whenever NX != NZ.
        self.volume = abs(np.dot(np.cross(self.X.dot(self.NX), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))

        # The last section in the header is one line for each atom consisting of 5 numbers,
        # the first is the atom number, second is charge,
        # the last three are the x,y,z coordinates of the atom center.
        self.sites = []
        for i in range(self.natoms):
            line = f.readline().split()
            self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))

        self.structure = Structure(
            lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],
            species=[s.specie for s in self.sites],
            coords=[s.coords for s in self.sites],
            coords_are_cartesian=True,
        )

        # Volumetric data: the remainder of the file, reshaped to the grid
        self.data = np.reshape(np.array(f.read().split()).astype(float), (self.NX, self.NY, self.NZ))
        # close the handle now that the whole file has been consumed
        # (previously leaked)
        f.close()

    def mask_sphere(self, radius, cx, cy, cz):
        """
        Create a mask for a sphere with radius=radius, centered at cx, cy, cz.

        Args:
            radius: (float) of the mask (in Angstroms)
            cx, cy, cz: (float) the fractional coordinates of the center of the sphere

        NOTE(review): the returned array holds scaled grid distances inside
        the sphere (zero outside), not a 0/1 mask, and the np.roll result is
        immediately overwritten -- confirm this weighting is intended.
        """
        dx, dy, dz = (
            np.floor(radius / np.linalg.norm(self.X)).astype(int),
            np.floor(radius / np.linalg.norm(self.Y)).astype(int),
            np.floor(radius / np.linalg.norm(self.Z)).astype(int),
        )
        gcd = max(np.gcd(dx, dy), np.gcd(dy, dz), np.gcd(dx, dz))
        sx, sy, sz = dx // gcd, dy // gcd, dz // gcd

        r = min(dx, dy, dz)
        # grid indices of the requested sphere center and of the grid middle
        x0, y0, z0 = int(np.round(self.NX * cx)), int(np.round(self.NY * cy)), int(np.round(self.NZ * cz))
        centerx, centery, centerz = self.NX // 2, self.NY // 2, self.NZ // 2
        a = np.roll(self.data, (centerx - x0, centery - y0, centerz - z0))

        i, j, k = np.indices(a.shape, sparse=True)
        a = np.sqrt((sx * i - sx * centerx) ** 2 + (sy * j - sy * centery) ** 2 + (sz * k - sz * centerz) ** 2)

        indices = a > r
        a[indices] = 0

        return a

    def get_atomic_site_averages(self, atomic_site_radii):
        """
        Get the average value around each atomic site.

        Args:
            atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for averaging around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).

        returns:
            Array of site averages, [Average around site 1, Average around site 2, ...]
        """
        return [self._get_atomic_site_average(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]

    def _get_atomic_site_average(self, site, radius):
        """
        Helper function for get_atomic_site_averages.

        Args:
            site: Site in the structure around which to get the average
            radius: (float) the atomic_site_radius (in Angstroms) for given atomic species

        returns:
            Average around the atomic site
        """
        mask = self.mask_sphere(radius, *site.frac_coords)
        return np.sum(self.data * mask) / np.count_nonzero(mask)

    def get_atomic_site_totals(self, atomic_site_radii):
        """
        Get the integrated total in a sphere around each atomic site.

        Args:
            atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for integrating around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).

        returns:
            Array of site totals, [Total around site 1, Total around site 2, ...]
        """
        return [self._get_atomic_site_total(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]

    def _get_atomic_site_total(self, site, radius):
        """
        Helper function for get_atomic_site_totals.

        Args:
            site: Site in the structure around which to get the total
            radius: (float) the atomic_site_radius (in Angstroms) for given atomic species

        returns:
            Total around the atomic site
        """
        mask = self.mask_sphere(radius, *site.frac_coords)
        return np.sum(self.data * mask)

    def get_axis_grid(self, ind):
        """
        Modified from pymatgen.io.vasp.outputs

        Returns the grid for a particular axis.

        Args:
            ind (int): Axis index.
        """
        ng = self.data.shape
        num_pts = ng[ind]
        lengths = self.structure.lattice.abc
        return [i / num_pts * lengths[ind] for i in range(num_pts)]

    def get_average_along_axis(self, ind):
        """
        Modified from pymatgen.io.vasp.outputs

        Get the averaged total of the volumetric data a certain axis direction.
        For example, useful for visualizing Hartree Potentials.

        Args:
            ind (int): Index of axis.

        Returns:
            Average total along axis
        """
        ng = self.data.shape
        m = self.data
        # sum over the two other axes, then divide by their point counts
        if ind == 0:
            total = np.sum(np.sum(m, axis=1), 1)
        elif ind == 1:
            total = np.sum(np.sum(m, axis=0), 1)
        else:
            total = np.sum(np.sum(m, axis=0), 0)
        return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
|
materialsproject/pymatgen
|
pymatgen/io/cube.py
|
Python
|
mit
| 9,239
|
[
"CPMD",
"Gaussian",
"VASP",
"pymatgen"
] |
0468a5c82210f75022f1e696375bf0593e512201413a994fca1223c658e81f8d
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2021 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with functions for Psi4/Cfour interface. Portions that require
calls to Boost Python psi4 module are here, otherwise in qcdb module.
Also calls to qcdb module are here and not elsewhere in driver.
Organizationally, this module isolates qcdb code from psi4 code.
"""
import os
import re
import sys
import uuid
import shutil
import inspect
import subprocess
from psi4.driver import qcdb
from psi4.driver import p4util
from psi4.driver.molutil import *
from psi4.driver.p4util.exceptions import *
# never import driver, wrappers, or aliases into this file
P4C4_INFO = {}
def run_cfour(name, **kwargs):
"""Function that prepares environment and input files
for a calculation calling Stanton and Gauss's CFOUR code.
Also processes results back into Psi4 format.
This function is not called directly but is instead called by
:py:func:`~psi4.energy` or :py:func:`~psi4.optimize` when a Cfour
method is requested (through *name* argument). In order to function
correctly, the Cfour executable ``xcfour`` must be present in
:envvar:`PATH` or :envvar:`PSIPATH`.
.. hlist::
:columns: 1
* Many :ref:`PSI Variables <apdx:cfour_psivar>` extracted from the Cfour output
* Python dictionary of associated file constants accessible as ``P4C4_INFO['zmat']``, ``P4C4_INFO['output']``, ``P4C4_INFO['grd']``, *etc.*
:type name: str
:param name: ``'c4-scf'`` || ``'c4-ccsd(t)'`` || ``'cfour'`` || etc.
First argument, usually unlabeled. Indicates the computational
method to be applied to the system.
:type keep: :ref:`boolean <op_py_boolean>`
:param keep: ``'on'`` || |dl| ``'off'`` |dr|
Indicates whether to delete the Cfour scratch directory upon
completion of the Cfour job.
:type path: str
:param path:
Indicates path to Cfour scratch directory (with respect to Psi4
scratch directory). Otherwise, the default is a subdirectory
within the Psi4 scratch directory.
If specified, GENBAS and/or ZMAT within will be used.
:type genbas: str
:param genbas:
Indicates that contents should be used for GENBAS file.
GENBAS is a complicated topic. It is quite unnecessary if the
molecule is from a molecule {...} block and basis is set through
|Psifours| BASIS keyword. In that case, a GENBAS is written from
LibMints and all is well. Otherwise, a GENBAS is looked for in
the usual places: PSIPATH, PATH, PSIDATADIR/basis. If path kwarg is
specified, also looks there preferentially for a GENBAS. Can
also specify GENBAS within an input file through a string and
setting the genbas kwarg. Note that due to the input parser's
aggression, blank lines need to be replaced by the text blankline.
"""
lowername = name.lower()
internal_p4c4_info = {}
return_wfn = kwargs.pop('return_wfn', False)
# Make sure the molecule the user provided is the active one
molecule = kwargs.pop('molecule', core.get_active_molecule())
molecule.update_geometry()
optstash = p4util.OptionsState(
['CFOUR', 'TRANSLATE_PSI4'])
# Determine calling function and hence dertype
calledby = inspect.stack()[1][3]
dertype = ['energy', 'gradient', 'hessian'].index(calledby)
#print('I am %s called by %s called by %s.\n' %
# (inspect.stack()[0][3], inspect.stack()[1][3], inspect.stack()[2][3]))
# Save submission directory
current_directory = os.getcwd()
# Move into job scratch directory
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
os.chdir(psioh.get_default_path())
# Construct and move into cfour subdirectory of job scratch directory
cfour_tmpdir = kwargs['path'] if 'path' in kwargs else \
'psi.' + str(os.getpid()) + '.' + psio.get_default_namespace() + \
'.cfour.' + str(uuid.uuid4())[:8]
if not os.path.exists(cfour_tmpdir):
os.mkdir(cfour_tmpdir)
os.chdir(cfour_tmpdir)
# Find environment by merging PSIPATH and PATH environment variables
lenv = {
'PATH': ':'.join([os.path.abspath(x) for x in os.environ.get('PSIPATH', '').split(':') if x != '']) + \
':' + os.environ.get('PATH') + \
':' + core.get_datadir() + '/basis',
'GENBAS_PATH': core.get_datadir() + '/basis',
'CFOUR_NUM_CORES': os.environ.get('CFOUR_NUM_CORES'),
'MKL_NUM_THREADS': os.environ.get('MKL_NUM_THREADS'),
'OMP_NUM_THREADS': os.environ.get('OMP_NUM_THREADS'),
'LD_LIBRARY_PATH': os.environ.get('LD_LIBRARY_PATH')
}
if 'path' in kwargs:
lenv['PATH'] = kwargs['path'] + ':' + lenv['PATH']
# Filter out None values as subprocess will fault on them
lenv = {k: v for k, v in lenv.items() if v is not None}
# Load the GENBAS file
genbas_path = qcdb.search_file('GENBAS', lenv['GENBAS_PATH'])
if genbas_path:
try:
shutil.copy2(genbas_path, psioh.get_default_path() + cfour_tmpdir)
except shutil.Error: # should only fail if src and dest equivalent
pass
core.print_out("\n GENBAS loaded from %s\n" % (genbas_path))
core.print_out(" CFOUR to be run from %s\n" % (psioh.get_default_path() + cfour_tmpdir))
else:
message = """
GENBAS file for CFOUR interface not found. Either:
[1] Supply a GENBAS by placing it in PATH or PSIPATH
[1a] Use cfour {} block with molecule and basis directives.
[1b] Use molecule {} block and CFOUR_BASIS keyword.
[2] Allow Psi4's internal basis sets to convert to GENBAS
[2a] Use molecule {} block and BASIS keyword.
"""
core.print_out(message)
core.print_out(' Search path that was tried:\n')
core.print_out(lenv['PATH'].replace(':', ', '))
# Generate the ZMAT input file in scratch
if 'path' in kwargs and os.path.isfile('ZMAT'):
core.print_out(" ZMAT loaded from %s\n" % (psioh.get_default_path() + kwargs['path'] + '/ZMAT'))
else:
with open('ZMAT', 'w') as cfour_infile:
cfour_infile.write(write_zmat(lowername, dertype, molecule))
internal_p4c4_info['zmat'] = open('ZMAT', 'r').read()
#core.print_out('\n====== Begin ZMAT input for CFOUR ======\n')
#core.print_out(open('ZMAT', 'r').read())
#core.print_out('======= End ZMAT input for CFOUR =======\n\n')
#print('\n====== Begin ZMAT input for CFOUR ======')
#print(open('ZMAT', 'r').read())
#print('======= End ZMAT input for CFOUR =======\n')
if 'genbas' in kwargs:
with open('GENBAS', 'w') as cfour_basfile:
cfour_basfile.write(kwargs['genbas'].replace('\nblankline\n', '\n\n'))
core.print_out(' GENBAS loaded from kwargs string\n')
# Close psi4 output file and reopen with filehandle
print('output in', current_directory + '/' + core.outfile_name())
pathfill = '' if os.path.isabs(core.outfile_name()) else current_directory + os.path.sep
# Handle threading
# OMP_NUM_THREADS from env is in lenv from above
# threads from psi4 -n (core.get_num_threads()) is ignored
# CFOUR_OMP_NUM_THREADS psi4 option takes precedence, handled below
if core.has_option_changed('CFOUR', 'CFOUR_OMP_NUM_THREADS') == True:
lenv['OMP_NUM_THREADS'] = str(core.get_option('CFOUR', 'CFOUR_OMP_NUM_THREADS'))
#print("""\n\n<<<<< RUNNING CFOUR ... >>>>>\n\n""")
# Call executable xcfour, directing cfour output to the psi4 output file
cfour_executable = kwargs['c4exec'] if 'c4exec' in kwargs else 'xcfour'
try:
retcode = subprocess.Popen([cfour_executable], bufsize=0, stdout=subprocess.PIPE, env=lenv)
except OSError as e:
sys.stderr.write('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
message = ('Program %s not found in path or execution failed: %s\n' % (cfour_executable, e.strerror))
raise ValidationError(message)
c4out = ''
while True:
data = retcode.stdout.readline()
data = data.decode('utf-8')
if not data:
break
core.print_out(data)
c4out += data
internal_p4c4_info['output'] = c4out
c4files = {}
core.print_out('\n')
for item in ['GRD', 'FCMFINAL', 'DIPOL']:
try:
with open(psioh.get_default_path() + cfour_tmpdir + '/' + item, 'r') as handle:
c4files[item] = handle.read()
core.print_out(' CFOUR scratch file %s has been read\n' % (item))
core.print_out('%s\n' % c4files[item])
internal_p4c4_info[item.lower()] = c4files[item]
except IOError:
pass
core.print_out('\n')
if molecule.name() == 'blank_molecule_psi4_yo':
qcdbmolecule = None
else:
molecule.update_geometry()
qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
qcdbmolecule.update_geometry()
# c4mol, if it exists, is dinky, just a clue to geometry of cfour results
psivar, c4grad, c4mol = qcdb.cfour.harvest(qcdbmolecule, c4out, **c4files)
# Absorb results into psi4 data structures
for key in psivar.keys():
core.set_variable(key.upper(), float(psivar[key]))
if qcdbmolecule is None and c4mol is not None:
molecule = geometry(c4mol.create_psi4_string_from_molecule(), name='blank_molecule_psi4_yo')
molecule.update_geometry()
# This case arises when no Molecule going into calc (cfour {} block) but want
# to know the orientation at which grad, properties, etc. are returned (c4mol).
# c4mol is dinky, w/o chg, mult, dummies and retains name
# blank_molecule_psi4_yo so as to not interfere with future cfour {} blocks
if c4grad is not None:
mat = core.Matrix.from_list(c4grad)
core.set_gradient(mat)
#print ' <<< [3] C4-GRD-GRAD >>>'
#mat.print()
# exit(1)
# # Things needed core.so module to do
# collect c4out string
# read GRD
# read FCMFINAL
# see if theres an active molecule
# # Things delegatable to qcdb
# parsing c4out
# reading GRD and FCMFINAL strings
# reconciling p4 and c4 molecules (orient)
# reconciling c4out and GRD and FCMFINAL results
# transforming frame of results back to p4
# # Things run_cfour needs to have back
# psivar
# qcdb.Molecule of c4?
# coordinates?
# gradient in p4 frame
# # Process the cfour output
# psivar, c4coord, c4grad = qcdb.cfour.cfour_harvest(c4out)
# for key in psivar.keys():
# core.set_variable(key.upper(), float(psivar[key]))
#
# # Awful Hack - Go Away TODO
# if c4grad:
# molecule = core.get_active_molecule()
# molecule.update_geometry()
#
# if molecule.name() == 'blank_molecule_psi4_yo':
# p4grad = c4grad
# p4coord = c4coord
# else:
# qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
# #p4grad = qcdbmolecule.deorient_array_from_cfour(c4coord, c4grad)
# #p4coord = qcdbmolecule.deorient_array_from_cfour(c4coord, c4coord)
#
# with open(psioh.get_default_path() + cfour_tmpdir + '/GRD', 'r') as cfour_grdfile:
# c4outgrd = cfour_grdfile.read()
# print('GRD\n',c4outgrd)
# c4coordGRD, c4gradGRD = qcdb.cfour.cfour_harvest_files(qcdbmolecule, grd=c4outgrd)
#
# p4mat = core.Matrix.from_list(p4grad)
# core.set_gradient(p4mat)
# print(' <<< P4 PSIVAR >>>')
# for item in psivar:
# print(' %30s %16.8f' % (item, psivar[item]))
#print(' <<< P4 COORD >>>')
#for item in p4coord:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# print(' <<< P4 GRAD >>>')
# for item in c4grad:
# print(' %16.8f %16.8f %16.8f' % (item[0], item[1], item[2]))
# Clean up cfour scratch directory unless user instructs otherwise
keep = yes.match(str(kwargs['keep'])) if 'keep' in kwargs else False
os.chdir('..')
try:
if keep or ('path' in kwargs):
core.print_out('\n CFOUR scratch files have been kept in %s\n' % (psioh.get_default_path() + cfour_tmpdir))
else:
shutil.rmtree(cfour_tmpdir)
except OSError as e:
print('Unable to remove CFOUR temporary directory %s' % e, file=sys.stderr)
exit(1)
# Return to submission directory and reopen output file
os.chdir(current_directory)
core.print_out('\n')
p4util.banner(' Cfour %s %s Results ' % (name.lower(), calledby.capitalize()))
core.print_variables()
if c4grad is not None:
core.get_gradient().print_out()
core.print_out('\n')
p4util.banner(' Cfour %s %s Results ' % (name.lower(), calledby.capitalize()))
core.print_variables()
if c4grad is not None:
core.get_gradient().print_out()
# Quit if Cfour threw error
if 'CFOUR ERROR CODE' in core.variables():
raise ValidationError("""Cfour exited abnormally.""")
P4C4_INFO.clear()
P4C4_INFO.update(internal_p4c4_info)
optstash.restore()
# new skeleton wavefunction w/mol, highest-SCF basis (just to choose one), & not energy
# Feb 2017 hack. Could get proper basis in skel wfn even if not through p4 basis kw
gobas = core.get_global_option('BASIS') if core.get_global_option('BASIS') else 'sto-3g'
basis = core.BasisSet.build(molecule, "ORBITAL", gobas)
if basis.has_ECP():
raise ValidationError("""ECPs not hooked up for Cfour""")
wfn = core.Wavefunction(molecule, basis)
optstash.restore()
if dertype == 0:
finalquantity = psivar['CURRENT ENERGY']
elif dertype == 1:
finalquantity = core.get_gradient()
wfn.set_gradient(finalquantity)
if finalquantity.rows(0) < 20:
core.print_out('CURRENT GRADIENT')
finalquantity.print_out()
elif dertype == 2:
pass
#finalquantity = finalhessian
#wfn.set_hessian(finalquantity)
#if finalquantity.rows(0) < 20:
# core.print_out('CURRENT HESSIAN')
# finalquantity.print_out()
return wfn
def cfour_list():
    """Return the Cfour method names valid as :py:func:`~driver.energy` arguments."""
    methods = qcdb.cfour.cfour_list()
    return methods
def cfour_gradient_list():
    """Return the Cfour method names with analytic :py:func:`~driver.gradient` available."""
    methods = qcdb.cfour.cfour_gradient_list()
    return methods
def cfour_hessian_list():
    """Form list of Cfour analytic :py:func:`~driver.hessian` arguments."""
    # BUGFIX: docstring previously said "gradient" although this function
    # returns the analytic-hessian method list.
    return qcdb.cfour.cfour_hessian_list()
def cfour_psivar_list():
"""Form dictionary of :ref:`PSI Variables <apdx:cfour_psivar>` set by Cfour methods."""
return qcdb.cfour.cfour_psivar_list()
def write_zmat(name, dertype, molecule):
    """Returns string with contents of Cfour ZMAT file as gathered from
    active molecule, current keyword settings, and cfour {...} block.

    Parameters
    ----------
    name : str
        Quantum chemical method requested of Cfour.
    dertype : int
        Derivative level of the target computation (0 = energy,
        1 = gradient, 2 = hessian).
    molecule : Molecule
        Active molecule. The sentinel name ``blank_molecule_psi4_yo``
        signals that the geometry is supplied through a ``cfour {...}``
        block instead of a psi4 ``molecule {...}`` block.

    Returns
    -------
    str
        Complete text of the ZMAT input file.

    Raises
    ------
    ValidationError
        If the basis involves ECPs, or if the assembled ZMAT does not
        contain exactly one ``*CFOUR(...)`` keyword block.
    """
    # Handle memory
    mem = int(0.000001 * core.get_memory())
    if mem == 524:
        # 524 MB is the psi4 default; let Cfour apply its own default then.
        memcmd, memkw = '', {}
    else:
        memcmd, memkw = qcdb.cfour.muster_memory(mem)

    # Handle molecule and basis set
    if molecule.name() == 'blank_molecule_psi4_yo':
        # Geometry arrives via the cfour {...} block, so pass nothing along
        # and suppress psi4 -> cfour keyword translation.
        molcmd, molkw = '', {}
        bascmd, baskw = '', {}
        core.set_local_option('CFOUR', 'TRANSLATE_PSI4', False)
    else:
        molecule.update_geometry()
        #print(molecule.create_psi4_string_from_molecule())
        qcdbmolecule = qcdb.Molecule(molecule.create_psi4_string_from_molecule())
        qcdbmolecule.tagline = molecule.name()
        molcmd, molkw = qcdbmolecule.format_molecule_for_cfour()

        if core.get_global_option('BASIS') == '':
            bascmd, baskw = '', {}
        else:
            user_pg = molecule.schoenflies_symbol()
            molecule.reset_point_group('c1')  # need basis printed for *every* atom
            qbs = core.BasisSet.build(molecule, "BASIS", core.get_global_option('BASIS'))
            if qbs.has_ECP():
                raise ValidationError("""ECPs not hooked up for Cfour""")
            with open('GENBAS', 'w') as cfour_basfile:
                cfour_basfile.write(qbs.genbas())
            core.print_out(' GENBAS loaded from Psi4 LibMints for basis %s\n' % (core.get_global_option('BASIS')))
            molecule.reset_point_group(user_pg)
            molecule.update_geometry()
            bascmd, baskw = qcdbmolecule.format_basis_for_cfour(qbs.has_puream())

    # Handle psi4 keywords implying cfour keyword values
    if core.get_option('CFOUR', 'TRANSLATE_PSI4'):
        psicmd, psikw = qcdb.cfour.muster_psi4options(p4util.prepare_options_for_modules(changedOnly=True))
    else:
        psicmd, psikw = '', {}

    # Handle calc type and quantum chemical method
    # BUGFIX: this call (and its comment) appeared twice back to back;
    # one call suffices and the duplicate has been removed.
    mdccmd, mdckw = qcdb.cfour.muster_modelchem(name, dertype)

    # Handle driver vs input/default keyword reconciliation
    userkw = p4util.prepare_options_for_modules()
    userkw = qcdb.options.reconcile_options(userkw, memkw)
    userkw = qcdb.options.reconcile_options(userkw, molkw)
    userkw = qcdb.options.reconcile_options(userkw, baskw)
    userkw = qcdb.options.reconcile_options(userkw, psikw)
    userkw = qcdb.options.reconcile_options(userkw, mdckw)

    # Handle conversion of psi4 keyword structure into cfour format
    optcmd = qcdb.options.prepare_options_for_cfour(userkw)

    # Handle text to be passed untouched to cfour
    litcmd = core.get_global_option('LITERAL_CFOUR')

    # Assemble ZMAT pieces
    zmat = memcmd + molcmd + optcmd + mdccmd + psicmd + bascmd + litcmd

    # Exactly one *CFOUR(...) (or legacy *ACES2/*CRAPS) block must be present,
    # otherwise input was specified both the psi4 way and the cfour way.
    if len(re.findall(r'^\*(ACES2|CFOUR|CRAPS)\(', zmat, re.MULTILINE)) != 1:
        core.print_out('\n Faulty ZMAT constructed:\n%s' % (zmat))
        raise ValidationError("""
Multiple *CFOUR(...) blocks in input. This usually arises
because molecule or options are specified both the psi4 way through
molecule {...} and set ... and the cfour way through cfour {...}.""")

    return zmat
|
jturney/psi4
|
psi4/driver/procrouting/interface_cfour.py
|
Python
|
lgpl-3.0
| 19,085
|
[
"CFOUR",
"Psi4"
] |
15974d2861457440bf4005d0cbda6e1c3732e5a9ddb491b9c54cd1612300ebe6
|
#
# gPrime - A web-based genealogy program
#
# Copyright (C) 2007-2012 Brian G. Matherly
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2013-2014 Paul Franklin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Reports/Text Reports/Tag Report"""
#------------------------------------------------------------------------
#
# standard python modules
#
#------------------------------------------------------------------------
#------------------------------------------------------------------------
#
# Gprime modules
#
#------------------------------------------------------------------------
from gprime.const import LOCALE as glocale
_ = glocale.translation.gettext
from gprime.plug.menu import EnumeratedListOption
from gprime.plug.report import Report
from gprime.plug.report import utils
from gprime.plug.report import MenuReportOptions
from gprime.plug.report import stdoptions
from gprime.plug.docgen import (IndexMark, FontStyle, ParagraphStyle,
TableStyle, TableCellStyle,
FONT_SANS_SERIF, INDEX_TYPE_TOC,
PARA_ALIGN_CENTER)
from gprime.lib import NoteType, UrlType
from gprime.filters import GenericFilterFactory, rules
from gprime.errors import ReportError
from gprime.utils.db import get_participant_from_event
from gprime.display.place import displayer as _pd
from gprime.proxy import LivingProxyDb, CacheProxyDb
#------------------------------------------------------------------------
#
# TagReport
#
#------------------------------------------------------------------------
class TagReport(Report):
    """Text report listing every database object carrying a given tag.

    One table per object type (people, families, events, places, notes,
    media, repositories, sources, citations); empty sections are omitted.
    """

    def __init__(self, database, options, user):
        """
        Create the TagReport object that produces the report.

        The arguments are:

        database        - the GRAMPS database instance
        options         - instance of the Options class for this report
        user            - a gen.user.User() instance

        This report needs the following parameters (class variables)
        that come in the options class.

        tag             - The tag each object must match to be included.
        name_format     - Preferred format to display names of people
        incl_private    - Whether to include private data
        living_people   - How to handle living people
        years_past_death - Consider as living this many years after death
        """
        Report.__init__(self, database, options, user)
        menu = options.menu

        # Localization: report may be rendered in a different language
        # than the UI ('trans' option).
        lang = menu.get_option_by_name('trans').get_value()
        rlocale = self.set_locale(lang)

        stdoptions.run_private_data_option(self, menu)
        living_opt = stdoptions.run_living_people_option(self, menu, rlocale)
        # Wrap the (possibly proxied) database in a cache for faster
        # repeated handle lookups while building the tables.
        self.database = CacheProxyDb(self.database)

        # Resolve the human-readable description of the living-people mode
        # for the report subtitle.
        self._lv = menu.get_option_by_name('living_people').get_value()
        for (value, description) in living_opt.get_items(xml_items=True):
            if value == self._lv:
                living_desc = self._(description)
                break
        # NOTE(review): if no item matches self._lv the loop exits without
        # binding living_desc and the next line raises NameError -- confirm
        # the option widget always yields a matching value.
        self.living_desc = self._("(Living people: %(option_name)s)"
                                 ) % {'option_name' : living_desc}

        self.tag = menu.get_option_by_name('tag').get_value()
        if not self.tag:
            raise ReportError(
                _('Tag Report'),
                _('You must first create a tag before running this report.'))

        stdoptions.run_name_format_option(self, menu)

    def write_report(self):
        """Emit the title, optional living-people subtitle, then one section
        per object type."""
        self.doc.start_paragraph("TR-Title")
        # feature request 2356: avoid genitive form
        title = self._("Tag Report for %s Items") % self.tag
        mark = IndexMark(title, INDEX_TYPE_TOC, 1)
        self.doc.write_text(title, mark)
        self.doc.end_paragraph()

        # Subtitle only when living people are filtered/obscured in some way.
        if self._lv != LivingProxyDb.MODE_INCLUDE_ALL:
            self.doc.start_paragraph("TR-ReportSubtitle")
            self.doc.write_text(self.living_desc)
            self.doc.end_paragraph()

        self.write_people()
        self.write_families()
        self.write_events()
        self.write_places()
        self.write_notes()
        self.write_media()
        self.write_repositories()
        self.write_sources()
        self.write_citations()

    def write_people(self):
        """ write the people associated with the tag """
        plist = self.database.iter_person_handles()
        filter_class = GenericFilterFactory('Person')
        a_filter = filter_class()
        a_filter.add_rule(rules.person.HasTag([self.tag]))
        ind_list = a_filter.apply(self.database, plist)
        if not ind_list:
            # No matching people: omit the whole section.
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("People")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('PeopleTable', 'TR-Table')

        # Header row: Id | Name | Birth | Death
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Name"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Birth"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Death"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged person.
        for person_handle in ind_list:
            person = self.database.get_person_from_handle(person_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(person.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            name = self._name_display.display(person)
            mark = utils.get_person_mark(self.database, person)
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(name, mark)
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Birth date (cell left empty when no birth event).
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            birth_ref = person.get_birth_ref()
            if birth_ref:
                event = self.database.get_event_from_handle(birth_ref.ref)
                self.doc.write_text(self._get_date(event.get_date_object()))
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Death date (cell left empty when no death event).
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            death_ref = person.get_death_ref()
            if death_ref:
                event = self.database.get_event_from_handle(death_ref.ref)
                self.doc.write_text(self._get_date(event.get_date_object()))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_families(self):
        """ write the families associated with the tag """
        flist = self.database.iter_family_handles()
        filter_class = GenericFilterFactory('Family')
        a_filter = filter_class()
        a_filter.add_rule(rules.family.HasTag([self.tag]))
        fam_list = a_filter.apply(self.database, flist)
        if not fam_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Families")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('FamilyTable', 'TR-Table')

        # Header row: Id | Father | Mother | Relationship
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Father"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Mother"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Relationship"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged family.
        for family_handle in fam_list:
            family = self.database.get_family_from_handle(family_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(family.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Father (cell left empty when unset).
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            father_handle = family.get_father_handle()
            if father_handle:
                father = self.database.get_person_from_handle(father_handle)
                mark = utils.get_person_mark(self.database, father)
                self.doc.write_text(self._name_display.display(father), mark)
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Mother (cell left empty when unset).
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            mother_handle = family.get_mother_handle()
            if mother_handle:
                mother = self.database.get_person_from_handle(mother_handle)
                mark = utils.get_person_mark(self.database, mother)
                self.doc.write_text(self._name_display.display(mother), mark)
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            relation = family.get_relationship()
            self.doc.write_text(str(relation))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_events(self):
        """ write the events associated with the tag """
        elist = self.database.get_event_handles()
        filter_class = GenericFilterFactory('Event')
        a_filter = filter_class()
        a_filter.add_rule(rules.event.HasTag([self.tag]))
        event_list = a_filter.apply(self.database, elist)
        if not event_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Events")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('EventTable', 'TR-Table')

        # Header row: Id | Type | Participants | Date
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Type"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Participants"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Date"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged event.
        for event_handle in event_list:
            event = self.database.get_event_from_handle(event_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(event.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(self._(self._get_type(event.get_type())))
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(get_participant_from_event(self.database,
                                                          event_handle))
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            date = self._get_date(event.get_date_object())
            if date:
                self.doc.write_text(date)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_places(self):
        """ write the places associated with the tag """
        plist = self.database.get_place_handles()
        filter_class = GenericFilterFactory('Place')
        a_filter = filter_class()
        a_filter.add_rule(rules.place.HasTag([self.tag]))
        place_list = a_filter.apply(self.database, plist)
        if not place_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Places")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('PlaceTable', 'TR-Table')

        # Header row: Id | Title | Name | Type
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Title"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Name"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Type"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged place.
        for place_handle in place_list:
            place = self.database.get_place_from_handle(place_handle)
            # Hierarchy-aware display title via the place displayer.
            place_title = _pd.display(self.database, place)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(place.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(place_title)
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(place.get_name())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(str(place.get_type()))
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_notes(self):
        """ write the notes associated with the tag """
        nlist = self.database.get_note_handles()
        filter_class = GenericFilterFactory('Note')
        a_filter = filter_class()
        a_filter.add_rule(rules.note.HasTag([self.tag]))
        note_list = a_filter.apply(self.database, nlist)
        if not note_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Notes")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('NoteTable', 'TR-Table')

        # Header row: Id | Type | Text (text cell spans 2 columns).
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Type"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell', 2)
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Text"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged note.
        for note_handle in note_list:
            note = self.database.get_note_from_handle(note_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(note.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            note_type = note.get_type()
            self.doc.write_text(str(note_type))
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Styled note body; HTML-code notes are passed through as HTML.
            self.doc.start_cell('TR-TableCell', 2)
            self.doc.write_styled_note(
                note.get_styledtext(), note.get_format(), 'TR-Note',
                contains_html=((note.get_type() == NoteType.HTML_CODE)))
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_media(self):
        """ write the media associated with the tag """
        mlist = self.database.get_media_handles(sort_handles=True)
        filter_class = GenericFilterFactory('Media')
        a_filter = filter_class()
        a_filter.add_rule(rules.media.HasTag([self.tag]))
        media_list = a_filter.apply(self.database, mlist)
        if not media_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Media")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('MediaTable', 'TR-Table')

        # Header row: Id | Title | Type | Date
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Title"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Type"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Date"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged media object.
        for media_handle in media_list:
            media = self.database.get_media_from_handle(media_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(media.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            title = media.get_description()
            self.doc.write_text(str(title))
            self.doc.end_paragraph()
            self.doc.end_cell()

            # "Type" column shows the MIME type of the media file.
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            mime_type = media.get_mime_type()
            self.doc.write_text(str(mime_type))
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            date = self._get_date(media.get_date_object())
            if date:
                self.doc.write_text(date)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_repositories(self):
        """ write the repositories associated with the tag """
        rlist = self.database.get_repository_handles()
        filter_class = GenericFilterFactory('Repository')
        a_filter = filter_class()
        a_filter.add_rule(rules.repository.HasTag([self.tag]))
        repo_list = a_filter.apply(self.database, rlist)
        if not repo_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Repositories")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        # NOTE(review): 'ReopTable' looks like a typo for 'RepoTable';
        # harmless, since this is only the table's internal name.
        self.doc.start_table('ReopTable', 'TR-Table')

        # Header row: Id | Name | Type | Email Address
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Name"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Type"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Email Address"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged repository.
        for repo_handle in repo_list:
            repo = self.database.get_repository_from_handle(repo_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(repo.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(repo.get_name())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(str(repo.get_type()))
            self.doc.end_paragraph()
            self.doc.end_cell()

            # First EMAIL-typed URL is shown; the variable name 'home_page'
            # is misleading -- it actually holds an email address.
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            home_page = ''
            for url in repo.get_url_list():
                if url.get_type() == UrlType.EMAIL:
                    home_page = url.get_path()
                    break
            self.doc.write_text(home_page)
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_sources(self):
        """ write the sources associated with the tag """
        slist = self.database.get_source_handles(sort_handles=True)
        filter_class = GenericFilterFactory('Source')
        a_filter = filter_class()
        a_filter.add_rule(rules.source.HasTag([self.tag]))
        source_list = a_filter.apply(self.database, slist)
        if not source_list:
            return

        self.doc.start_paragraph("TR-Heading")
        # NOTE(review): heading is singular "Source" while other sections
        # use plural headings -- confirm whether this is intentional.
        header = self._("Source")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('SourceTable', 'TR-Table')

        # Header row: Id | Title | Author | Publication Information
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Title"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Author"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Publication Information"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged source.
        for source_handle in source_list:
            source = self.database.get_source_from_handle(source_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(source.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(source.get_title())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(source.get_author())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(source.get_publication_info())
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()

    def write_citations(self):
        """ write the citations associated with the tag """
        clist = self.database.get_citation_handles(sort_handles=True)
        filter_class = GenericFilterFactory('Citation')
        a_filter = filter_class()
        a_filter.add_rule(rules.citation.HasTag([self.tag]))
        citation_list = a_filter.apply(self.database, clist)
        if not citation_list:
            return

        self.doc.start_paragraph("TR-Heading")
        header = self._("Citations")
        mark = IndexMark(header, INDEX_TYPE_TOC, 2)
        self.doc.write_text(header, mark)
        self.doc.end_paragraph()

        self.doc.start_table('CitationTable', 'TR-Table')

        # Header row: Id | Volume/Page | Date | Source
        self.doc.start_row()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Id"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Volume/Page"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Date"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.start_cell('TR-TableCell')
        self.doc.start_paragraph('TR-Normal-Bold')
        self.doc.write_text(self._("Source"))
        self.doc.end_paragraph()
        self.doc.end_cell()
        self.doc.end_row()

        # One row per tagged citation.
        for citation_handle in citation_list:
            citation = self.database.get_citation_from_handle(citation_handle)
            self.doc.start_row()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(citation.get_gid())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            self.doc.write_text(citation.get_page())
            self.doc.end_paragraph()
            self.doc.end_cell()

            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            date = self._get_date(citation.get_date_object())
            if date:
                self.doc.write_text(date)
            self.doc.end_paragraph()
            self.doc.end_cell()

            # Title of the source this citation references.
            self.doc.start_cell('TR-TableCell')
            self.doc.start_paragraph('TR-Normal')
            source_handle = citation.get_reference_handle()
            source = self.database.get_source_from_handle(source_handle)
            self.doc.write_text(source.get_title())
            self.doc.end_paragraph()
            self.doc.end_cell()
            self.doc.end_row()

        self.doc.end_table()
#------------------------------------------------------------------------
#
# TagOptions
#
#------------------------------------------------------------------------
class TagOptions(MenuReportOptions):
    """Menu-driven options for the Tag Report."""

    def __init__(self, name, dbase):
        self.__db = dbase
        MenuReportOptions.__init__(self, name, dbase)

    def get_subject(self):
        """Return a string describing the subject of the report (the tag)."""
        return self.__tag_option.get_value()

    def add_menu_options(self, menu):
        """
        Add options to the menu for the tag report.
        """
        category_name = _("Report Options")

        # Collect every tag name known to the database, in sorted order.
        all_tags = [self.__db.get_tag_from_handle(hdl).get_name()
                    for hdl in self.__db.get_tag_handles(sort_handles=True)]

        if all_tags:
            self.__tag_option = EnumeratedListOption(_('Tag'), all_tags[0])
            for tag_name in all_tags:
                self.__tag_option.add_item(tag_name, tag_name)
        else:
            # No tags exist yet; offer a single empty choice so the report
            # can still be opened (it will raise a ReportError when run).
            self.__tag_option = EnumeratedListOption(_('Tag'), '')
            self.__tag_option.add_item('', '')

        self.__tag_option.set_help(_("The tag to use for the report"))
        menu.add_option(category_name, "tag", self.__tag_option)

        stdoptions.add_name_format_option(menu, category_name)
        stdoptions.add_private_data_option(menu, category_name)
        stdoptions.add_living_people_option(menu, category_name)
        stdoptions.add_localization_option(menu, category_name)

    def make_default_style(self, default_style):
        """Make the default output style for the Tag Report."""
        # -- Paragraph styles -------------------------------------------

        # Page title: large bold sans-serif, centered.
        title_font = FontStyle()
        title_font.set(face=FONT_SANS_SERIF, size=16, bold=1)
        para = ParagraphStyle()
        para.set_font(title_font)
        para.set_header_level(1)
        para.set_top_margin(utils.pt2cm(3))
        para.set_bottom_margin(utils.pt2cm(3))
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_description(_("The style used for the title of the page."))
        default_style.add_paragraph_style("TR-Title", para)

        # Subtitle (living-people note): smaller bold sans-serif, centered.
        subtitle_font = FontStyle()
        subtitle_font.set(face=FONT_SANS_SERIF, size=12, bold=1)
        para = ParagraphStyle()
        para.set_font(subtitle_font)
        para.set_header_level(1)
        para.set_top_margin(utils.pt2cm(3))
        para.set_bottom_margin(utils.pt2cm(3))
        para.set_alignment(PARA_ALIGN_CENTER)
        para.set_description(_('The style used for the subtitle.'))
        default_style.add_paragraph_style("TR-ReportSubtitle", para)

        # Section headings: italic sans-serif.
        heading_font = FontStyle()
        heading_font.set(face=FONT_SANS_SERIF, size=14, italic=1)
        para = ParagraphStyle()
        para.set_font(heading_font)
        para.set_header_level(2)
        para.set_top_margin(0.25)
        para.set_bottom_margin(0.25)
        para.set_description(_('The style used for the section headers.'))
        default_style.add_paragraph_style("TR-Heading", para)

        # Plain table text.
        body_font = FontStyle()
        body_font.set_size(12)
        para = ParagraphStyle()
        para.set(first_indent=-0.75, lmargin=.75)
        para.set_font(body_font)
        para.set_top_margin(utils.pt2cm(3))
        para.set_bottom_margin(utils.pt2cm(3))
        para.set_description(_('The basic style used for the text display.'))
        default_style.add_paragraph_style("TR-Normal", para)

        # Bold table headings.
        bold_font = FontStyle()
        bold_font.set_size(12)
        bold_font.set_bold(True)
        para = ParagraphStyle()
        para.set(first_indent=-0.75, lmargin=.75)
        para.set_font(bold_font)
        para.set_top_margin(utils.pt2cm(3))
        para.set_bottom_margin(utils.pt2cm(3))
        para.set_description(_('The basic style used for table headings.'))
        default_style.add_paragraph_style("TR-Normal-Bold", para)

        # Note text (default font).
        para = ParagraphStyle()
        para.set(first_indent=-0.75, lmargin=.75)
        para.set_top_margin(utils.pt2cm(3))
        para.set_bottom_margin(utils.pt2cm(3))
        para.set_description(_('The basic style used for the note display.'))
        default_style.add_paragraph_style("TR-Note", para)

        # -- Table styles -----------------------------------------------
        cell = TableCellStyle()
        default_style.add_cell_style('TR-TableCell', cell)

        # Four columns: 10% id + three 30% data columns.
        table = TableStyle()
        table.set_width(100)
        table.set_columns(4)
        table.set_column_width(0, 10)
        table.set_column_width(1, 30)
        table.set_column_width(2, 30)
        table.set_column_width(3, 30)
        default_style.add_table_style('TR-Table', table)
|
sam-m888/gprime
|
gprime/plugins/textreport/tagreport.py
|
Python
|
gpl-2.0
| 34,619
|
[
"Brian"
] |
da6ef88e02fd8a369acfd26bde4625465c6b470e88eb1105f2a3abb7b804ae5b
|
"""
simple, elegant templating
(part of web.py)
Template design:
Template string is split into tokens and the tokens are combined into nodes.
Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
for-loop, if-loop etc are block nodes, which contain multiple child nodes.
Each node can emit some python string. python string emitted by the
root node is validated for safeeval and executed using python in the given environment.
Enough care is taken to make sure the generated code and the template has line to line match,
so that the error messages can point to exact line number in template. (It doesn't work in some cases still.)
Grammar:
template -> defwith sections
defwith -> '$def with (' arguments ')' | ''
sections -> section*
section -> block | assignment | line
assignment -> '$ ' <assignment expression>
line -> (text|expr)*
text -> <any characters other than $>
expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
pyexpr -> <python expression>
"""
from __future__ import print_function
from io import open
__all__ = [
"Template",
"Render", "render", "frender",
"ParseError", "SecurityError",
"test"
]
import tokenize
import os
import sys
import glob
import re
import warnings
import ast
from .utils import storage, safeunicode, safestr, re_compile
from .webapi import config
from .net import websafe
from .py3helpers import PY2, iteritems
# Bring MutableMapping into scope from its modern location.
# ``collections.MutableMapping`` was deprecated in Python 3.3 and *removed*
# in Python 3.10, so the abc module must be tried first; the UserDict-based
# shim remains as the Python 2 fallback.
try:
    from collections.abc import MutableMapping
except ImportError:  # Python 2
    from UserDict import DictMixin

    # Make a new-style class
    class MutableMapping(object, DictMixin):
        pass
def splitline(text):
    r"""Split *text* at the first newline, keeping the newline with the head.

    Returns a ``(first_line, rest)`` pair; both parts are empty-string safe.

    >>> splitline('foo\nbar')
    ('foo\n', 'bar')
    >>> splitline('foo')
    ('foo', '')
    >>> splitline('')
    ('', '')
    """
    head, sep, tail = text.partition('\n')
    return head + sep, tail
class Parser:
    """Parser Base.

    Turns template text into a parse tree: sections (line / assignment /
    block) are read one at a time and combined into a SuiteNode, which is
    finally wrapped in a DefwithNode (see the module docstring for the
    grammar).
    """

    def __init__(self):
        # statement keyword -> node class, and bare keywords; stored on the
        # instance so parser extensions can override them per-parser.
        self.statement_nodes = STATEMENT_NODES
        self.keywords = KEYWORDS

    def parse(self, text, name="<template>"):
        """Parses the complete template *text* and returns the root node."""
        self.text = text
        self.name = name

        defwith, text = self.read_defwith(text)
        suite = self.read_suite(text)
        return DefwithNode(defwith, suite)

    def read_defwith(self, text):
        # The optional "$def with (...)" header must be the very first line.
        if text.startswith('$def with'):
            defwith, text = splitline(text)
            defwith = defwith[1:].strip()  # strip $ and spaces
            return defwith, text
        else:
            return '', text

    def read_section(self, text):
        r"""Reads one section from the given text.

        section -> block | assignment | line

        >>> read_section = Parser().read_section
        >>> read_section('foo\nbar\n')
        (<line: [t'foo\n']>, 'bar\n')
        >>> read_section('$ a = b + 1\nfoo\n')
        (<assignment: 'a = b + 1'>, 'foo\n')

        read_section('$for in range(10):\n    hello $i\nfoo)
        """
        if text.lstrip(' ').startswith('$'):
            index = text.index('$')
            begin_indent, text2 = text[:index], text[index + 1:]
            ahead = self.python_lookahead(text2)

            if ahead == 'var':
                return self.read_var(text2)
            elif ahead in self.statement_nodes:
                return self.read_block_section(text2, begin_indent)
            elif ahead in self.keywords:
                return self.read_keyword(text2)
            elif ahead.strip() == '':
                # assignments starts with a space after $
                # ex: $ a = b + 2
                return self.read_assignment(text2)
        # anything else is a plain template line (text and expressions)
        return self.readline(text)

    def read_var(self, text):
        r"""Reads a var statement.

        >>> read_var = Parser().read_var
        >>> read_var('var x=10\nfoo')
        (<var: x = 10>, 'foo')
        >>> read_var('var x: hello $name\nfoo')
        (<var: x = join_(u'hello ', escape_(name, True))>, 'foo')
        """
        line, text = splitline(text)
        tokens = self.python_tokens(line)
        if len(tokens) < 4:
            raise SyntaxError('Invalid var statement')

        name = tokens[1]
        sep = tokens[2]
        value = line.split(sep, 1)[1].strip()

        if sep == '=':
            pass  # no need to process value
        elif sep == ':':
            #@@ Hack for backward-compatibility
            if tokens[3] == '\n':  # multi-line var statement
                block, text = self.read_indented_block(text, '    ')
                lines = [self.readline(x)[0] for x in block.splitlines()]
                nodes = []
                for x in lines:
                    nodes.extend(x.nodes)
                    nodes.append(TextNode('\n'))
            else:  # single-line var statement
                linenode, _ = self.readline(value)
                nodes = linenode.nodes
            parts = [node.emit('') for node in nodes]
            value = "join_(%s)" % ", ".join(parts)
        else:
            raise SyntaxError('Invalid var statement')
        return VarNode(name, value), text

    def read_suite(self, text):
        r"""Reads section by section till end of text.

        >>> read_suite = Parser().read_suite
        >>> read_suite('hello $name\nfoo\n')
        [<line: [t'hello ', $name, t'\n']>, <line: [t'foo\n']>]
        """
        sections = []
        while text:
            section, text = self.read_section(text)
            sections.append(section)
        return SuiteNode(sections)

    def readline(self, text):
        r"""Reads one line from the text. Newline is suppressed if the line ends with \.

        >>> readline = Parser().readline
        >>> readline('hello $name!\nbye!')
        (<line: [t'hello ', $name, t'!\n']>, 'bye!')
        >>> readline('hello $name!\\\nbye!')
        (<line: [t'hello ', $name, t'!']>, 'bye!')
        >>> readline('$f()\n\n')
        (<line: [$f(), t'\n']>, '\n')
        """
        line, text = splitline(text)

        # suppress new line if line ends with \
        if line.endswith('\\\n'):
            line = line[:-2]

        nodes = []
        while line:
            node, line = self.read_node(line)
            nodes.append(node)

        return LineNode(nodes), text

    def read_node(self, text):
        r"""Reads a node from the given text and returns the node and remaining text.

        >>> read_node = Parser().read_node
        >>> read_node('hello $name')
        (t'hello ', '$name')
        >>> read_node('$name')
        ($name, '')
        """
        if text.startswith('$$'):
            # "$$" is an escaped dollar sign
            return TextNode('$'), text[2:]
        elif text.startswith('$#'):  # comment
            line, text = splitline(text)
            return TextNode('\n'), text
        elif text.startswith('$'):
            text = text[1:]  # strip $
            if text.startswith(':'):
                # "$:" disables output escaping for this expression
                escape = False
                text = text[1:]  # strip :
            else:
                escape = True
            return self.read_expr(text, escape=escape)
        else:
            return self.read_text(text)

    def read_text(self, text):
        r"""Reads a text node from the given text.

        >>> read_text = Parser().read_text
        >>> read_text('hello $name')
        (t'hello ', '$name')
        """
        index = text.find('$')
        if index < 0:
            return TextNode(text), ''
        else:
            return TextNode(text[:index]), text[index:]

    def read_keyword(self, text):
        # a bare "$pass" / "$break" / "$continue" / "$return" line
        line, text = splitline(text)
        return StatementNode(line.strip() + "\n"), text

    def read_expr(self, text, escape=True):
        """Reads a python expression from the text and returns the expression and remaining text.

        expr -> simple_expr | paren_expr
        simple_expr -> id extended_expr
        extended_expr -> attr_access | paren_expr extended_expr | ''
        attr_access -> dot id extended_expr
        paren_expr -> [ tokens ] | ( tokens ) | { tokens }

        >>> read_expr = Parser().read_expr
        >>> read_expr("name")
        ($name, '')
        >>> read_expr("a.b and c")
        ($a.b, ' and c')
        >>> read_expr("a. b")
        ($a, '. b')
        >>> read_expr("name</h1>")
        ($name, '</h1>')
        >>> read_expr("(limit)ing")
        ($(limit), 'ing')
        >>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
        ($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
        """
        # recursive-descent helpers implementing the mini-grammar above
        def simple_expr():
            identifier()
            extended_expr()

        def identifier():
            next(tokens)

        def extended_expr():
            lookahead = tokens.lookahead()
            if lookahead is None:
                return
            elif lookahead.value == '.':
                attr_access()
            elif lookahead.value in parens:
                paren_expr()
                extended_expr()
            else:
                return

        def attr_access():
            from token import NAME  # python token constants
            dot = tokens.lookahead()
            if tokens.lookahead2().type == NAME:
                next(tokens)  # consume dot
                identifier()
                extended_expr()

        def paren_expr():
            # consume a balanced (...) / [...] / {...} group, recursively
            begin = next(tokens).value
            end = parens[begin]
            while True:
                if tokens.lookahead().value in parens:
                    paren_expr()
                else:
                    t = next(tokens)
                    if t.value == end:
                        break
            return

        parens = {
            "(": ")",
            "[": "]",
            "{": "}"
        }

        def get_tokens(text):
            """tokenize text using python tokenizer.
            Python tokenizer ignores spaces, but they might be important in some cases.
            This function introduces dummy space tokens when it identifies any ignored space.
            Each token is a storage object containing type, value, begin and end.
            """
            i = iter([text])
            readline = lambda: next(i)
            end = None
            for t in tokenize.generate_tokens(readline):
                t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
                if end is not None and end != t.begin:
                    # gap between tokens: emit a dummy "space" token (type -1)
                    _, x1 = end
                    _, x2 = t.begin
                    yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
                end = t.end
                yield t

        class BetterIter:
            """Iterator-like object with support for 2 look-aheads."""
            def __init__(self, items):
                self.iteritems = iter(items)
                self.items = []
                self.position = 0
                self.current_item = None

            def lookahead(self):
                if len(self.items) <= self.position:
                    self.items.append(self._next())
                return self.items[self.position]

            def _next(self):
                try:
                    return next(self.iteritems)
                except StopIteration:
                    # None marks exhaustion; callers test for it explicitly
                    return None

            def lookahead2(self):
                if len(self.items) <= self.position + 1:
                    self.items.append(self._next())
                return self.items[self.position + 1]

            def __next__(self):
                self.current_item = self.lookahead()
                self.position += 1
                return self.current_item

            next = __next__  # Needed for Py2 compatibility

        tokens = BetterIter(get_tokens(text))

        if tokens.lookahead().value in parens:
            paren_expr()
        else:
            simple_expr()

        # everything up to the last consumed token's end column is the expr
        row, col = tokens.current_item.end
        return ExpressionNode(text[:col], escape=escape), text[col:]

    def read_assignment(self, text):
        r"""Reads assignment statement from text.

        >>> read_assignment = Parser().read_assignment
        >>> read_assignment('a = b + 1\nfoo')
        (<assignment: 'a = b + 1'>, 'foo')
        """
        line, text = splitline(text)
        return AssignmentNode(line.strip()), text

    def python_lookahead(self, text):
        """Returns the first python token from the given text.

        >>> python_lookahead = Parser().python_lookahead
        >>> python_lookahead('for i in range(10):')
        'for'
        >>> python_lookahead('else:')
        'else'
        >>> python_lookahead(' x = 1')
        ' '
        """
        i = iter([text])
        readline = lambda: next(i)
        tokens = tokenize.generate_tokens(readline)
        return next(tokens)[1]

    def python_tokens(self, text):
        # all token strings of one line, via the stdlib tokenizer
        i = iter([text])
        readline = lambda: next(i)
        tokens = tokenize.generate_tokens(readline)
        return [t[1] for t in tokens]

    def read_indented_block(self, text, indent):
        r"""Read a block of text. A block is what typically follows a for or if statement.
        It can be in the same line as that of the statement or an indented block.

        >>> read_indented_block = Parser().read_indented_block
        >>> read_indented_block('  a\n  b\nc', '  ')
        ('a\nb\n', 'c')
        >>> read_indented_block('  a\n    b\n  c\nd', '  ')
        ('a\n  b\nc\n', 'd')
        >>> read_indented_block('  a\n\n    b\nc', '  ')
        ('a\n\n  b\n', 'c')
        """
        if indent == '':
            return '', text

        block = ""
        while text:
            line, text2 = splitline(text)

            if line.strip() == "":
                # blank lines belong to the block regardless of indentation
                block += '\n'
            elif line.startswith(indent):
                block += line[len(indent):]
            else:
                break
            text = text2
        return block, text

    def read_statement(self, text):
        r"""Reads a python statement.

        >>> read_statement = Parser().read_statement
        >>> read_statement('for i in range(10): hello $name')
        ('for i in range(10):', ' hello $name')
        """
        tok = PythonTokenizer(text)
        tok.consume_till(':')
        return text[:tok.index], text[tok.index:]

    def read_block_section(self, text, begin_indent=''):
        r"""
        >>> read_block_section = Parser().read_block_section
        >>> read_block_section('for i in range(10): hello $i\nfoo')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
        >>> read_block_section('for i in range(10):\n        hello $i\n    foo', begin_indent='    ')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, '    foo')
        >>> read_block_section('for i in range(10):\n  hello $i\nfoo')
        (<block: 'for i in range(10):', [<line: [t'hello ', $i, t'\n']>]>, 'foo')
        """
        line, text = splitline(text)
        stmt, line = self.read_statement(line)
        keyword = self.python_lookahead(stmt)

        # if there is some thing left in the line
        if line.strip():
            # inline body: "for i in x: $i"
            block = line.lstrip()
        else:
            def find_indent(text):
                rx = re_compile(' +')
                match = rx.match(text)
                first_indent = match and match.group(0)
                return first_indent or ""

            # find the indentation of the block by looking at the first line
            first_indent = find_indent(text)[len(begin_indent):]

            #TODO: fix this special case
            if keyword == "code":
                indent = begin_indent + first_indent
            else:
                indent = begin_indent + min(first_indent, INDENT)

            block, text = self.read_indented_block(text, indent)

        return self.create_block_node(keyword, stmt, block, begin_indent), text

    def create_block_node(self, keyword, stmt, block, begin_indent):
        # dispatch to the node class registered for this statement keyword
        if keyword in self.statement_nodes:
            return self.statement_nodes[keyword](stmt, block, begin_indent)
        else:
            raise ParseError('Unknown statement: %s' % repr(keyword))
class PythonTokenizer:
    """Utility wrapper over python tokenizer.

    Tracks ``index``, the column just past the most recently consumed token,
    so callers can slice the original text at statement boundaries.
    """
    def __init__(self, text):
        self.text = text
        i = iter([text])
        readline = lambda: next(i)
        self.tokens = tokenize.generate_tokens(readline)
        self.index = 0

    def consume_till(self, delim):
        """Consumes tokens till *delim* (skipping balanced brackets) or end of line.

        >>> tok = PythonTokenizer('for i in range(10): hello $i')
        >>> tok.consume_till(':')
        >>> tok.text[:tok.index]
        'for i in range(10):'
        >>> tok.text[tok.index:]
        ' hello $i'
        """
        try:
            while True:
                t = next(self)
                if t.value == delim:
                    break
                elif t.value == '(':
                    self.consume_till(')')
                elif t.value == '[':
                    self.consume_till(']')
                elif t.value == '{':
                    self.consume_till('}')

                # if end of line is found, it is an exception.
                # Since there is no easy way to report the line number,
                # leave the error reporting to the python parser later
                #@@ This should be fixed.
                if t.value == '\n':
                    break
        except:
            #raise ParseError, "Expected %s, found end of line." % repr(delim)

            # raising ParseError doesn't show the line number.
            # if this error is ignored, then it will be caught when compiling the python code.
            # NOTE(review): deliberate best-effort bare except — errors surface
            # later when the generated code is compiled.
            return

    def __next__(self):
        type, t, begin, end, line = next(self.tokens)
        row, col = end
        self.index = col
        return storage(type=type, value=t, begin=begin, end=end)

    next = __next__  # needed for Py2 compatibility
class DefwithNode:
    """Root node of the parse tree.

    Wraps the whole template in a ``def __template__(...):`` function whose
    body accumulates output into a TemplateResult and returns it.
    """
    def __init__(self, defwith, suite):
        if defwith:
            self.defwith = defwith.replace('with', '__template__') + ':'
            # offset 4 lines. for encoding, __lineoffset__, loop and self.
            self.defwith += "\n    __lineoffset__ = -4"
        else:
            self.defwith = 'def __template__():'
            # offset 4 lines for encoding, __template__, __lineoffset__, loop and self.
            self.defwith += "\n    __lineoffset__ = -5"

        self.defwith += "\n    loop = ForLoop()"
        self.defwith += "\n    self = TemplateResult(); extend_ = self.extend"
        self.suite = suite
        self.end = "\n    return self"

    def emit(self, indent):
        # utf-8 coding line keeps line numbers predictable for tracebacks
        encoding = "# coding: utf-8\n"
        return encoding + self.defwith + self.suite.emit(indent + INDENT) + self.end

    def __repr__(self):
        return "<defwith: %s, %s>" % (self.defwith, self.suite)
class TextNode:
    """A literal chunk of template text; emitted as a python string literal."""
    def __init__(self, value):
        self.value = value

    def emit(self, indent, begin_indent=''):
        # safeunicode normalizes byte strings so repr() yields valid source
        return repr(safeunicode(self.value))

    def __repr__(self):
        return 't' + repr(self.value)
class ExpressionNode:
    """A ``$expr`` node: evaluates a python expression at render time,
    passing the result through ``escape_`` (escaped unless ``$:`` was used)."""

    def __init__(self, value, escape=True):
        self.value = value.strip()
        # convert ${...} to $(...) so the value reads as a call argument
        if value.startswith('{') and value.endswith('}'):
            self.value = '(' + self.value[1:-1] + ')'
        self.escape = escape

    def emit(self, indent, begin_indent=''):
        return 'escape_(%s, %s)' % (self.value, bool(self.escape))

    def __repr__(self):
        prefix = '' if self.escape else ':'
        return "$%s%s" % (prefix, self.value)
class AssignmentNode:
    """A ``$ stmt`` section: one raw python statement copied verbatim."""

    def __init__(self, code):
        self.code = code

    def emit(self, indent, begin_indent=''):
        return "%s%s\n" % (indent, self.code)

    def __repr__(self):
        return "<assignment: %s>" % repr(self.code)
class LineNode:
    """One template line: its nodes' emitted expressions are collected into
    a single ``extend_([...])`` call, optionally prefixed by the block's
    text indentation."""

    def __init__(self, nodes):
        self.nodes = nodes

    def emit(self, indent, text_indent='', name=''):
        parts = [node.emit('') for node in self.nodes]
        if text_indent:
            parts.insert(0, repr(text_indent))
        return "%sextend_([%s])\n" % (indent, ", ".join(parts))

    def __repr__(self):
        return "<line: %s>" % repr(self.nodes)
INDENT = ' ' # 4 spaces
class BlockNode:
    """A compound statement (while/if/...) together with its parsed body suite."""

    def __init__(self, stmt, block, begin_indent=''):
        self.stmt = stmt
        # the body is itself template text; parse it into a sub-suite
        self.suite = Parser().read_suite(block)
        self.begin_indent = begin_indent

    def emit(self, indent, text_indent=''):
        combined_indent = self.begin_indent + text_indent
        return indent + self.stmt + self.suite.emit(indent + INDENT, combined_indent)

    def __repr__(self):
        return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
class ForNode(BlockNode):
    # Rewrites "for <targets> in <seq>:" into
    # "for <targets> in loop.setup(<seq>):" so the runtime ForLoop helper
    # can expose loop.index / loop.parity / ... inside the body.
    def __init__(self, stmt, block, begin_indent=''):
        self.original_stmt = stmt
        tok = PythonTokenizer(stmt)
        tok.consume_till('in')
        a = stmt[:tok.index]  # "for <targets> in"
        b = stmt[tok.index:-1]  # iterable expression, without the trailing ':'
        stmt = a + ' loop.setup(' + b.strip() + '):'
        BlockNode.__init__(self, stmt, block, begin_indent)

    def __repr__(self):
        return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
class CodeNode:
    """A ``$code:`` block: its body is inserted into the output as plain python."""

    def __init__(self, stmt, block, begin_indent=''):
        # the leading newline compensates for the "$code:" line itself,
        # keeping generated line numbers aligned with the template
        self.code = "\n" + block

    def emit(self, indent, text_indent=''):
        import re
        line_start = re.compile('^', re.M)
        return line_start.sub(indent, self.code).rstrip(' ')

    def __repr__(self):
        return "<code: %s>" % repr(self.code)
class StatementNode:
    """A bare keyword line such as ``$pass``, ``$break``, ``$continue`` or ``$return``."""

    def __init__(self, stmt):
        self.stmt = stmt

    def emit(self, indent, begin_indent=''):
        return "%s%s" % (indent, self.stmt)

    def __repr__(self):
        return "<stmt: %s>" % repr(self.stmt)
# if/elif/else need no behavior beyond BlockNode; distinct classes keep the
# keyword-to-node mapping in STATEMENT_NODES explicit.
class IfNode(BlockNode):
    pass

class ElseNode(BlockNode):
    pass

class ElifNode(BlockNode):
    pass
class DefNode(BlockNode):
    # A "$def name(...):" block defines a reusable sub-template.  The body is
    # bracketed by two injected CodeNodes so that calling the generated
    # function builds and returns its own TemplateResult.
    def __init__(self, *a, **kw):
        BlockNode.__init__(self, *a, **kw)

        code = CodeNode("", "")
        code.code = "self = TemplateResult(); extend_ = self.extend\n"
        self.suite.sections.insert(0, code)

        code = CodeNode("", "")
        code.code = "return self\n"
        self.suite.sections.append(code)

    def emit(self, indent, text_indent=''):
        text_indent = self.begin_indent + text_indent
        out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
        # the injected prologue/epilogue shift line numbers by 3; adjust the
        # offset so template tracebacks still point at the right line
        return indent + "__lineoffset__ -= 3\n" + out
class VarNode:
    """A ``$var name: value`` node: stores a value on the TemplateResult."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def emit(self, indent, text_indent):
        return "%sself[%s] = %s\n" % (indent, repr(self.name), self.value)

    def __repr__(self):
        return "<var: %s = %s>" % (self.name, self.value)
class SuiteNode:
    """An ordered list of section nodes; emits each one in sequence."""

    def __init__(self, sections):
        self.sections = sections

    def emit(self, indent, text_indent=''):
        emitted = [section.emit(indent, text_indent) for section in self.sections]
        return "\n" + "".join(emitted)

    def __repr__(self):
        return repr(self.sections)
# Maps the keyword that opens a block section to the node class that parses it
# (consulted by Parser.read_section / create_block_node).
STATEMENT_NODES = {
    'for': ForNode,
    'while': BlockNode,
    'if': IfNode,
    'elif': ElifNode,
    'else': ElseNode,
    'def': DefNode,
    'code': CodeNode
}

# Bare keywords usable as "$keyword" lines (handled by Parser.read_keyword).
KEYWORDS = [
    "pass",
    "break",
    "continue",
    "return"
]
# Python builtin names exposed to template code.  Only names that exist in
# the running interpreter's builtins are actually copied into
# TEMPLATE_BUILTINS (e.g. ``long``/``xrange``/``cmp`` exist on Python 2 only).
TEMPLATE_BUILTIN_NAMES = [
    "dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
    "set", "slice", "tuple", "xrange",
    "abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
    "id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
    "True", "False",
    "None",
    "__import__", # some c-libraries like datetime requires __import__ to present in the namespace
]

if PY2:
    import __builtin__ as builtins
else:
    import builtins

TEMPLATE_BUILTINS = dict([(name, getattr(builtins, name)) for name in TEMPLATE_BUILTIN_NAMES if name in builtins.__dict__])
class ForLoop:
    """
    Wrapper for expression in for statement to support loop.xxx helpers.

    >>> loop = ForLoop()
    >>> for x in loop.setup(['a', 'b', 'c']):
    ...     print(loop.index, loop.revindex, loop.parity, x)
    ...
    1 3 odd a
    2 2 even b
    3 1 odd c
    >>> loop.index
    Traceback (most recent call last):
        ...
    AttributeError: index
    """
    def __init__(self):
        self._ctx = None

    def __getattr__(self, name):
        # delegate loop.index / loop.parity / ... to the innermost context
        if self._ctx is None:
            raise AttributeError(name)
        else:
            return getattr(self._ctx, name)

    def setup(self, seq):
        # push a fresh context so nested for-loops each track their own state
        self._push()
        return self._ctx.setup(seq)

    def _push(self):
        self._ctx = ForLoopContext(self, self._ctx)

    def _pop(self):
        self._ctx = self._ctx.parent
class ForLoopContext:
    """Stackable context for ForLoop to support nested for loops.
    """
    def __init__(self, forloop, parent):
        self._forloop = forloop
        self.parent = parent

    def setup(self, seq):
        try:
            self.length = len(seq)
        except:
            # unsized iterables (generators etc.): length-based helpers
            # (last/revindex/revindex0) will be wrong; index/parity still work
            self.length = 0

        self.index = 0
        for a in seq:
            self.index += 1
            yield a
        # loop finished normally: restore the enclosing loop's context
        self._forloop._pop()

    # 1-based index is the primary counter; everything else derives from it
    index0 = property(lambda self: self.index-1)
    first = property(lambda self: self.index == 1)
    last = property(lambda self: self.index == self.length)
    odd = property(lambda self: self.index % 2 == 1)
    even = property(lambda self: self.index % 2 == 0)
    parity = property(lambda self: ['odd', 'even'][self.even])
    revindex0 = property(lambda self: self.length - self.index)
    revindex = property(lambda self: self.length - self.index + 1)
class BaseTemplate:
    """Holds a compiled ``__template__`` function and the environment
    (globals, builtins, output filter) it executes in."""

    def __init__(self, code, filename, filter, globals, builtins):
        self.filename = filename
        self.filter = filter
        self._globals = globals
        self._builtins = builtins
        if code:
            self.t = self._compile(code)
        else:
            # empty code: render as the empty string
            self.t = lambda: ''

    def _compile(self, code):
        env = self.make_env(self._globals or {}, self._builtins)
        exec(code, env)
        # __template__ is a global function declared when executing "code"
        return env['__template__']

    def __call__(self, *a, **kw):
        __hidetraceback__ = True  # marker read by web.py's traceback filtering
        return self.t(*a, **kw)

    def make_env(self, globals, builtins):
        """Build the namespace the generated template code runs in."""
        return dict(globals,
            __builtins__=builtins,
            ForLoop=ForLoop,
            TemplateResult=TemplateResult,
            escape_=self._escape,
            join_=self._join
        )

    def _join(self, *items):
        return u"".join(items)

    def _escape(self, value, escape=False):
        # None renders as empty; everything else is coerced to unicode,
        # then optionally passed through the output filter (e.g. websafe)
        if value is None:
            value = ''

        value = safeunicode(value)
        if escape and self.filter:
            value = self.filter(value)
        return value
class Template(BaseTemplate):
    """A template compiled from template source text.

    The text is normalized, parsed, security-checked (SafeVisitor) and
    compiled to python code.  The filename extension selects a default
    output filter and Content-Type header.
    """
    CONTENT_TYPES = {
        '.html' : 'text/html; charset=utf-8',
        '.xhtml' : 'application/xhtml+xml; charset=utf-8',
        '.txt' : 'text/plain',
    }
    FILTERS = {
        '.html': websafe,
        '.xhtml': websafe,
        '.xml': websafe
    }
    globals = {}

    def __init__(self, text, filename='<template>', filter=None, globals=None, builtins=None, extensions=None):
        self.extensions = extensions or []
        text = Template.normalize_text(text)
        code = self.compile_template(text, filename)

        _, ext = os.path.splitext(filename)
        filter = filter or self.FILTERS.get(ext, None)
        self.content_type = self.CONTENT_TYPES.get(ext, None)

        if globals is None:
            globals = self.globals
        if builtins is None:
            builtins = TEMPLATE_BUILTINS

        BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)

    @staticmethod
    def normalize_text(text):
        """Normalizes template text by correcting \r\n, tabs and BOM chars."""
        text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
        if not text.endswith('\n'):
            text += '\n'

        # ignore BOM chars at the beginning of template.
        # On Python 3 a utf-8 decoded BOM appears as u'\ufeff'; the raw
        # utf-8 byte sequence below covers Python 2 byte strings.
        if text.startswith('\ufeff'):
            text = text[1:]
        BOM = '\xef\xbb\xbf'
        if isinstance(text, str) and text.startswith(BOM):
            text = text[len(BOM):]

        # support \$ for backward-compatibility
        text = text.replace(r'\$', '$$')
        return text

    def __call__(self, *a, **kw):
        __hidetraceback__ = True
        from . import webapi as web
        # set the Content-Type header only inside a web.py request context
        if 'headers' in web.ctx and self.content_type:
            web.header('Content-Type', self.content_type, unique=True)

        return BaseTemplate.__call__(self, *a, **kw)

    @staticmethod
    def generate_code(text, filename, parser=None):
        """Parses *text* and returns the generated python source code."""
        # parse the text
        parser = parser or Parser()
        rootnode = parser.parse(text, filename)

        # generate python code from the parse tree
        code = rootnode.emit(indent="").strip()
        return safestr(code)

    def create_parser(self):
        # let registered extensions wrap/replace the base parser
        p = Parser()
        for ext in self.extensions:
            p = ext(p)
        return p

    def compile_template(self, template_string, filename):
        code = Template.generate_code(template_string, filename, parser=self.create_parser())

        def get_source_line(filename, lineno):
            # best-effort: read the offending template line for the message
            try:
                lines = open(filename, encoding='utf-8').read().splitlines()
                return lines[lineno]
            except:
                return None

        try:
            # compile the code first to report the errors, if any, with the filename
            compiled_code = compile(code, filename, 'exec')
        except SyntaxError as err:
            # display template line that caused the error along with the traceback.
            err.msg += '\n\nTemplate traceback:\n    File %s, line %s\n        %s' % \
                (repr(err.filename), err.lineno, get_source_line(err.filename, err.lineno-1))
            raise

        # make sure code is safe before it is ever executed
        ast_node = ast.parse(code, filename)
        SafeVisitor().walk(ast_node, filename)

        return compiled_code
class CompiledTemplate(Template):
    """Wraps an already-compiled template function (see compile_templates),
    skipping parsing and compilation entirely."""
    def __init__(self, f, filename):
        Template.__init__(self, '', filename)
        self.t = f  # the precompiled template function

    def compile_template(self, *a):
        # nothing to compile: source was precompiled offline
        return None

    def _compile(self, *a):
        return None
class Render:
    """The most preferred way of using templates.

        render = web.template.render('templates')
        print render.foo()

    The optional parameter `base` can be used to pass the output of
    every template through the base template.

        render = web.template.render('templates', base='layout')
    """
    def __init__(self, loc='templates', cache=None, base=None, **keywords):
        self._loc = loc
        self._keywords = keywords

        if cache is None:
            # cache templates unless the app is running in debug mode
            cache = not config.get('debug', False)

        if cache:
            self._cache = {}
        else:
            self._cache = None

        if base and not hasattr(base, '__call__'):
            # make base a function, so that it can be passed to sub-renders
            self._base = lambda page: self._template(base)(page)
        else:
            self._base = base

    def _add_global(self, obj, name=None):
        """Add a global to this rendering instance."""
        if 'globals' not in self._keywords: self._keywords['globals'] = {}
        if not name:
            name = obj.__name__
        self._keywords['globals'][name] = obj

    def _lookup(self, name):
        # resolve *name* to a subdirectory (nested render) or a template file
        path = os.path.join(self._loc, name)
        if os.path.isdir(path):
            return 'dir', path
        else:
            path = self._findfile(path)
            if path:
                return 'file', path
            else:
                return 'none', None

    def _load_template(self, name):
        kind, path = self._lookup(name)

        if kind == 'dir':
            # a directory becomes a nested Render sharing this one's settings
            return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
        elif kind == 'file':
            return Template(open(path, encoding='utf-8').read(), filename=path, **self._keywords)
        else:
            raise AttributeError("No template named " + name)

    def _findfile(self, path_prefix):
        p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
        p.sort() # sort the matches for deterministic order
        # support templates without extension (#364)
        # When no templates are found and a file is found with the exact name, use it.
        if not p and os.path.exists(path_prefix):
            p = [path_prefix]
        return p and p[0]

    def _template(self, name):
        if self._cache is not None:
            if name not in self._cache:
                self._cache[name] = self._load_template(name)
            return self._cache[name]
        else:
            return self._load_template(name)

    def __getattr__(self, name):
        # attribute access is the public API: render.foo -> templates/foo.*
        t = self._template(name)
        if self._base and isinstance(t, Template):
            def template(*a, **kw):
                return self._base(t(*a, **kw))
            return template
        else:
            return self._template(name)
class GAE_Render(Render):
    """Render variant for Google App Engine: loads templates precompiled
    into python modules (see compile_templates) instead of reading files."""
    # Render gets over-written. make a copy here.
    super = Render
    def __init__(self, loc, *a, **kw):
        GAE_Render.super.__init__(self, loc, *a, **kw)

        import types
        if isinstance(loc, types.ModuleType):
            self.mod = loc
        else:
            # "templates/sub" -> import "templates.sub"
            name = loc.rstrip('/').replace('/', '.')
            self.mod = __import__(name, None, None, ['x'])

        # inject the template runtime names into the compiled module
        self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
        self.mod.__dict__.update(Template.globals)
        self.mod.__dict__.update(kw.get('globals', {}))

    def _load_template(self, name):
        t = getattr(self.mod, name)
        import types
        if isinstance(t, types.ModuleType):
            # submodule -> nested render, mirroring Render's directory case
            return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
        else:
            return t
render = Render
# setup render for Google App Engine.
# If the App Engine SDK is importable, assume templates were precompiled
# into python modules and substitute GAE_Render for both public names.
try:
    from google import appengine
    render = Render = GAE_Render
except ImportError:
    pass
def frender(path, **keywords):
    """Creates a template from the given file path.

    The file is read eagerly and closed deterministically via a context
    manager (the previous version left the handle to be reclaimed by GC).
    Extra *keywords* are forwarded to the Template constructor.
    """
    with open(path, encoding='utf-8') as f:
        text = f.read()
    return Template(text, filename=path, **keywords)
def compile_templates(root):
    """Compiles templates to python code.

    Walks *root*, writing one ``__init__.py`` per directory containing the
    generated code for every template file, wrapped in CompiledTemplate
    objects (the form GAE_Render consumes).
    """
    # NOTE(review): re_start appears unused in this function — confirm.
    re_start = re_compile('^', re.M)

    for dirpath, dirnames, filenames in os.walk(root):
        filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]

        for d in dirnames[:]:
            if d.startswith('.'):
                dirnames.remove(d) # don't visit this dir

        out = open(os.path.join(dirpath, '__init__.py'), 'w', encoding='utf-8')
        out.write('from web.template import CompiledTemplate, ForLoop, TemplateResult\n\n')
        if dirnames:
            out.write("import " + ", ".join(dirnames))
        out.write("\n")

        for f in filenames:
            path = os.path.join(dirpath, f)

            if '.' in f:
                name, _ = f.split('.', 1)
            else:
                name = f

            # NOTE(review): template files opened below are never explicitly
            # closed — relies on GC; confirm acceptable.
            text = open(path, encoding='utf-8').read()
            text = Template.normalize_text(text)
            code = Template.generate_code(text, path)
            code = code.replace("__template__", name, 1)

            out.write(code)
            out.write('\n\n')
            out.write('%s = CompiledTemplate(%s, %s)\n' % (name, name, repr(path)))
            out.write("join_ = %s._join; escape_ = %s._escape\n\n" % (name, name))

            # create template to make sure it compiles
            t = Template(open(path, encoding='utf-8').read(), path)
        out.close()
class ParseError(Exception):
    """Raised when template text cannot be parsed (e.g. unknown statement)."""
    pass

class SecurityError(Exception):
    """The template seems to be trying to do something naughty."""
    pass
# Whitelist of AST node type names that template code may contain; anything
# else (imports, raise, exec, global, ...) is rejected by SafeVisitor.
ALLOWED_AST_NODES = ['Interactive', 'Expression', 'Suite', 'FunctionDef',
    'ClassDef', 'Return', 'Delete', 'Assign', 'AugAssign', 'alias',
    #'Print', 'Repr',
    'For', 'While', 'If', 'With', 'comprehension','NameConstant', 'arg',
    #'Raise', 'TryExcept', 'TryFinally', 'Assert', 'Import',
    #'ImportFrom', 'Exec', 'Global',
    'Expr', 'Pass', 'Break', 'Continue', 'BoolOp', 'BinOp', 'UnaryOp',
    'Lambda', 'IfExp', 'Dict', 'Module', 'arguments', 'keyword',
    'Set', 'ListComp', 'SetComp', 'DictComp', 'GeneratorExp', 'Yield',
    'Compare', 'Call', 'Num', 'Str', 'Attribute', 'Subscript',
    'Name', 'List', 'Tuple', 'Load', 'Store', 'Del', 'AugLoad', 'AugStore',
    'Param', 'Ellipsis', 'Slice', 'ExtSlice', 'Index', 'And', 'Or', 'Add',
    'Sub', 'Mult', 'Div', 'Mod', 'Pow', 'LShift', 'RShift', 'BitOr', 'BitXor',
    'BitAnd', 'FloorDiv', 'Invert', 'Not', 'UAdd', 'USub', 'Eq', 'NotEq',
    'Lt', 'LtE', 'Gt', 'GtE', 'Is', 'IsNot', 'In', 'NotIn', 'ExceptHandler']
class SafeVisitor(ast.NodeVisitor):
    """
    Make sure code is safe by walking through the AST.

    Code considered unsafe if:
        * it has restricted AST nodes (only nodes defined in ALLOWED_AST_NODES are allowed)
        * it is trying to assign to attributes
        * it is trying to access restricted attributes

    Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
        * Using ast rather than compiler tree, for jython and Py3 support since Py2.6
        * Simplified with ast.NodeVisitor class
    """
    def __init__(self, *args, **kwargs):
        "Initialize visitor by generating callbacks for all AST node types."
        super(SafeVisitor, self).__init__(*args, **kwargs)
        self.errors = []  # collected SecurityErrors; raised together in walk()

    def walk(self, tree, filename):
        "Validate each node in AST and raise SecurityError if the code is not safe."
        self.filename = filename
        self.visit(tree)

        if self.errors:
            raise SecurityError('\n'.join([str(err) for err in self.errors]))

    def generic_visit(self, node):
        # reject any node type that is not explicitly whitelisted
        nodename = type(node).__name__
        if nodename not in ALLOWED_AST_NODES:
            self.fail_name(node, nodename)
        super(SafeVisitor, self).generic_visit(node)

    def visit_Attribute(self, node):
        attrname = self.get_node_attr(node)
        if self.is_unallowed_attr(attrname):
            self.fail_attribute(node, attrname)
        super(SafeVisitor, self).generic_visit(node)

    def visit_Assign(self, node):
        self.check_assign_targets(node)

    def visit_AugAssign(self, node):
        # NOTE(review): passes the whole AugAssign node rather than
        # node.target; check_assign_target only fails for Attribute nodes,
        # so an augmented attribute assignment may not be flagged here, and
        # the node's children are not re-visited — confirm intent.
        self.check_assign_target(node)

    def check_assign_targets(self, node):
        for target in node.targets:
            self.check_assign_target(target)
        super(SafeVisitor, self).generic_visit(node)

    def check_assign_target(self, targetnode):
        # assigning to any attribute is forbidden outright
        targetname = type(targetnode).__name__
        if targetname == "Attribute":
            attrname = self.get_node_attr(targetnode)
            self.fail_attribute(targetnode, attrname)

    # failure modes
    def fail_name(self, node, nodename):
        lineno = self.get_node_lineno(node)
        e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
        self.errors.append(e)

    def fail_attribute(self, node, attrname):
        lineno = self.get_node_lineno(node)
        e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
        self.errors.append(e)

    # helpers
    def is_unallowed_attr(self, name):
        # dunder/private names plus Py2 function/method internals
        return name.startswith('_') \
            or name.startswith('func_') \
            or name.startswith('im_')

    def get_node_attr(self, node):
        return 'attr' in node._fields and node.attr or None

    def get_node_lineno(self, node):
        return (node.lineno) and node.lineno or 0
class TemplateResult(MutableMapping):
    """Dictionary like object for storing template output.
    The result of a template execution is usually a string, but sometimes it
    contains attributes set using $var. This class provides a simple
    dictionary like interface for storing the output of the template and the
    attributes. The output is stored with a special key __body__. Converting
    the TemplateResult to string or unicode returns the value of __body__.
    When the template is in execution, the output is generated part by part
    and those parts are combined at the end. Parts are added to the
    TemplateResult by calling the `extend` method and the parts are combined
    seamlessly when __body__ is accessed.
    >>> d = TemplateResult(__body__='hello, world', x='foo')
    >>> print(d)
    hello, world
    >>> d.x
    'foo'
    >>> d = TemplateResult()
    >>> d.extend([u'hello', u'world'])
    >>> d
    <TemplateResult: {'__body__': u'helloworld'}>
    """
    def __init__(self, *a, **kw):
        # Assign through __dict__ to bypass our own __setattr__, which
        # would otherwise store these bookkeeping attributes inside _d.
        self.__dict__["_d"] = dict(*a, **kw)
        self._d.setdefault("__body__", u'')
        self.__dict__['_parts'] = []
        # Bind list.extend directly so template code can append parts
        # without going through __getattr__.
        self.__dict__["extend"] = self._parts.extend
        # NOTE(review): this second setdefault is a no-op -- "__body__" was
        # already defaulted to u'' above; kept for byte-compatibility.
        self._d.setdefault("__body__", None)
    def keys(self):
        return self._d.keys()
    def _prepare_body(self):
        """Prepare value of __body__ by joining parts.
        """
        if self._parts:
            value = u"".join(self._parts)
            # Clear in place so the bound `extend` method stays valid.
            self._parts[:] = []
            body = self._d.get('__body__')
            if body:
                self._d['__body__'] = body + value
            else:
                self._d['__body__'] = value
    def __getitem__(self, name):
        if name == "__body__":
            self._prepare_body()
        return self._d[name]
    def __setitem__(self, name, value):
        if name == "__body__":
            # Join and discard pending parts so they are not appended
            # onto the newly assigned body later.
            self._prepare_body()
        return self._d.__setitem__(name, value)
    def __delitem__(self, name):
        if name == "__body__":
            self._prepare_body()
        return self._d.__delitem__(name)
    def __getattr__(self, key):
        # Attribute access falls through to the dict: d.x == d["x"].
        try:
            return self[key]
        except KeyError as k:
            raise AttributeError(k)
    def __setattr__(self, key, value):
        self[key] = value
    def __delattr__(self, key):
        try:
            del self[key]
        except KeyError as k:
            raise AttributeError(k)
    def __unicode__(self):
        # Python 2 only; Python 3 never calls __unicode__.
        self._prepare_body()
        return self["__body__"]
    def __str__(self):
        self._prepare_body()
        if PY2:
            return self["__body__"].encode('utf-8')
        else:
            return self["__body__"]
    def __repr__(self):
        self._prepare_body()
        return "<TemplateResult: %s>" % self._d
    def __len__(self):
        return self._d.__len__()
    def __iter__(self):
        for i in self._d.__iter__():
            if i == "__body__":
                # Merge pending parts before __body__ is observed
                # through iteration.
                self._prepare_body()
            yield i
def test():
    r"""Doctest for testing template module.
    Define a utility function to run template test.
    >>> class TestResult:
    ...     def __init__(self, t): self.t = t
    ...     def __getattr__(self, name): return getattr(self.t, name)
    ...     def __repr__(self): return repr(unicode(self.t) if PY2 else str(self.t))
    ...
    >>> def t(code, **keywords):
    ...     tmpl = Template(code, **keywords)
    ...     return lambda *a, **kw: TestResult(tmpl(*a, **kw))
    ...
    Simple tests.
    >>> t('1')()
    u'1\n'
    >>> t('$def with ()\n1')()
    u'1\n'
    >>> t('$def with (a)\n$a')(1)
    u'1\n'
    >>> t('$def with (a=0)\n$a')(1)
    u'1\n'
    >>> t('$def with (a=0)\n$a')(a=1)
    u'1\n'
    Test complicated expressions.
    >>> t('$def with (x)\n$x.upper()')('hello')
    u'HELLO\n'
    >>> t('$(2 * 3 + 4 * 5)')()
    u'26\n'
    >>> t('${2 * 3 + 4 * 5}')()
    u'26\n'
    >>> t('$def with (limit)\nkeep $(limit)ing.')('go')
    u'keep going.\n'
    >>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
    u'1\n'
    Test html escaping.
    >>> t('$def with (x)\n$x', filename='a.html')('<html>')
    u'<html>\n'
    >>> t('$def with (x)\n$x', filename='a.txt')('<html>')
    u'<html>\n'
    Test if, for and while.
    >>> t('$if 1: 1')()
    u'1\n'
    >>> t('$if 1:\n 1')()
    u'1\n'
    >>> t('$if 1:\n 1\\')()
    u'1'
    >>> t('$if 0: 0\n$elif 1: 1')()
    u'1\n'
    >>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
    u'1\n'
    >>> t('$if 0 < 1 and 1 < 2: 1')()
    u'1\n'
    >>> t('$for x in [1, 2, 3]: $x')()
    u'1\n2\n3\n'
    >>> t('$def with (d)\n$for k, v in d.items(): $k')({1: 1})
    u'1\n'
    >>> t('$for x in [1, 2, 3]:\n\t$x')()
    u' 1\n 2\n 3\n'
    >>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
    u'1\n1\n1\n'
    The space after : must be ignored.
    >>> t('$if True: foo')()
    u'foo\n'
    Test loop.xxx.
    >>> t("$for i in range(5):$loop.index, $loop.parity")()
    u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
    >>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
    u'odd odd\nodd even\neven odd\neven even\n'
    Test assignment.
    >>> t('$ a = 1\n$a')()
    u'1\n'
    >>> t('$ a = [1]\n$a[0]')()
    u'1\n'
    >>> t('$ a = {1: 1}\n$list(a.keys())[0]')()
    u'1\n'
    >>> t('$ a = []\n$if not a: 1')()
    u'1\n'
    >>> t('$ a = {}\n$if not a: 1')()
    u'1\n'
    >>> t('$ a = -1\n$a')()
    u'-1\n'
    >>> t('$ a = "1"\n$a')()
    u'1\n'
    Test comments.
    >>> t('$# 0')()
    u'\n'
    >>> t('hello$#comment1\nhello$#comment2')()
    u'hello\nhello\n'
    >>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
    u'\nhello\nhello\n'
    Test unicode.
    >>> t('$def with (a)\n$a')(u'\u203d')
    u'\u203d\n'
    >>> t(u'$def with (a)\n$a $:a')(u'\u203d')
    u'\u203d \u203d\n'
    >>> t(u'$def with ()\nfoo')()
    u'foo\n'
    >>> def f(x): return x
    ...
    >>> t(u'$def with (f)\n$:f("x")')(f)
    u'x\n'
    >>> t('$def with (f)\n$:f("x")')(f)
    u'x\n'
    Test dollar escaping.
    >>> t("Stop, $$money isn't evaluated.")()
    u"Stop, $money isn't evaluated.\n"
    >>> t("Stop, \$money isn't evaluated.")()
    u"Stop, $money isn't evaluated.\n"
    Test space sensitivity.
    >>> t('$def with (x)\n$x')(1)
    u'1\n'
    >>> t('$def with(x ,y)\n$x')(1, 1)
    u'1\n'
    >>> t('$(1 + 2*3 + 4)')()
    u'11\n'
    Make sure globals are working.
    >>> t('$x')()
    Traceback (most recent call last):
        ...
    NameError: global name 'x' is not defined
    >>> t('$x', globals={'x': 1})()
    u'1\n'
    Can't change globals.
    >>> t('$ x = 2\n$x', globals={'x': 1})()
    u'2\n'
    >>> t('$ x = x + 1\n$x', globals={'x': 1})()
    Traceback (most recent call last):
        ...
    UnboundLocalError: local variable 'x' referenced before assignment
    Make sure builtins are customizable.
    >>> t('$min(1, 2)')()
    u'1\n'
    >>> t('$min(1, 2)', builtins={})()
    Traceback (most recent call last):
        ...
    NameError: global name 'min' is not defined
    Test vars.
    >>> x = t('$var x: 1')()
    >>> x.x
    u'1'
    >>> x = t('$var x = 1')()
    >>> x.x
    1
    >>> x = t('$var x: \n foo\n bar')()
    >>> x.x
    u'foo\nbar\n'
    Test BOM chars.
    >>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
    u'foo\n'
    Test for with weird cases.
    >>> t('$for i in range(10)[1:5]:\n $i')()
    u'1\n2\n3\n4\n'
    >>> t("$for k, v in sorted({'a': 1, 'b': 2}.items()):\n $k $v", globals={'sorted':sorted})()
    u'a 1\nb 2\n'
    Test for syntax error.
    >>> try:
    ...     t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
    ... except SyntaxError:
    ...     print("OK")
    ... else:
    ...     print("Expected SyntaxError")
    ...
    OK
    Test datetime.
    >>> import datetime
    >>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
    u'01 2009\n'
    """
    # The doctests above are the module's regression suite; they are
    # executed by doctest.testmod() from the __main__ block below.
    pass
if __name__ == "__main__":
import sys
if '--compile' in sys.argv:
compile_templates(sys.argv[2])
else:
import doctest
doctest.testmod()
|
bobintetley/asm3
|
src/web039/template.py
|
Python
|
gpl-3.0
| 50,611
|
[
"VisIt"
] |
245c50fa610319ef405649cfd2fcf9defe0b2ebe2806e108064a4d688c38e64c
|
from gpaw import GPAW

# Compare LDA and PBE total energies of three magnetic states of iron,
# reported relative to the ferromagnetic state (printed first, so its
# reference energies are always defined before being subtracted).
print('state LDA PBE')
for state in ['ferro', 'anti', 'non']:
    calculator = GPAW(state + '.gpw', txt=None)
    e_lda = calculator.get_atoms().get_potential_energy()
    # PBE energy = LDA energy + non-self-consistent XC difference.
    e_pbe = e_lda + calculator.get_xc_difference('PBE')
    if state == 'ferro':
        e_lda_ref, e_pbe_ref = e_lda, e_pbe
    print('%-5s: %7.3f eV %7.3f eV' % (state, e_lda - e_lda_ref, e_pbe - e_pbe_ref))
|
robwarm/gpaw-symm
|
doc/exercises/iron/PBE.py
|
Python
|
gpl-3.0
| 450
|
[
"GPAW"
] |
2deb12f6327821b3d9e8f30d1ce0e1ff318449e1767362cc804e9944292f2ed3
|
#
# Copyright (C) 2010-2019 The ESPResSo project
# Copyright (C) 2002,2003,2004,2005,2006,2007,2008,2009,2010
# Max-Planck-Institute for Polymer Research, Theory Group
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import espressomd
from espressomd import electrostatics, electrostatic_extensions, assert_features
from espressomd.shapes import Wall
import numpy
assert_features(["ELECTROSTATICS", "MASS", "LENNARD_JONES"])
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
system.seed = system.cell_system.get_state()['n_nodes'] * [1234]
numpy.random.seed(system.seed)
print("\n--->Setup system")
# System parameters
n_part = 500
n_ionpairs = n_part / 2
density = 1.1138
time_step = 0.001823
temp = 1198.3
gamma = 50
#l_bjerrum = 0.885^2 * e^2/(4*pi*epsilon_0*k_B*T)
l_bjerrum = 130878.0 / temp
num_steps_equilibration = 3000
num_configs = 500
integ_steps_per_config = 100
# Particle parameters
types = {"Cl": 0, "Na": 1, "Electrode": 2}
numbers = {"Cl": n_ionpairs, "Na": n_ionpairs}
charges = {"Cl": -1.0, "Na": 1.0}
lj_sigmas = {"Cl": 3.85, "Na": 2.52, "Electrode": 3.37}
lj_epsilons = {"Cl": 192.45, "Na": 17.44, "Electrode": 24.72}
lj_cuts = {"Cl": 3.0 * lj_sigmas["Cl"],
"Na": 3.0 * lj_sigmas["Na"],
"Electrode": 3.0 * lj_sigmas["Electrode"]}
masses = {"Cl": 35.453, "Na": 22.99, "Electrode": 12.01}
# Setup System
box_l = (n_ionpairs * sum(masses.values()) / density)**(1. / 3.)
box_z = box_l + 2.0 * (lj_sigmas["Electrode"])
box_volume = box_l * box_l * box_z
elc_gap = box_z * 0.15
system.box_l = [box_l, box_l, box_z + elc_gap]
system.periodicity = [True, True, True]
system.time_step = time_step
system.cell_system.skin = 0.3
system.thermostat.set_langevin(kT=temp, gamma=gamma, seed=42)
# Uniform electric field between two parallel plates
# E = V/d in units of V/m
# E = V/d/k_b*e in units of eV/m
Ez = 15 / (8.61733e-5 * box_z) # in units of eV/m
# Walls
system.constraints.add(shape=Wall(dist=0, normal=[0, 0, 1]),
particle_type=types["Electrode"])
system.constraints.add(shape=Wall(dist=-box_z, normal=[0, 0, -1]),
particle_type=types["Electrode"])
# Place particles
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Cl"],
pos=p, q=charges["Cl"], mass=masses["Cl"])
for i in range(int(n_ionpairs)):
p = numpy.random.random(3) * box_l
p[2] += lj_sigmas["Electrode"]
system.part.add(id=len(system.part), type=types["Na"],
pos=p, q=charges["Na"], mass=masses["Na"])
# Lennard-Jones interactions parameters
def combination_rule_epsilon(rule, eps1, eps2):
    """Return the mixed Lennard-Jones epsilon for a pair of species.

    Uses the geometric mean (label "Lorentz" kept unchanged for
    backward compatibility with existing callers).

    Bug fix: the original *returned* a ValueError instance instead of
    raising it, so an unknown rule handed an exception object to the
    caller as if it were a number.

    :raises ValueError: if *rule* is not a known combination rule.
    """
    if rule == "Lorentz":
        return (eps1 * eps2)**0.5
    raise ValueError("No combination rule defined")
def combination_rule_sigma(rule, sig1, sig2):
    """Return the mixed Lennard-Jones sigma for a pair of species.

    Uses the arithmetic mean (label "Berthelot" kept unchanged for
    backward compatibility with existing callers).

    Bug fix: the original *returned* a ValueError instance instead of
    raising it, so an unknown rule handed an exception object to the
    caller as if it were a number.

    :raises ValueError: if *rule* is not a known combination rule.
    """
    if rule == "Berthelot":
        return (sig1 + sig2) * 0.5
    raise ValueError("No combination rule defined")
for s in [["Cl", "Na"], ["Cl", "Cl"], ["Na", "Na"],
["Na", "Electrode"], ["Cl", "Electrode"]]:
lj_sig = combination_rule_sigma(
"Berthelot", lj_sigmas[s[0]], lj_sigmas[s[1]])
lj_cut = combination_rule_sigma("Berthelot", lj_cuts[s[0]], lj_cuts[s[1]])
lj_eps = combination_rule_epsilon(
"Lorentz", lj_epsilons[s[0]], lj_epsilons[s[1]])
system.non_bonded_inter[types[s[0]], types[s[1]]].lennard_jones.set_params(
epsilon=lj_eps, sigma=lj_sig, cutoff=lj_cut, shift="auto")
energy = system.analysis.energy()
print("Before Minimization: E_total=", energy['total'])
system.minimize_energy.init(
f_max=10, gamma=50.0, max_steps=1000, max_displacement=0.2)
system.minimize_energy.minimize()
energy = system.analysis.energy()
print("After Minimization: E_total=", energy['total'])
print("\n--->Tuning Electrostatics")
p3m = electrostatics.P3M(prefactor=l_bjerrum, accuracy=1e-2)
system.actors.add(p3m)
elc = electrostatic_extensions.ELC(gap_size=elc_gap, maxPWerror=1e-3)
system.actors.add(elc)
for p in system.part:
p.ext_force = [0, 0, p.q * Ez]
print("\n--->Temperature Equilibration")
system.time = 0.0
for i in range(int(num_steps_equilibration / 100)):
energy = system.analysis.energy()
temp_measured = energy['kinetic'] / ((3.0 / 2.0) * n_part)
print("t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f}, T_cur={3:.4f}"
.format(system.time, energy['total'], energy['coulomb'],
temp_measured))
system.integrator.run(100)
print("\n--->Integration")
bins = 100
z_dens_na = numpy.zeros(bins)
z_dens_cl = numpy.zeros(bins)
system.time = 0.0
cnt = 0
for i in range(num_configs):
temp_measured = system.analysis.energy()['kinetic'] / ((3. / 2.) * n_part)
print("t={0:.1f}, E_total={1:.2f}, E_coulomb={2:.2f}, T_cur={3:.4f}"
.format(system.time, system.analysis.energy()['total'],
system.analysis.energy()['coulomb'], temp_measured))
system.integrator.run(integ_steps_per_config)
for p in system.part:
bz = int(p.pos[2] / box_z * bins)
if p.type == types["Na"]:
z_dens_na[bz] += 1.0
elif p.type == types["Cl"]:
z_dens_cl[bz] += 1.0
cnt += 1
print("\n--->Analysis")
# Average / Normalize with Volume
z_dens_na /= (cnt * box_volume / bins)
z_dens_cl /= (cnt * box_volume / bins)
z_values = numpy.linspace(0, box_l, num=bins)
res = numpy.column_stack((z_values, z_dens_na, z_dens_cl))
numpy.savetxt("z_density.data", res, header="#z rho_na(z) rho_cl(z)")
print("\n--->Written z_density.data")
print("\n--->Done")
|
psci2195/espresso-ffans
|
doc/tutorials/02-charged_system/scripts/nacl_units_confined.py
|
Python
|
gpl-3.0
| 6,242
|
[
"ESPResSo"
] |
9ccba70f156505e0395c192c05a57905d15ea132e8c201819dcfa6cafce5f298
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Elemental masses (most common isotope), symbols, and atomic numbers from psi4.
"""
_temp_element = ["GHOST", "HYDROGEN", "HELIUM", "LITHIUM", "BERYLLIUM",
"BORON", "CARBON", "NITROGEN", "OXYGEN", "FLUORINE",
"NEON", "SODIUM", "MAGNESIUM", "ALUMINUM", "SILICON",
"PHOSPHORUS", "SULFUR", "CHLORINE", "ARGON", "POTASSIUM",
"CALCIUM", "SCANDIUM", "TITANIUM", "VANADIUM", "CHROMIUM",
"MANGANESE", "IRON", "COBALT", "NICKEL", "COPPER",
"ZINC", "GALLIUM", "GERMANIUM", "ARSENIC", "SELENIUM",
"BROMINE", "KRYPTON", "RUBIDIUM", "STRONTIUM", "YTTRIUM",
"ZIRCONIUM", "NIOBIUM", "MOLYBDENUM", "TECHNETIUM", "RUTHENIUM",
"RHODIUM", "PALLADIUM", "SILVER", "CADMIUM", "INDIUM",
"TIN", "ANTIMONY", "TELLURIUM", "IODINE", "XENON",
"CESIUM", "BARIUM", "LANTHANUM", "CERIUM", "PRASEODYMIUM",
"NEODYMIUM", "PROMETHIUM", "SAMARIUM", "EUROPIUM", "GADOLINIUM",
"TERBIUM", "DYSPROSIUM", "HOLMIUM", "ERBIUM", "THULIUM",
"YTTERBIUM", "LUTETIUM", "HAFNIUM", "TANTALUM", "TUNGSTEN",
"RHENIUM", "OSMIUM", "IRIDIUM", "PLATINUM", "GOLD",
"MERCURY", "THALLIUM", "LEAD", "BISMUTH", "POLONIUM",
"ASTATINE", "RADON", "FRANCIUM", "RADIUM", "ACTINIUM",
"THORIUM", "PROTACTINIUM", "URANIUM", "NEPTUNIUM", "PLUTONIUM",
"AMERICIUM", "CURIUM", "BERKELIUM", "CALIFORNIUM", "EINSTEINIUM",
"FERMIUM", "MENDELEVIUM", "NOBELIUM", "LAWRENCIUM", "RUTHERFORDIUM",
"DUBNIUM", "SEABORGIUM", "BOHRIUM"]
_temp_symbol = ["X", "H", "HE", "LI", "BE", "B", "C", "N", "O", "F", "NE", "NA", "MG",
"AL", "SI", "P", "S", "CL", "AR", "K", "CA", "SC", "TI", "V", "CR", "MN", "FE", "CO",
"NI", "CU", "ZN", "GA", "GE", "AS", "SE", "BR", "KR", "RB", "SR", "Y", "ZR", "NB",
"MO", "TC", "RU", "RH", "PD", "AG", "CD", "IN", "SN", "SB", "TE", "I", "XE", "CS",
"BA", "LA", "CE", "PR", "ND", "PM", "SM", "EU", "GD", "TB", "DY", "HO", "ER", "TM",
"YB", "LU", "HF", "TA", "W", "RE", "OS", "IR", "PT", "AU", "HG", "TL", "PB", "BI",
"PO", "AT", "RN", "FR", "RA", "AC", "TH", "PA", "U", "NP", "PU", "AM", "CM", "BK",
"CF", "ES", "FM", "MD", "NO", "LR", "RF", "DB", "SG", "BH", "HS", "MT", "DS", "RG",
"UUB", "UUT", "UUQ", "UUP", "UUH", "UUS", "UUO"]
_temp_z = list(range(0, 108))
_temp_mass = [
0., 1.00782503207, 4.00260325415, 7.016004548, 9.012182201, 11.009305406,
12, 14.00307400478, 15.99491461956, 18.998403224, 19.99244017542,
22.98976928087, 23.985041699, 26.981538627, 27.97692653246, 30.973761629,
31.972070999, 34.968852682, 39.96238312251, 38.963706679, 39.962590983,
44.955911909, 47.947946281, 50.943959507, 51.940507472, 54.938045141,
55.934937475, 58.933195048, 57.935342907, 62.929597474, 63.929142222,
68.925573587, 73.921177767, 74.921596478, 79.916521271, 78.918337087,
85.910610729, 84.911789737, 87.905612124, 88.905848295, 89.904704416,
92.906378058, 97.905408169, 98.906254747, 101.904349312, 102.905504292,
105.903485715, 106.90509682, 113.90335854, 114.903878484, 119.902194676,
120.903815686, 129.906224399, 126.904472681, 131.904153457, 132.905451932,
137.905247237, 138.906353267, 139.905438706, 140.907652769, 141.907723297,
144.912749023, 151.919732425, 152.921230339, 157.924103912, 158.925346757,
163.929174751, 164.93032207, 165.930293061, 168.93421325, 173.938862089,
174.940771819, 179.946549953, 180.947995763, 183.950931188, 186.955753109,
191.96148069, 192.96292643, 194.964791134, 196.966568662, 201.970643011,
204.974427541, 207.976652071, 208.980398734, 208.982430435, 210.987496271,
222.017577738, 222.01755173, 228.031070292, 227.027752127, 232.038055325,
231.03588399, 238.050788247, 237.048173444, 242.058742611, 243.06138108,
247.07035354, 247.07030708, 251.079586788, 252.082978512, 257.095104724,
258.098431319, 255.093241131, 260.105504, 263.112547, 255.107398, 259.114500,
262.122892, 263.128558, 265.136151, 281.162061, 272.153615, 283.171792, 283.176451,
285.183698, 287.191186, 292.199786, 291.206564, 293.214670]
_temp_iso_symbol = [
"H", "H1", "H2", "D", "H3", "T", "H4", "H5", "H6", "H7", "HE", "HE3", "HE4",
"HE5", "HE6", "HE7", "HE8", "HE9", "HE10", "LI", "LI3", "LI4", "LI5", "LI6",
"LI7", "LI8", "LI9", "LI10", "LI11", "LI12", "BE", "BE5", "BE6", "BE7", "BE8",
"BE9", "BE10", "BE11", "BE12", "BE13", "BE14", "BE15", "BE16", "B", "B6", "B7",
"B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", "B16", "B17", "B18", "B19",
"C", "C8", "C9", "C10", "C11", "C12", "C13", "C14", "C15", "C16", "C17", "C18",
"C19", "C20", "C21", "C22", "N", "N10", "N11", "N12", "N13", "N14", "N15", "N16",
"N17", "N18", "N19", "N20", "N21", "N22", "N23", "N24", "N25", "O", "O12", "O13",
"O14", "O15", "O16", "O17", "O18", "O19", "O20", "O21", "O22", "O23", "O24",
"O25", "O26", "O27", "O28", "F", "F14", "F15", "F16", "F17", "F18", "F19", "F20",
"F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31",
"NE", "NE16", "NE17", "NE18", "NE19", "NE20", "NE21", "NE22", "NE23", "NE24",
"NE25", "NE26", "NE27", "NE28", "NE29", "NE30", "NE31", "NE32", "NE33", "NE34",
"NA", "NA18", "NA19", "NA20", "NA21", "NA22", "NA23", "NA24", "NA25", "NA26",
"NA27", "NA28", "NA29", "NA30", "NA31", "NA32", "NA33", "NA34", "NA35", "NA36",
"NA37", "MG", "MG19", "MG20", "MG21", "MG22", "MG23", "MG24", "MG25", "MG26",
"MG27", "MG28", "MG29", "MG30", "MG31", "MG32", "MG33", "MG34", "MG35", "MG36",
"MG37", "MG38", "MG39", "MG40", "AL", "AL21", "AL22", "AL23", "AL24", "AL25",
"AL26", "AL27", "AL28", "AL29", "AL30", "AL31", "AL32", "AL33", "AL34", "AL35",
"AL36", "AL37", "AL38", "AL39", "AL40", "AL41", "AL42", "SI", "SI22", "SI23",
"SI24", "SI25", "SI26", "SI27", "SI28", "SI29", "SI30", "SI31", "SI32", "SI33",
"SI34", "SI35", "SI36", "SI37", "SI38", "SI39", "SI40", "SI41", "SI42", "SI43",
"SI44", "P", "P24", "P25", "P26", "P27", "P28", "P29", "P30", "P31", "P32",
"P33", "P34", "P35", "P36", "P37", "P38", "P39", "P40", "P41", "P42", "P43",
"P44", "P45", "P46", "S", "S26", "S27", "S28", "S29", "S30", "S31", "S32", "S33",
"S34", "S35", "S36", "S37", "S38", "S39", "S40", "S41", "S42", "S43", "S44",
"S45", "S46", "S47", "S48", "S49", "CL", "CL28", "CL29", "CL30", "CL31", "CL32",
"CL33", "CL34", "CL35", "CL36", "CL37", "CL38", "CL39", "CL40", "CL41", "CL42",
"CL43", "CL44", "CL45", "CL46", "CL47", "CL48", "CL49", "CL50", "CL51", "AR",
"AR30", "AR31", "AR32", "AR33", "AR34", "AR35", "AR36", "AR37", "AR38", "AR39",
"AR40", "AR41", "AR42", "AR43", "AR44", "AR45", "AR46", "AR47", "AR48", "AR49",
"AR50", "AR51", "AR52", "AR53", "K", "K32", "K33", "K34", "K35", "K36", "K37",
"K38", "K39", "K40", "K41", "K42", "K43", "K44", "K45", "K46", "K47", "K48",
"K49", "K50", "K51", "K52", "K53", "K54", "K55", "CA", "CA34", "CA35", "CA36",
"CA37", "CA38", "CA39", "CA40", "CA41", "CA42", "CA43", "CA44", "CA45", "CA46",
"CA47", "CA48", "CA49", "CA50", "CA51", "CA52", "CA53", "CA54", "CA55", "CA56",
"CA57", "SC", "SC36", "SC37", "SC38", "SC39", "SC40", "SC41", "SC42", "SC43",
"SC44", "SC45", "SC46", "SC47", "SC48", "SC49", "SC50", "SC51", "SC52", "SC53",
"SC54", "SC55", "SC56", "SC57", "SC58", "SC59", "SC60", "TI", "TI38", "TI39",
"TI40", "TI41", "TI42", "TI43", "TI44", "TI45", "TI46", "TI47", "TI48", "TI49",
"TI50", "TI51", "TI52", "TI53", "TI54", "TI55", "TI56", "TI57", "TI58", "TI59",
"TI60", "TI61", "TI62", "TI63", "V", "V40", "V41", "V42", "V43", "V44", "V45",
"V46", "V47", "V48", "V49", "V50", "V51", "V52", "V53", "V54", "V55", "V56",
"V57", "V58", "V59", "V60", "V61", "V62", "V63", "V64", "V65", "CR", "CR42",
"CR43", "CR44", "CR45", "CR46", "CR47", "CR48", "CR49", "CR50", "CR51", "CR52",
"CR53", "CR54", "CR55", "CR56", "CR57", "CR58", "CR59", "CR60", "CR61", "CR62",
"CR63", "CR64", "CR65", "CR66", "CR67", "MN", "MN44", "MN45", "MN46", "MN47",
"MN48", "MN49", "MN50", "MN51", "MN52", "MN53", "MN54", "MN55", "MN56", "MN57",
"MN58", "MN59", "MN60", "MN61", "MN62", "MN63", "MN64", "MN65", "MN66", "MN67",
"MN68", "MN69", "FE", "FE45", "FE46", "FE47", "FE48", "FE49", "FE50", "FE51",
"FE52", "FE53", "FE54", "FE55", "FE56", "FE57", "FE58", "FE59", "FE60", "FE61",
"FE62", "FE63", "FE64", "FE65", "FE66", "FE67", "FE68", "FE69", "FE70", "FE71",
"FE72", "CO", "CO47", "CO48", "CO49", "CO50", "CO51", "CO52", "CO53", "CO54",
"CO55", "CO56", "CO57", "CO58", "CO59", "CO60", "CO61", "CO62", "CO63", "CO64",
"CO65", "CO66", "CO67", "CO68", "CO69", "CO70", "CO71", "CO72", "CO73", "CO74",
"CO75", "NI", "NI48", "NI49", "NI50", "NI51", "NI52", "NI53", "NI54", "NI55",
"NI56", "NI57", "NI58", "NI59", "NI60", "NI61", "NI62", "NI63", "NI64", "NI65",
"NI66", "NI67", "NI68", "NI69", "NI70", "NI71", "NI72", "NI73", "NI74", "NI75",
"NI76", "NI77", "NI78", "CU", "CU52", "CU53", "CU54", "CU55", "CU56", "CU57",
"CU58", "CU59", "CU60", "CU61", "CU62", "CU63", "CU64", "CU65", "CU66", "CU67",
"CU68", "CU69", "CU70", "CU71", "CU72", "CU73", "CU74", "CU75", "CU76", "CU77",
"CU78", "CU79", "CU80", "ZN", "ZN54", "ZN55", "ZN56", "ZN57", "ZN58", "ZN59",
"ZN60", "ZN61", "ZN62", "ZN63", "ZN64", "ZN65", "ZN66", "ZN67", "ZN68", "ZN69",
"ZN70", "ZN71", "ZN72", "ZN73", "ZN74", "ZN75", "ZN76", "ZN77", "ZN78", "ZN79",
"ZN80", "ZN81", "ZN82", "ZN83", "GA", "GA56", "GA57", "GA58", "GA59", "GA60",
"GA61", "GA62", "GA63", "GA64", "GA65", "GA66", "GA67", "GA68", "GA69", "GA70",
"GA71", "GA72", "GA73", "GA74", "GA75", "GA76", "GA77", "GA78", "GA79", "GA80",
"GA81", "GA82", "GA83", "GA84", "GA85", "GA86", "GE", "GE58", "GE59", "GE60",
"GE61", "GE62", "GE63", "GE64", "GE65", "GE66", "GE67", "GE68", "GE69", "GE70",
"GE71", "GE72", "GE73", "GE74", "GE75", "GE76", "GE77", "GE78", "GE79", "GE80",
"GE81", "GE82", "GE83", "GE84", "GE85", "GE86", "GE87", "GE88", "GE89", "AS",
"AS60", "AS61", "AS62", "AS63", "AS64", "AS65", "AS66", "AS67", "AS68", "AS69",
"AS70", "AS71", "AS72", "AS73", "AS74", "AS75", "AS76", "AS77", "AS78", "AS79",
"AS80", "AS81", "AS82", "AS83", "AS84", "AS85", "AS86", "AS87", "AS88", "AS89",
"AS90", "AS91", "AS92", "SE", "SE65", "SE66", "SE67", "SE68", "SE69", "SE70",
"SE71", "SE72", "SE73", "SE74", "SE75", "SE76", "SE77", "SE78", "SE79", "SE80",
"SE81", "SE82", "SE83", "SE84", "SE85", "SE86", "SE87", "SE88", "SE89", "SE90",
"SE91", "SE92", "SE93", "SE94", "BR", "BR67", "BR68", "BR69", "BR70", "BR71",
"BR72", "BR73", "BR74", "BR75", "BR76", "BR77", "BR78", "BR79", "BR80", "BR81",
"BR82", "BR83", "BR84", "BR85", "BR86", "BR87", "BR88", "BR89", "BR90", "BR91",
"BR92", "BR93", "BR94", "BR95", "BR96", "BR97", "KR", "KR69", "KR70", "KR71",
"KR72", "KR73", "KR74", "KR75", "KR76", "KR77", "KR78", "KR79", "KR80", "KR81",
"KR82", "KR83", "KR84", "KR85", "KR86", "KR87", "KR88", "KR89", "KR90", "KR91",
"KR92", "KR93", "KR94", "KR95", "KR96", "KR97", "KR98", "KR99", "KR100", "RB",
"RB71", "RB72", "RB73", "RB74", "RB75", "RB76", "RB77", "RB78", "RB79", "RB80",
"RB81", "RB82", "RB83", "RB84", "RB85", "RB86", "RB87", "RB88", "RB89", "RB90",
"RB91", "RB92", "RB93", "RB94", "RB95", "RB96", "RB97", "RB98", "RB99",
"RB100", "RB101", "RB102", "SR", "SR73", "SR74", "SR75", "SR76", "SR77",
"SR78", "SR79", "SR80", "SR81", "SR82", "SR83", "SR84", "SR85", "SR86", "SR87",
"SR88", "SR89", "SR90", "SR91", "SR92", "SR93", "SR94", "SR95", "SR96", "SR97",
"SR98", "SR99", "SR100", "SR101", "SR102", "SR103", "SR104", "SR105", "Y",
"Y76", "Y77", "Y78", "Y79", "Y80", "Y81", "Y82", "Y83", "Y84", "Y85", "Y86",
"Y87", "Y88", "Y89", "Y90", "Y91", "Y92", "Y93", "Y94", "Y95", "Y96", "Y97",
"Y98", "Y99", "Y100", "Y101", "Y102", "Y103", "Y104", "Y105", "Y106", "Y107",
"Y108", "ZR", "ZR78", "ZR79", "ZR80", "ZR81", "ZR82", "ZR83", "ZR84", "ZR85",
"ZR86", "ZR87", "ZR88", "ZR89", "ZR90", "ZR91", "ZR92", "ZR93", "ZR94", "ZR95",
"ZR96", "ZR97", "ZR98", "ZR99", "ZR100", "ZR101", "ZR102", "ZR103", "ZR104",
"ZR105", "ZR106", "ZR107", "ZR108", "ZR109", "ZR110", "NB", "NB81", "NB82",
"NB83", "NB84", "NB85", "NB86", "NB87", "NB88", "NB89", "NB90", "NB91", "NB92",
"NB93", "NB94", "NB95", "NB96", "NB97", "NB98", "NB99", "NB100", "NB101",
"NB102", "NB103", "NB104", "NB105", "NB106", "NB107", "NB108", "NB109",
"NB110", "NB111", "NB112", "NB113", "MO", "MO83", "MO84", "MO85", "MO86",
"MO87", "MO88", "MO89", "MO90", "MO91", "MO92", "MO93", "MO94", "MO95", "MO96",
"MO97", "MO98", "MO99", "MO100", "MO101", "MO102", "MO103", "MO104", "MO105",
"MO106", "MO107", "MO108", "MO109", "MO110", "MO111", "MO112", "MO113",
"MO114", "MO115", "TC", "TC85", "TC86", "TC87", "TC88", "TC89", "TC90", "TC91",
"TC92", "TC93", "TC94", "TC95", "TC96", "TC97", "TC98", "TC99", "TC100",
"TC101", "TC102", "TC103", "TC104", "TC105", "TC106", "TC107", "TC108",
"TC109", "TC110", "TC111", "TC112", "TC113", "TC114", "TC115", "TC116",
"TC117", "TC118", "RU", "RU87", "RU88", "RU89", "RU90", "RU91", "RU92", "RU93",
"RU94", "RU95", "RU96", "RU97", "RU98", "RU99", "RU100", "RU101", "RU102",
"RU103", "RU104", "RU105", "RU106", "RU107", "RU108", "RU109", "RU110",
"RU111", "RU112", "RU113", "RU114", "RU115", "RU116", "RU117", "RU118",
"RU119", "RU120", "RH", "RH89", "RH90", "RH91", "RH92", "RH93", "RH94", "RH95",
"RH96", "RH97", "RH98", "RH99", "RH100", "RH101", "RH102", "RH103", "RH104",
"RH105", "RH106", "RH107", "RH108", "RH109", "RH110", "RH111", "RH112",
"RH113", "RH114", "RH115", "RH116", "RH117", "RH118", "RH119", "RH120",
"RH121", "RH122", "PD", "PD91", "PD92", "PD93", "PD94", "PD95", "PD96", "PD97",
"PD98", "PD99", "PD100", "PD101", "PD102", "PD103", "PD104", "PD105", "PD106",
"PD107", "PD108", "PD109", "PD110", "PD111", "PD112", "PD113", "PD114",
"PD115", "PD116", "PD117", "PD118", "PD119", "PD120", "PD121", "PD122",
"PD123", "PD124", "AG", "AG93", "AG94", "AG95", "AG96", "AG97", "AG98", "AG99",
"AG100", "AG101", "AG102", "AG103", "AG104", "AG105", "AG106", "AG107",
"AG108", "AG109", "AG110", "AG111", "AG112", "AG113", "AG114", "AG115",
"AG116", "AG117", "AG118", "AG119", "AG120", "AG121", "AG122", "AG123",
"AG124", "AG125", "AG126", "AG127", "AG128", "AG129", "AG130", "CD", "CD95",
"CD96", "CD97", "CD98", "CD99", "CD100", "CD101", "CD102", "CD103", "CD104",
"CD105", "CD106", "CD107", "CD108", "CD109", "CD110", "CD111", "CD112",
"CD113", "CD114", "CD115", "CD116", "CD117", "CD118", "CD119", "CD120",
"CD121", "CD122", "CD123", "CD124", "CD125", "CD126", "CD127", "CD128",
"CD129", "CD130", "CD131", "CD132", "IN", "IN97", "IN98", "IN99", "IN100",
"IN101", "IN102", "IN103", "IN104", "IN105", "IN106", "IN107", "IN108",
"IN109", "IN110", "IN111", "IN112", "IN113", "IN114", "IN115", "IN116",
"IN117", "IN118", "IN119", "IN120", "IN121", "IN122", "IN123", "IN124",
"IN125", "IN126", "IN127", "IN128", "IN129", "IN130", "IN131", "IN132",
"IN133", "IN134", "IN135", "SN", "SN99", "SN100", "SN101", "SN102", "SN103",
"SN104", "SN105", "SN106", "SN107", "SN108", "SN109", "SN110", "SN111",
"SN112", "SN113", "SN114", "SN115", "SN116", "SN117", "SN118", "SN119",
"SN120", "SN121", "SN122", "SN123", "SN124", "SN125", "SN126", "SN127",
"SN128", "SN129", "SN130", "SN131", "SN132", "SN133", "SN134", "SN135",
"SN136", "SN137", "SB", "SB103", "SB104", "SB105", "SB106", "SB107", "SB108",
"SB109", "SB110", "SB111", "SB112", "SB113", "SB114", "SB115", "SB116",
"SB117", "SB118", "SB119", "SB120", "SB121", "SB122", "SB123", "SB124",
"SB125", "SB126", "SB127", "SB128", "SB129", "SB130", "SB131", "SB132",
"SB133", "SB134", "SB135", "SB136", "SB137", "SB138", "SB139", "TE", "TE105",
"TE106", "TE107", "TE108", "TE109", "TE110", "TE111", "TE112", "TE113",
"TE114", "TE115", "TE116", "TE117", "TE118", "TE119", "TE120", "TE121",
"TE122", "TE123", "TE124", "TE125", "TE126", "TE127", "TE128", "TE129",
"TE130", "TE131", "TE132", "TE133", "TE134", "TE135", "TE136", "TE137",
"TE138", "TE139", "TE140", "TE141", "TE142", "I", "I108", "I109", "I110",
"I111", "I112", "I113", "I114", "I115", "I116", "I117", "I118", "I119", "I120",
"I121", "I122", "I123", "I124", "I125", "I126", "I127", "I128", "I129", "I130",
"I131", "I132", "I133", "I134", "I135", "I136", "I137", "I138", "I139", "I140",
"I141", "I142", "I143", "I144", "XE", "XE110", "XE111", "XE112", "XE113",
"XE114", "XE115", "XE116", "XE117", "XE118", "XE119", "XE120", "XE121",
"XE122", "XE123", "XE124", "XE125", "XE126", "XE127", "XE128", "XE129",
"XE130", "XE131", "XE132", "XE133", "XE134", "XE135", "XE136", "XE137",
"XE138", "XE139", "XE140", "XE141", "XE142", "XE143", "XE144", "XE145",
"XE146", "XE147", "CS", "CS112", "CS113", "CS114", "CS115", "CS116", "CS117",
"CS118", "CS119", "CS120", "CS121", "CS122", "CS123", "CS124", "CS125",
"CS126", "CS127", "CS128", "CS129", "CS130", "CS131", "CS132", "CS133",
"CS134", "CS135", "CS136", "CS137", "CS138", "CS139", "CS140", "CS141",
"CS142", "CS143", "CS144", "CS145", "CS146", "CS147", "CS148", "CS149",
"CS150", "CS151", "BA", "BA114", "BA115", "BA116", "BA117", "BA118", "BA119",
"BA120", "BA121", "BA122", "BA123", "BA124", "BA125", "BA126", "BA127",
"BA128", "BA129", "BA130", "BA131", "BA132", "BA133", "BA134", "BA135",
"BA136", "BA137", "BA138", "BA139", "BA140", "BA141", "BA142", "BA143",
"BA144", "BA145", "BA146", "BA147", "BA148", "BA149", "BA150", "BA151",
"BA152", "BA153", "LA", "LA117", "LA118", "LA119", "LA120", "LA121", "LA122",
"LA123", "LA124", "LA125", "LA126", "LA127", "LA128", "LA129", "LA130",
"LA131", "LA132", "LA133", "LA134", "LA135", "LA136", "LA137", "LA138",
"LA139", "LA140", "LA141", "LA142", "LA143", "LA144", "LA145", "LA146",
"LA147", "LA148", "LA149", "LA150", "LA151", "LA152", "LA153", "LA154",
"LA155", "CE", "CE119", "CE120", "CE121", "CE122", "CE123", "CE124", "CE125",
"CE126", "CE127", "CE128", "CE129", "CE130", "CE131", "CE132", "CE133",
"CE134", "CE135", "CE136", "CE137", "CE138", "CE139", "CE140", "CE141",
"CE142", "CE143", "CE144", "CE145", "CE146", "CE147", "CE148", "CE149",
"CE150", "CE151", "CE152", "CE153", "CE154", "CE155", "CE156", "CE157", "PR",
"PR121", "PR122", "PR123", "PR124", "PR125", "PR126", "PR127", "PR128",
"PR129", "PR130", "PR131", "PR132", "PR133", "PR134", "PR135", "PR136",
"PR137", "PR138", "PR139", "PR140", "PR141", "PR142", "PR143", "PR144",
"PR145", "PR146", "PR147", "PR148", "PR149", "PR150", "PR151", "PR152",
"PR153", "PR154", "PR155", "PR156", "PR157", "PR158", "PR159", "ND", "ND124",
"ND125", "ND126", "ND127", "ND128", "ND129", "ND130", "ND131", "ND132",
"ND133", "ND134", "ND135", "ND136", "ND137", "ND138", "ND139", "ND140",
"ND141", "ND142", "ND143", "ND144", "ND145", "ND146", "ND147", "ND148",
"ND149", "ND150", "ND151", "ND152", "ND153", "ND154", "ND155", "ND156",
"ND157", "ND158", "ND159", "ND160", "ND161", "PM", "PM126", "PM127", "PM128",
"PM129", "PM130", "PM131", "PM132", "PM133", "PM134", "PM135", "PM136",
"PM137", "PM138", "PM139", "PM140", "PM141", "PM142", "PM143", "PM144",
"PM145", "PM146", "PM147", "PM148", "PM149", "PM150", "PM151", "PM152",
"PM153", "PM154", "PM155", "PM156", "PM157", "PM158", "PM159", "PM160",
"PM161", "PM162", "PM163", "SM", "SM128", "SM129", "SM130", "SM131", "SM132",
"SM133", "SM134", "SM135", "SM136", "SM137", "SM138", "SM139", "SM140",
"SM141", "SM142", "SM143", "SM144", "SM145", "SM146", "SM147", "SM148",
"SM149", "SM150", "SM151", "SM152", "SM153", "SM154", "SM155", "SM156",
"SM157", "SM158", "SM159", "SM160", "SM161", "SM162", "SM163", "SM164",
"SM165", "EU", "EU130", "EU131", "EU132", "EU133", "EU134", "EU135", "EU136",
"EU137", "EU138", "EU139", "EU140", "EU141", "EU142", "EU143", "EU144",
"EU145", "EU146", "EU147", "EU148", "EU149", "EU150", "EU151", "EU152",
"EU153", "EU154", "EU155", "EU156", "EU157", "EU158", "EU159", "EU160",
"EU161", "EU162", "EU163", "EU164", "EU165", "EU166", "EU167", "GD", "GD134",
"GD135", "GD136", "GD137", "GD138", "GD139", "GD140", "GD141", "GD142",
"GD143", "GD144", "GD145", "GD146", "GD147", "GD148", "GD149", "GD150",
"GD151", "GD152", "GD153", "GD154", "GD155", "GD156", "GD157", "GD158",
"GD159", "GD160", "GD161", "GD162", "GD163", "GD164", "GD165", "GD166",
"GD167", "GD168", "GD169", "TB", "TB136", "TB137", "TB138", "TB139", "TB140",
"TB141", "TB142", "TB143", "TB144", "TB145", "TB146", "TB147", "TB148",
"TB149", "TB150", "TB151", "TB152", "TB153", "TB154", "TB155", "TB156",
"TB157", "TB158", "TB159", "TB160", "TB161", "TB162", "TB163", "TB164",
"TB165", "TB166", "TB167", "TB168", "TB169", "TB170", "TB171", "DY", "DY138",
"DY139", "DY140", "DY141", "DY142", "DY143", "DY144", "DY145", "DY146",
"DY147", "DY148", "DY149", "DY150", "DY151", "DY152", "DY153", "DY154",
"DY155", "DY156", "DY157", "DY158", "DY159", "DY160", "DY161", "DY162",
"DY163", "DY164", "DY165", "DY166", "DY167", "DY168", "DY169", "DY170",
"DY171", "DY172", "DY173", "HO", "HO140", "HO141", "HO142", "HO143", "HO144",
"HO145", "HO146", "HO147", "HO148", "HO149", "HO150", "HO151", "HO152",
"HO153", "HO154", "HO155", "HO156", "HO157", "HO158", "HO159", "HO160",
"HO161", "HO162", "HO163", "HO164", "HO165", "HO166", "HO167", "HO168",
"HO169", "HO170", "HO171", "HO172", "HO173", "HO174", "HO175", "ER", "ER143",
"ER144", "ER145", "ER146", "ER147", "ER148", "ER149", "ER150", "ER151",
"ER152", "ER153", "ER154", "ER155", "ER156", "ER157", "ER158", "ER159",
"ER160", "ER161", "ER162", "ER163", "ER164", "ER165", "ER166", "ER167",
"ER168", "ER169", "ER170", "ER171", "ER172", "ER173", "ER174", "ER175",
"ER176", "ER177", "TM", "TM145", "TM146", "TM147", "TM148", "TM149", "TM150",
"TM151", "TM152", "TM153", "TM154", "TM155", "TM156", "TM157", "TM158",
"TM159", "TM160", "TM161", "TM162", "TM163", "TM164", "TM165", "TM166",
"TM167", "TM168", "TM169", "TM170", "TM171", "TM172", "TM173", "TM174",
"TM175", "TM176", "TM177", "TM178", "TM179", "YB", "YB148", "YB149", "YB150",
"YB151", "YB152", "YB153", "YB154", "YB155", "YB156", "YB157", "YB158",
"YB159", "YB160", "YB161", "YB162", "YB163", "YB164", "YB165", "YB166",
"YB167", "YB168", "YB169", "YB170", "YB171", "YB172", "YB173", "YB174",
"YB175", "YB176", "YB177", "YB178", "YB179", "YB180", "YB181", "LU", "LU150",
"LU151", "LU152", "LU153", "LU154", "LU155", "LU156", "LU157", "LU158",
"LU159", "LU160", "LU161", "LU162", "LU163", "LU164", "LU165", "LU166",
"LU167", "LU168", "LU169", "LU170", "LU171", "LU172", "LU173", "LU174",
"LU175", "LU176", "LU177", "LU178", "LU179", "LU180", "LU181", "LU182",
"LU183", "LU184", "HF", "HF153", "HF154", "HF155", "HF156", "HF157", "HF158",
"HF159", "HF160", "HF161", "HF162", "HF163", "HF164", "HF165", "HF166",
"HF167", "HF168", "HF169", "HF170", "HF171", "HF172", "HF173", "HF174",
"HF175", "HF176", "HF177", "HF178", "HF179", "HF180", "HF181", "HF182",
"HF183", "HF184", "HF185", "HF186", "HF187", "HF188", "TA", "TA155", "TA156",
"TA157", "TA158", "TA159", "TA160", "TA161", "TA162", "TA163", "TA164",
"TA165", "TA166", "TA167", "TA168", "TA169", "TA170", "TA171", "TA172",
"TA173", "TA174", "TA175", "TA176", "TA177", "TA178", "TA179", "TA180",
"TA181", "TA182", "TA183", "TA184", "TA185", "TA186", "TA187", "TA188",
"TA189", "TA190", "W", "W158", "W159", "W160", "W161", "W162", "W163", "W164",
"W165", "W166", "W167", "W168", "W169", "W170", "W171", "W172", "W173", "W174",
"W175", "W176", "W177", "W178", "W179", "W180", "W181", "W182", "W183", "W184",
"W185", "W186", "W187", "W188", "W189", "W190", "W191", "W192", "RE", "RE160",
"RE161", "RE162", "RE163", "RE164", "RE165", "RE166", "RE167", "RE168",
"RE169", "RE170", "RE171", "RE172", "RE173", "RE174", "RE175", "RE176",
"RE177", "RE178", "RE179", "RE180", "RE181", "RE182", "RE183", "RE184",
"RE185", "RE186", "RE187", "RE188", "RE189", "RE190", "RE191", "RE192",
"RE193", "RE194", "OS", "OS162", "OS163", "OS164", "OS165", "OS166", "OS167",
"OS168", "OS169", "OS170", "OS171", "OS172", "OS173", "OS174", "OS175",
"OS176", "OS177", "OS178", "OS179", "OS180", "OS181", "OS182", "OS183",
"OS184", "OS185", "OS186", "OS187", "OS188", "OS189", "OS190", "OS191",
"OS192", "OS193", "OS194", "OS195", "OS196", "IR", "IR164", "IR165", "IR166",
"IR167", "IR168", "IR169", "IR170", "IR171", "IR172", "IR173", "IR174",
"IR175", "IR176", "IR177", "IR178", "IR179", "IR180", "IR181", "IR182",
"IR183", "IR184", "IR185", "IR186", "IR187", "IR188", "IR189", "IR190",
"IR191", "IR192", "IR193", "IR194", "IR195", "IR196", "IR197", "IR198",
"IR199", "PT", "PT166", "PT167", "PT168", "PT169", "PT170", "PT171", "PT172",
"PT173", "PT174", "PT175", "PT176", "PT177", "PT178", "PT179", "PT180",
"PT181", "PT182", "PT183", "PT184", "PT185", "PT186", "PT187", "PT188",
"PT189", "PT190", "PT191", "PT192", "PT193", "PT194", "PT195", "PT196",
"PT197", "PT198", "PT199", "PT200", "PT201", "PT202", "AU", "AU169", "AU170",
"AU171", "AU172", "AU173", "AU174", "AU175", "AU176", "AU177", "AU178",
"AU179", "AU180", "AU181", "AU182", "AU183", "AU184", "AU185", "AU186",
"AU187", "AU188", "AU189", "AU190", "AU191", "AU192", "AU193", "AU194",
"AU195", "AU196", "AU197", "AU198", "AU199", "AU200", "AU201", "AU202",
"AU203", "AU204", "AU205", "HG", "HG171", "HG172", "HG173", "HG174", "HG175",
"HG176", "HG177", "HG178", "HG179", "HG180", "HG181", "HG182", "HG183",
"HG184", "HG185", "HG186", "HG187", "HG188", "HG189", "HG190", "HG191",
"HG192", "HG193", "HG194", "HG195", "HG196", "HG197", "HG198", "HG199",
"HG200", "HG201", "HG202", "HG203", "HG204", "HG205", "HG206", "HG207",
"HG208", "HG209", "HG210", "TL", "TL176", "TL177", "TL178", "TL179", "TL180",
"TL181", "TL182", "TL183", "TL184", "TL185", "TL186", "TL187", "TL188",
"TL189", "TL190", "TL191", "TL192", "TL193", "TL194", "TL195", "TL196",
"TL197", "TL198", "TL199", "TL200", "TL201", "TL202", "TL203", "TL204",
"TL205", "TL206", "TL207", "TL208", "TL209", "TL210", "TL211", "TL212", "PB",
"PB178", "PB179", "PB180", "PB181", "PB182", "PB183", "PB184", "PB185",
"PB186", "PB187", "PB188", "PB189", "PB190", "PB191", "PB192", "PB193",
"PB194", "PB195", "PB196", "PB197", "PB198", "PB199", "PB200", "PB201",
"PB202", "PB203", "PB204", "PB205", "PB206", "PB207", "PB208", "PB209",
"PB210", "PB211", "PB212", "PB213", "PB214", "PB215", "BI", "BI184", "BI185",
"BI186", "BI187", "BI188", "BI189", "BI190", "BI191", "BI192", "BI193",
"BI194", "BI195", "BI196", "BI197", "BI198", "BI199", "BI200", "BI201",
"BI202", "BI203", "BI204", "BI205", "BI206", "BI207", "BI208", "BI209",
"BI210", "BI211", "BI212", "BI213", "BI214", "BI215", "BI216", "BI217",
"BI218", "PO", "PO188", "PO189", "PO190", "PO191", "PO192", "PO193", "PO194",
"PO195", "PO196", "PO197", "PO198", "PO199", "PO200", "PO201", "PO202",
"PO203", "PO204", "PO205", "PO206", "PO207", "PO208", "PO209", "PO210",
"PO211", "PO212", "PO213", "PO214", "PO215", "PO216", "PO217", "PO218",
"PO219", "PO220", "AT", "AT193", "AT194", "AT195", "AT196", "AT197", "AT198",
"AT199", "AT200", "AT201", "AT202", "AT203", "AT204", "AT205", "AT206",
"AT207", "AT208", "AT209", "AT210", "AT211", "AT212", "AT213", "AT214",
"AT215", "AT216", "AT217", "AT218", "AT219", "AT220", "AT221", "AT222",
"AT223", "RN", "RN195", "RN196", "RN197", "RN198", "RN199", "RN200", "RN201",
"RN202", "RN203", "RN204", "RN205", "RN206", "RN207", "RN208", "RN209",
"RN210", "RN211", "RN212", "RN213", "RN214", "RN215", "RN216", "RN217",
"RN218", "RN219", "RN220", "RN221", "RN222", "RN223", "RN224", "RN225",
"RN226", "RN227", "RN228", "FR", "FR199", "FR200", "FR201", "FR202", "FR203",
"FR204", "FR205", "FR206", "FR207", "FR208", "FR209", "FR210", "FR211",
"FR212", "FR213", "FR214", "FR215", "FR216", "FR217", "FR218", "FR219",
"FR220", "FR221", "FR222", "FR223", "FR224", "FR225", "FR226", "FR227",
"FR228", "FR229", "FR230", "FR231", "FR232", "RA", "RA202", "RA203", "RA204",
"RA205", "RA206", "RA207", "RA208", "RA209", "RA210", "RA211", "RA212",
"RA213", "RA214", "RA215", "RA216", "RA217", "RA218", "RA219", "RA220",
"RA221", "RA222", "RA223", "RA224", "RA225", "RA226", "RA227", "RA228",
"RA229", "RA230", "RA231", "RA232", "RA233", "RA234", "AC", "AC206", "AC207",
"AC208", "AC209", "AC210", "AC211", "AC212", "AC213", "AC214", "AC215",
"AC216", "AC217", "AC218", "AC219", "AC220", "AC221", "AC222", "AC223",
"AC224", "AC225", "AC226", "AC227", "AC228", "AC229", "AC230", "AC231",
"AC232", "AC233", "AC234", "AC235", "AC236", "TH", "TH209", "TH210", "TH211",
"TH212", "TH213", "TH214", "TH215", "TH216", "TH217", "TH218", "TH219",
"TH220", "TH221", "TH222", "TH223", "TH224", "TH225", "TH226", "TH227",
"TH228", "TH229", "TH230", "TH231", "TH232", "TH233", "TH234", "TH235",
"TH236", "TH237", "TH238", "PA", "PA212", "PA213", "PA214", "PA215", "PA216",
"PA217", "PA218", "PA219", "PA220", "PA221", "PA222", "PA223", "PA224",
"PA225", "PA226", "PA227", "PA228", "PA229", "PA230", "PA231", "PA232",
"PA233", "PA234", "PA235", "PA236", "PA237", "PA238", "PA239", "PA240", "U",
"U217", "U218", "U219", "U220", "U221", "U222", "U223", "U224", "U225", "U226",
"U227", "U228", "U229", "U230", "U231", "U232", "U233", "U234", "U235", "U236",
"U237", "U238", "U239", "U240", "U241", "U242", "NP", "NP225", "NP226",
"NP227", "NP228", "NP229", "NP230", "NP231", "NP232", "NP233", "NP234",
"NP235", "NP236", "NP237", "NP238", "NP239", "NP240", "NP241", "NP242",
"NP243", "NP244", "PU", "PU228", "PU229", "PU230", "PU231", "PU232", "PU233",
"PU234", "PU235", "PU236", "PU237", "PU238", "PU239", "PU240", "PU241",
"PU242", "PU243", "PU244", "PU245", "PU246", "PU247", "AM", "AM231", "AM232",
"AM233", "AM234", "AM235", "AM236", "AM237", "AM238", "AM239", "AM240",
"AM241", "AM242", "AM243", "AM244", "AM245", "AM246", "AM247", "AM248",
"AM249", "CM", "CM233", "CM234", "CM235", "CM236", "CM237", "CM238", "CM239",
"CM240", "CM241", "CM242", "CM243", "CM244", "CM245", "CM246", "CM247",
"CM248", "CM249", "CM250", "CM251", "CM252", "BK", "BK235", "BK236", "BK237",
"BK238", "BK239", "BK240", "BK241", "BK242", "BK243", "BK244", "BK245",
"BK246", "BK247", "BK248", "BK249", "BK250", "BK251", "BK252", "BK253",
"BK254", "CF", "CF237", "CF238", "CF239", "CF240", "CF241", "CF242", "CF243",
"CF244", "CF245", "CF246", "CF247", "CF248", "CF249", "CF250", "CF251",
"CF252", "CF253", "CF254", "CF255", "CF256", "ES", "ES240", "ES241", "ES242",
"ES243", "ES244", "ES245", "ES246", "ES247", "ES248", "ES249", "ES250",
"ES251", "ES252", "ES253", "ES254", "ES255", "ES256", "ES257", "ES258", "FM",
"FM242", "FM243", "FM244", "FM245", "FM246", "FM247", "FM248", "FM249",
"FM250", "FM251", "FM252", "FM253", "FM254", "FM255", "FM256", "FM257",
"FM258", "FM259", "FM260", "MD", "MD245", "MD246", "MD247", "MD248", "MD249",
"MD250", "MD251", "MD252", "MD253", "MD254", "MD255", "MD256", "MD257",
"MD258", "MD259", "MD260", "MD261", "MD262", "NO", "NO248", "NO249", "NO250",
"NO251", "NO252", "NO253", "NO254", "NO255", "NO256", "NO257", "NO258",
"NO259", "NO260", "NO261", "NO262", "NO263", "NO264", "LR", "LR251", "LR252",
"LR253", "LR254", "LR255", "LR256", "LR257", "LR258", "LR259", "LR260",
"LR261", "LR262", "LR263", "LR264", "LR265", "LR266", "RF", "RF253", "RF254",
"RF255", "RF256", "RF257", "RF258", "RF259", "RF260", "RF261", "RF262",
"RF263", "RF264", "RF265", "RF266", "RF267", "RF268", "DB", "DB255", "DB256",
"DB257", "DB258", "DB259", "DB260", "DB261", "DB262", "DB263", "DB264",
"DB265", "DB266", "DB267", "DB268", "DB269", "DB270", "SG", "SG258", "SG259",
"SG260", "SG261", "SG262", "SG263", "SG264", "SG265", "SG266", "SG267",
"SG268", "SG269", "SG270", "SG271", "SG272", "SG273", "BH", "BH260", "BH261",
"BH262", "BH263", "BH264", "BH265", "BH266", "BH267", "BH268", "BH269",
"BH270", "BH271", "BH272", "BH273", "BH274", "BH275", "HS", "HS263", "HS264",
"HS265", "HS266", "HS267", "HS268", "HS269", "HS270", "HS271", "HS272",
"HS273", "HS274", "HS275", "HS276", "HS277", "MT", "MT265", "MT266", "MT267",
"MT268", "MT269", "MT270", "MT271", "MT272", "MT273", "MT274", "MT275",
"MT276", "MT277", "MT278", "MT279", "DS", "DS267", "DS268", "DS269", "DS270",
"DS271", "DS272", "DS273", "DS274", "DS275", "DS276", "DS277", "DS278",
"DS279", "DS280", "DS281", "RG", "RG272", "RG273", "RG274", "RG275", "RG276",
"RG277", "RG278", "RG279", "RG280", "RG281", "RG282", "RG283", "UUB",
"UUB277", "UUB278", "UUB279", "UUB280", "UUB281", "UUB282", "UUB283",
"UUB284", "UUB285", "UUT", "UUT283", "UUT284", "UUT285", "UUT286", "UUT287",
"UUQ", "UUQ285", "UUQ286", "UUQ287", "UUQ288", "UUQ289", "UUP", "UUP287",
"UUP288", "UUP289", "UUP290", "UUP291", "UUH", "UUH289", "UUH290", "UUH291",
"UUH292", "UUS", "UUS291", "UUS292", "UUO", "UUO293"]
_temp_iso_mass = [
1.00782503207, 1.00782503207, 2.01410177785, 2.01410177785, 3.01604927767,
3.01604927767, 4.027806424, 5.035311488, 6.044942594, 7.052749,
4.00260325415, 3.01602931914, 4.00260325415, 5.012223624, 6.018889124,
7.028020618, 8.033921897, 9.043950286, 10.052398837, 7.016004548, 3.030775,
4.027185558, 5.0125378, 6.015122794, 7.016004548, 8.022487362, 9.026789505,
10.035481259, 11.043797715, 12.053780, 9.012182201, 5.040790, 6.019726317,
7.016929828, 8.005305103, 9.012182201, 10.013533818, 11.021657749,
12.026920737, 13.035693007, 14.04289292, 15.053460, 16.061920, 11.009305406,
6.046810, 7.029917901, 8.024607233, 9.013328782, 10.012936992, 11.009305406,
12.014352104, 13.017780217, 14.025404009, 15.031103021, 16.039808829,
17.046989906, 18.056170, 19.063730, 12, 8.037675025, 9.031036689,
10.016853228, 11.011433613, 12, 13.00335483778, 14.0032419887, 15.010599256,
16.014701252, 17.022586116, 18.026759354, 19.034805018, 20.040319754,
21.049340, 22.057200, 14.00307400478, 10.041653674, 11.026090956,
12.018613197, 13.005738609, 14.00307400478, 15.00010889823, 16.006101658,
17.008450261, 18.014078959, 19.017028697, 20.023365807, 21.02710824,
22.034394934, 23.041220, 24.051040, 25.060660, 15.99491461956,
12.034404895, 13.024812213, 14.00859625, 15.003065617, 15.99491461956,
16.999131703, 17.999161001, 19.00358013, 20.004076742, 21.008655886,
22.009966947, 23.015687659, 24.020472917, 25.029460, 26.038340, 27.048260,
28.057810, 18.998403224, 14.035060, 15.018009103, 16.011465724,
17.002095237, 18.000937956, 18.998403224, 19.999981315, 20.999948951,
22.002998815, 23.003574631, 24.008115485, 25.012101747, 26.019615555,
27.026760086, 28.035670, 29.043260, 30.052500, 31.060429, 19.99244017542,
16.025761262, 17.017671504, 18.005708213, 19.001880248, 19.99244017542,
20.993846684, 21.991385113, 22.994466904, 23.993610779, 24.997736888,
26.000461206, 27.007589903, 28.012071575, 29.019385933, 30.024801045,
31.033110, 32.040020, 33.049380, 34.057028, 22.98976928087, 18.025969,
19.013877499, 20.007351328, 20.997655206, 21.994436425, 22.98976928087,
23.990962782, 24.989953968, 25.992633, 26.994076788, 27.998938, 29.002861,
30.008976, 31.013585452, 32.02046656, 33.026719756, 34.035170, 35.042493,
36.051480, 37.059340, 23.985041699, 19.03547, 20.018862545, 21.01171291,
21.999573843, 22.994123669, 23.985041699, 24.985836917, 25.982592929,
26.984340585, 27.983876825, 28.9886, 29.990434, 30.996546, 31.998975,
33.005254, 34.009456424, 35.017340, 36.023000, 37.031400, 38.037570,
39.046772, 40.053930, 26.981538627, 21.028040, 22.019520, 23.007267432,
23.999938865, 24.990428095, 25.986891692, 26.981538627, 27.981910306,
28.980445046, 29.982960256, 30.983946619, 31.988124489, 32.990843336,
33.996851837, 34.999860235, 36.006207204, 37.01067782, 38.017231021,
39.02297, 40.031450, 41.038330, 42.046890, 27.97692653246, 22.034530,
23.025520, 24.011545616, 25.004105574, 25.992329921, 26.986704905,
27.97692653246, 28.9764947, 29.973770171, 30.975363226999998,
31.974148082, 32.97800022, 33.978575524, 34.984583575, 35.986599477,
36.99293608, 37.995633601, 39.002070013, 40.005869121, 41.01456,
42.019790, 43.028660, 44.035260, 30.973761629, 24.034350, 25.020260,
26.011780, 26.999230236, 27.992314761, 28.981800606, 29.978313789,
30.973761629, 31.973907274, 32.971725543, 33.973636257, 34.973314117,
35.97825968, 36.979608946, 37.984156827, 38.986179475, 39.991296951,
40.994335435, 42.001007913, 43.00619, 44.012990, 45.019220, 46.027380,
31.972070999, 26.027880, 27.018833, 28.004372763, 28.996608049,
29.984903249, 30.979554728, 31.972070999, 32.971458759, 33.967866902,
34.969032161, 35.96708076, 36.971125567, 37.971163317, 38.975134306,
39.975451728, 40.979582149, 41.981022419, 42.98715479, 43.99021339,
44.996508112, 46.000750, 47.008590, 48.014170, 49.023619, 34.968852682,
28.028510, 29.014110, 30.004770, 30.992413086, 31.985689901, 32.977451887,
33.973762819, 34.968852682, 35.968306981, 36.965902591, 37.968010425,
38.968008164, 39.970415472, 40.970684525, 41.973254804, 42.974054403,
43.978281071, 44.980286886, 45.98421004, 46.988710, 47.994950, 49.000320,
50.007840, 51.014490, 39.96238312251, 30.021560, 31.012123, 31.997637984,
32.989925709, 33.980271244, 34.975257585, 35.967545105, 36.96677632,
37.962732394, 38.964313231, 39.96238312251, 40.964500611, 41.963045736,
42.965636056, 43.964924033, 44.968039956, 45.968094129, 46.972186792,
47.974540, 48.980520, 49.984430, 50.991630, 51.996780, 53.004940,
38.963706679, 32.021920, 33.007260, 33.998410, 34.988009692, 35.981292235,
36.973375889, 37.969081184, 38.963706679, 39.963998475, 40.961825762,
41.96240281, 42.96071554, 43.961556804, 44.960699493, 45.961976864,
46.961678473, 47.965513535, 48.967450928, 49.972783355, 50.976380,
51.982610, 52.987120, 53.994200, 54.999710, 39.962590983, 34.014120,
35.004940, 35.993087063, 36.985870269, 37.976318452, 38.970719725,
39.962590983, 40.962278062, 41.958618014, 42.958766628, 43.955481754,
44.956186566, 45.953692587, 46.954546006, 47.952534177, 48.955674148,
49.957518962, 50.961499214, 51.9651, 52.970050, 53.974350, 54.980550,
55.985570, 56.992356, 44.955911909, 36.014920, 37.003050, 37.994700,
38.984790002, 39.977967407, 40.969251125, 41.965516429, 42.961150658,
43.959402752, 44.955911909, 45.95517189, 46.952407508, 47.952231468,
48.950023975, 49.952187685, 50.953603368, 51.956675468, 52.959610,
53.963264561, 54.968243949, 55.972870, 56.977790, 57.983710, 58.989220,
59.995710, 47.947946281, 38.009770, 39.001610, 39.990498838, 40.983145,
41.973030902, 42.968522499, 43.959690069, 44.958125616, 45.952631555,
46.951763088, 47.947946281, 48.947869982, 49.944791194, 50.946614955,
51.946897311, 52.949727171, 53.951052401, 54.955265056, 55.958199639,
56.963989137, 57.966970, 58.972930, 59.976760, 60.983200, 61.987490,
62.994420, 50.943959507, 40.011090, 40.999780, 41.991230, 42.980650,
43.97411, 44.965775808, 45.960200481, 46.95490894, 47.952253707,
48.948516101, 49.947158485, 50.943959507, 51.944775479, 52.944337979,
53.946439854, 54.947233701, 55.950530966, 56.952561432, 57.956834136,
58.960207407, 59.965026862, 60.968480, 61.973780, 62.977550, 63.983470,
64.987920, 51.940507472, 42.006430, 42.997710, 43.985549, 44.97964,
45.968358635, 46.962900046, 47.954031716, 48.951335721, 49.946044205,
50.944767431, 51.940507472, 52.940649386, 53.938880395, 54.940839672,
55.940653139, 56.943613013, 57.944353129, 58.948586367, 59.950076033,
60.954717204, 61.95661319, 62.961860, 63.964410, 64.970160, 65.973380,
66.979550, 54.938045141, 44.006870, 44.994510, 45.986720, 46.976100,
47.96852, 48.959618005, 49.95423823, 50.948210787, 51.945565464,
52.941290117, 53.940358854, 54.938045141, 55.93890491, 56.938285378,
57.939981549, 58.940440237, 59.942911246, 60.944652638, 61.94842822,
62.95023999, 63.95424909, 64.956336065, 65.961080, 66.964140, 67.969300,
68.972840, 55.934937475, 45.014578, 46.000810, 46.992890, 47.980504,
48.973610, 49.962988982, 50.956819538, 51.948113875, 52.945307942,
53.939610501, 54.938293357, 55.934937475, 56.935393969, 57.933275558,
58.934875464, 59.934071683, 60.936745281, 61.936767442, 62.940369091,
63.941201265, 64.94538027, 65.946780638, 66.950947244, 67.9537, 68.958780,
69.961460, 70.966720, 71.969620, 58.933195048, 47.011490, 48.001760,
48.989720, 49.981540, 50.970720, 51.963590, 52.954218896, 53.948459635,
54.941999029, 55.939839278, 56.936291373, 57.935752814, 58.933195048,
59.933817059, 60.932475763, 61.934050563, 62.933611611, 63.935809908,
64.93647846, 65.939762004, 66.940889529, 67.944873058, 68.94632, 69.951,
70.9529, 71.957810, 72.960240, 73.965380, 74.968330, 57.935342907,
48.019750, 49.009660, 49.995930, 50.987720, 51.975680, 52.968470,
53.957905495, 54.951330251, 55.942132022, 56.939793526, 57.935342907,
58.934346705, 59.930786372, 60.931056033, 61.928345115, 62.929669374,
63.927965959, 64.930084304, 65.929139334, 66.931569414, 67.931868789,
68.935610269, 69.9365, 70.940736283, 71.942092682, 72.946470, 73.948070,
74.952870, 75.955330, 76.960550, 77.963180, 62.929597474, 51.997180,
52.985550, 53.976710, 54.966050, 55.958560, 56.949211078, 57.944538499,
58.939498028, 59.93736503, 60.933457821, 61.932583745, 62.929597474,
63.929764183, 64.927789485, 65.928868813, 66.927730314, 67.929610889,
68.929429269, 69.932392343, 70.932676833, 71.935820307, 72.936675282,
73.939874862, 74.9419, 75.945275026, 76.947850, 77.951960, 78.954560,
79.960870, 63.929142222, 53.992950, 54.983980, 55.972380, 56.964788,
57.954591555, 58.949263764, 59.941827035, 60.939510635, 61.934329764,
62.933211566, 63.929142222, 64.929240984, 65.926033419, 66.927127345,
67.924844154, 68.926550281, 69.925319274, 70.927721599, 71.926857951,
72.929779104, 73.929458609, 74.932936741, 75.93329357, 76.936958967,
77.938440216, 78.942652, 79.944342348, 80.950480, 81.954420, 82.961030,
68.925573587, 55.994910, 56.982930, 57.974250, 58.963370, 59.957060,
60.949446287, 61.944175238, 62.939294196, 63.936838747, 64.932734754,
65.93158901, 66.928201703, 67.927980084, 68.925573587, 69.926021972,
70.924701349, 71.926366268, 72.925174682, 73.926945762, 74.926500246,
75.928827626, 76.9291543, 77.93160818, 78.93289326, 79.936515781,
80.937752355, 81.942990, 82.946980, 83.952650, 84.957000, 85.963120,
73.921177767, 57.991010, 58.981750, 59.970190, 60.963790, 61.954650,
62.949640, 63.941653, 64.939436406, 65.933843453, 66.93273407,
67.92809424, 68.927964533, 69.924247381, 70.924950954, 71.922075815,
72.923458945, 73.921177767, 74.922858948, 75.921402557, 76.923548591,
77.922852739, 78.925400995, 79.925372392, 80.928820467, 81.929549725,
82.934620, 83.937470, 84.943030, 85.946490, 86.952510, 87.956910,
88.963830, 74.921596478, 59.993130, 60.980620, 61.973200, 62.963690,
63.957572, 64.949564, 65.94471, 66.939186071, 67.936769069, 68.932273675,
69.930924826, 70.927112428, 71.926752283, 72.923824844, 73.923928692,
74.921596478, 75.922394021, 76.920647286, 77.921827281, 78.920947934,
79.922533816, 80.922132287, 81.924504067, 82.924980024, 83.929058,
84.932020, 85.936500, 86.939900, 87.944940, 88.949390, 89.955500,
90.960430, 91.966800, 79.916521271, 64.964660, 65.955210, 66.950090,
67.941798, 68.939557817, 69.933390644, 70.932241822, 71.927112352,
72.926765345, 73.922476436, 74.922523368, 75.919213597, 76.919914038,
77.91730909, 78.918499098, 79.916521271, 80.917992474, 81.916699401,
82.919118473, 83.918462354, 84.922245053, 85.924271579, 86.928521358,
87.931423998, 88.936450, 89.939960, 90.945960, 91.949920, 92.956290,
93.960490, 78.918337087, 66.964790, 67.958516, 68.950106, 69.944792,
70.93874, 71.936644572, 72.931691524, 73.929891034, 74.925776207,
75.924541469, 76.921379082, 77.921145706, 78.918337087, 79.918529296,
80.916290563, 81.916804119, 82.915180421, 83.916478974, 84.915608403,
85.918797577, 86.920711324, 87.924065926, 88.926385334, 89.930627737,
90.933968095, 91.939258714, 92.943050, 93.948680, 94.952870, 95.958530,
96.962800, 85.910610729, 68.965180, 69.955259, 70.949625738, 71.942092038,
72.939289195, 73.933084369, 74.930945746, 75.925910078, 76.92467,
77.920364783, 78.920082431, 79.916378965, 80.916592015, 81.9134836,
82.914136099, 83.911506687, 84.912527331, 85.910610729, 86.913354862,
87.914446969, 88.917630581, 89.919516555, 90.923445215, 91.92615621,
92.931274357, 93.934360, 94.939840, 95.943070, 96.948560, 97.951910,
98.957600, 99.961140, 84.911789737, 70.965320, 71.959080, 72.950561,
73.944264751, 74.93857, 75.935072226, 76.930408, 77.928141, 78.92398946,
79.92251925, 80.918995913, 81.918208598, 82.915109701, 83.914384821,
84.911789737, 85.911167419, 86.909180526, 87.911315588, 88.912278016,
89.914801694, 90.916536958, 91.9197289, 92.922041876, 93.926404946,
94.929302889, 95.934272637, 96.937351916, 97.941790668, 98.945379283,
99.949870, 100.953196445, 101.958870, 87.905612124, 72.965970,
73.956310, 74.949949568, 75.941766782, 76.937944782, 77.93218,
78.929708, 79.924521013, 80.923211846, 81.918401639, 82.917556701,
83.913425275, 84.912932803, 85.909260204, 86.908877124, 87.905612124,
88.907450675, 89.907737888, 90.910203095, 91.911037858, 92.914025634,
93.915361312, 94.919358766, 95.921696802, 96.926152923, 97.928452934,
98.933240926, 99.935351911, 100.940517888, 101.943018987, 102.948950,
103.952330, 104.958580, 88.905848295, 75.958450, 76.949645, 77.943610,
78.937351634, 79.93428, 80.929127468, 81.926792451, 82.922354243,
83.920388264, 84.916433039, 85.914885576, 86.91087573, 87.909501146,
88.905848295, 89.907151886, 90.907304791, 91.908949143, 92.909582713,
93.911595245, 94.912820621, 95.915891343, 96.918133995, 97.92220302,
98.924636204, 99.927756586, 100.93031385, 101.933555695, 102.936730,
103.941050, 104.944870, 105.949790, 106.954140, 107.959480,
89.904704416, 77.955230, 78.949160, 79.9404, 80.937210026, 81.931087,
82.928653801, 83.923250, 84.921471182, 85.916473591, 86.914816252,
87.910226904, 88.9088895, 89.904704416, 90.905645767, 91.905040847,
92.906476006, 93.906315192, 94.9080426, 95.908273386, 96.910953109,
97.912734892, 98.916512106, 99.917761889, 100.921140415, 101.922981285,
102.926599606, 103.928780, 104.933050, 105.935910, 106.940750,
107.943960, 108.949240, 109.952870, 92.906378058, 80.949030,
81.943130, 82.936705382, 83.933570, 84.927912447, 85.925038326,
86.920361108, 87.918332163, 88.913418245, 89.911264845,
90.906996243, 91.907193888, 92.906378058, 93.907283888, 94.906835792,
95.908100647, 96.908098556, 97.910328412, 98.911618375, 99.914181619,
100.915252025, 101.918037614, 102.919143842, 103.922464701,
104.923936545, 105.927970, 106.930310, 107.934840, 108.937630,
109.942440, 110.945650, 111.950830, 112.954700, 97.905408169, 82.948740,
83.940090, 84.936550, 85.930695904, 86.927326502, 87.921953241,
88.919480009, 89.913936896, 90.911750194, 91.906810991, 92.90681261,
93.905088269, 94.905842129, 95.904679477, 96.906021465, 97.905408169,
98.90771187, 99.907477336, 100.910347001, 101.91029736, 102.913207142,
103.913763625, 104.91697461, 105.918136802, 106.921692604, 107.923453,
108.927810, 109.929730, 110.934410, 111.936840, 112.941880, 113.944920,
114.950290, 98.906254747, 84.948830, 85.942880, 86.936530, 87.932678,
88.927167, 89.923556564, 90.918427639, 91.915260166, 92.910248984,
93.909657002, 94.907657084, 95.907871383, 96.906365358, 97.907215966,
98.906254747, 99.90765778, 100.907314659, 101.909215019, 102.909181351,
103.911447454, 104.911660566, 105.914357927, 106.915079572, 107.918461226,
108.919982665, 109.923820483, 110.92569283, 111.929146493, 112.931590,
113.935880, 114.938690, 115.943370, 116.946480, 117.951480, 101.904349312,
86.949180, 87.940260, 88.936110, 89.929890, 90.926292, 91.920120,
92.917052034, 93.911359711, 94.910412929, 95.907597835, 96.9075547,
97.905287132, 98.905939302, 99.904219476, 100.905582087, 101.904349312,
102.906323847, 103.905432701, 104.907752866, 105.907329433,
106.909905089, 107.910173465, 108.913203233, 109.914136041, 110.917696,
111.918965, 112.922487194, 113.924281, 114.928686173, 115.930810,
116.935580, 117.937820, 118.942840, 119.945310, 102.905504292,
88.948837, 89.942870, 90.936550, 91.931980, 92.925740, 93.921698,
94.91589874, 95.914460631, 96.911336797, 97.910708158, 98.908132104,
99.90812155, 100.906163625, 101.906843196, 102.905504292, 103.906655518,
104.905693821, 105.907287135, 106.906748423, 107.908728018, 108.908737289,
109.911136411, 110.911585913, 111.914394159, 112.915530627, 113.918806,
114.920334, 115.924062, 116.925980, 117.930070, 118.932110, 119.936410,
120.938720, 121.943210, 105.903485715, 90.949110, 91.940420, 92.935910,
93.928770, 94.924690, 95.918164359, 96.916479073, 97.912720902,
98.911767833, 99.908505886, 100.908289242, 101.905608544, 102.906087307,
103.904035834, 104.90508492, 105.903485715, 106.905133481, 107.903891701,
108.905950451, 109.905153254, 110.907670734, 111.907314058, 112.910152908,
113.910362638, 114.913683824, 115.914158662, 116.917841338, 117.9189843,
118.923110, 119.924691878, 120.928870, 121.930550, 122.934930, 123.936880,
106.90509682, 92.949780, 93.942780, 94.935480, 95.930680, 96.923972412,
97.921566201, 98.917597178, 99.916104255, 100.912802233, 101.911685,
102.90897272, 103.908629157, 104.906528661, 105.906668921, 106.90509682,
107.905955556, 108.904752292, 109.906107231, 110.905291157, 111.907004814,
112.906566579, 113.908803704, 114.908762698, 115.911359933, 116.911684562,
117.914582768, 118.915665059, 119.918787384, 120.919848046, 121.923530,
122.924900, 123.928640, 124.930430, 125.934500, 126.936770, 127.941170,
128.943690, 129.950448, 113.90335854, 94.949870, 95.939770, 96.934940,
97.927395546, 98.925010, 99.920289525, 100.918681538, 101.914462258,
102.913419246, 103.909849475, 104.909467905, 105.90645941, 106.906617928,
107.904183683, 108.904982293, 109.90300207, 110.904178107, 111.902757809,
112.904401662, 113.90335854, 114.905430969, 115.904755809, 116.907218618,
117.90691453, 118.909921597, 119.909850129, 120.912977363, 121.913332432,
122.917002999, 123.917647616, 124.92124637, 125.922353321, 126.926443864,
127.927762285, 128.932150, 129.933901937, 130.940670, 131.945550,
114.903878484, 96.949540, 97.942140, 98.934220, 99.931110851,
100.926340, 101.924090238, 102.919914188, 103.918296171, 104.91467354,
105.913465411, 106.9102951, 107.90969818, 108.907150507, 109.907165274,
110.905103278, 111.905532331, 112.904057761, 113.904913876,
114.903878484, 115.905259703, 116.904513564, 117.906354367, 118.90584535,
119.907959608, 120.907845822, 121.91027601, 122.910438276, 123.913175231,
124.913600588, 125.916463857, 126.917353091, 127.920172328, 128.92169698,
129.924970049, 130.926851767, 131.93299026, 132.937810, 133.944150,
134.949330, 119.902194676, 98.949330, 99.939044343, 100.936060,
101.930295324, 102.928100, 103.923143223, 104.921349437, 105.91688062,
106.915644329, 107.911925378, 108.911283214, 109.907842791, 110.90773446,
111.904818207, 112.905170577, 113.902778869, 114.903342397, 115.90174053,
116.902951656, 117.901603167, 118.90330763, 119.902194676, 120.90423548,
121.903439046, 122.905720838, 123.905273946, 124.907784125, 125.90765328,
126.910360024, 127.910536624, 128.913479, 129.913967295, 130.916999769,
131.917815713, 132.923829249, 133.928291765, 134.934730, 135.939340,
136.945990, 120.903815686, 102.939690, 103.936472, 104.931486348,
105.928791, 106.924150, 107.922160, 108.918132426, 109.916753, 110.913163,
111.912398009, 112.909371672, 113.909269, 114.906598, 115.906793629,
116.904835941, 117.905528731, 118.903942009, 119.905072427, 120.903815686,
121.905173651, 122.90421397, 123.905935743, 124.905253818, 125.90724748,
126.906923609, 127.909169001, 128.909148442, 129.911656324, 130.911982275,
131.914466896, 132.91525163, 133.920379744, 134.925165771, 135.930350,
136.935310, 137.940790, 138.945980, 129.906224399, 104.943640,
105.937504237, 106.935006, 107.929444597, 108.927415515, 109.922407316,
110.921110692, 111.917013672, 112.915891, 113.912089, 114.911902,
115.90846, 116.908644719, 117.905827581, 118.906403645, 119.904020222,
120.904936424, 121.903043898, 122.904270029, 123.902817896, 124.904430731,
125.903311696, 126.905226336, 127.904463056, 128.906598238, 129.906224399,
130.908523864, 131.90855316, 132.910955306, 133.911368737, 134.916448592,
135.920101246, 136.925322954, 137.929220, 138.934730, 139.938850,
140.944650, 141.949080, 126.904472681, 107.943475, 108.938149417,
109.935242, 110.930276, 111.927970, 112.923640583, 113.921850, 114.918048,
115.916808633, 116.91365, 117.913074, 118.910074, 119.910048173,
120.907366811, 121.907589284, 122.905588965, 123.906209852, 124.904630164,
125.905624153, 126.904472681, 127.905809443, 128.904987722, 129.906674247,
130.906124609, 131.907997381, 132.907796939, 133.909744465, 134.910048121,
135.914653993, 136.91787084, 137.922349591, 138.926099478, 139.931000,
140.935030, 141.940180, 142.944560, 143.949990, 131.904153457, 109.944278068,
110.941602, 111.935623112, 112.933341174, 113.927980306, 114.92629392,
115.921581087, 116.920358735, 117.916178655, 118.915410688, 119.911784244,
120.911461829, 121.908367632, 122.90848191, 123.905893003, 124.906395464,
125.904273634, 126.905183723, 127.903531275, 128.904779435, 129.903508007,
130.905082362, 131.904153457, 132.905910722, 133.905394464, 134.907227495,
135.907218794, 136.911562125, 137.913954475, 138.918792936, 139.921640943,
140.926648049, 141.92970959, 142.935110, 143.938510, 144.944070, 145.947750,
146.953560, 132.905451932, 111.950301, 112.944493274, 113.941450, 114.935910,
115.933367, 116.928670701, 117.926559494, 118.922377304, 119.920677253,
120.917229209, 121.916113434, 122.912996036, 123.912257798, 124.90972827,
125.909451977, 126.907417525, 127.907748866, 128.906064426, 129.906708552,
130.905463926, 131.90643426, 132.905451932, 133.906718475, 134.905977008,
135.907311576, 136.907089473, 137.911016704, 138.913363999, 139.917282354,
140.920045752, 141.924298927, 142.92735175, 143.932076914, 144.93552617,
145.940289423, 146.944155008, 147.949218153, 148.952930, 149.958170,
150.962190, 137.905247237, 113.950675405, 114.947370, 115.941380,
116.938499, 117.933040, 118.930659661, 119.926044974, 120.924054499,
121.919904, 122.918781036, 123.915093603, 124.914472912, 125.911250177,
126.911093797, 127.908317698, 128.908679439, 129.906320811, 130.906941118,
131.905061288, 132.90600749, 133.904508383, 134.905688591, 135.904575945,
136.905827384, 137.905247237, 138.908841341, 139.910604505, 140.914411009,
141.91645341, 142.920626719, 143.922952853, 144.927627032, 145.930219572,
146.934945, 147.937720047, 148.942580, 149.945680, 150.950810, 151.954270,
152.959610, 138.906353267, 116.950068, 117.946730, 118.940990, 119.938070,
120.933010, 121.930710, 122.926240, 123.924574275, 124.920816034,
125.919512667, 126.916375448, 127.915585177, 128.912692815, 129.912368724,
130.91007, 131.910101145, 132.908218, 133.908514011, 134.906976844,
135.907635536, 136.906493598, 137.90711193, 138.906353267, 139.909477645,
140.910962152, 141.91407913, 142.91606272, 143.919599647, 144.921645401,
145.92579346, 146.928235284, 147.932228868, 148.934734, 149.938770,
150.941720, 151.946250, 152.949620, 153.954500, 154.958350, 139.905438706,
118.952760, 119.946640, 120.943420, 121.937910, 122.935400, 123.930410,
124.928440, 125.923971, 126.922731, 127.918911, 128.918102, 129.914736,
130.914422, 131.911460487, 132.91151502, 133.908924821, 134.909151396,
135.907172422, 136.907805577, 137.905991321, 138.906652651, 139.905438706,
140.90827627, 141.909244205, 142.91238591, 143.913647336, 144.917233135,
145.918759009, 146.922673954, 147.92443241, 148.928399883, 149.930408931,
150.933976196, 151.936540, 152.940580, 153.943420, 154.948040, 155.951260,
156.956340, 140.907652769, 120.955364, 121.951810, 122.945960, 123.942960,
124.937830, 125.935310, 126.930830, 127.928791, 128.925095, 129.92359,
130.920259, 131.919255, 132.916330532, 133.915711737, 134.913111745,
135.912691611, 136.910705455, 137.910754636, 138.908938399, 139.909075874,
140.907652769, 141.910044806, 142.910816926, 143.913305245, 144.9145117,
145.917644336, 146.918995992, 147.922135026, 148.923717651, 149.926672997,
150.928318618, 151.931499225, 152.933838905, 153.937518153, 154.940120,
155.944270, 156.947430, 157.951980, 158.955500, 141.907723297, 123.952230,
124.948880, 125.943220, 126.940500, 127.935390, 128.933188, 129.928506,
130.927247, 131.923321237, 132.922348, 133.918790181, 134.91818116,
135.914976035, 136.914567137, 137.911949961, 138.911978288, 139.909552,
140.909609854, 141.907723297, 142.90981429, 143.910087274, 144.912573636,
145.913116939, 146.916100441, 147.916893288, 148.920148842, 149.920890888,
150.923828929, 151.924682219, 152.927698232, 153.929477307, 154.932932,
155.935018114, 156.939030, 157.941600, 158.946090, 159.949090, 160.953880,
144.912749023, 125.957520, 126.951630, 127.948420, 128.943160, 129.940450,
130.935870, 131.933750, 132.929782, 133.928353, 134.924876, 135.923565829,
136.920479493, 137.919548281, 138.916804082, 139.916041789, 140.913555054,
141.912874471, 142.910932616, 143.912590843, 144.912749023, 145.914696305,
146.915138545, 147.917474618, 148.918334155, 149.920983561, 150.921206973,
151.923496795, 152.924116889, 153.926463943, 154.928101267, 155.931056736,
156.933039369, 157.936561407, 158.938970, 159.942990, 160.945860,
161.950290, 162.953680, 151.919732425, 127.958080, 128.954640, 129.948920,
130.946110, 131.940690, 132.938670, 133.933970, 134.93252, 135.928275527,
136.926971746, 137.923243961, 138.922296605, 139.918994687, 140.918476488,
141.915197641, 142.914628338, 143.911999478, 144.913410353, 145.9130409,
146.914897923, 147.914822674, 148.917184735, 149.917275539, 150.919932409,
151.919732425, 152.922097356, 153.922209273, 154.924640161, 155.925527887,
156.928358717, 157.929991317, 158.933211271, 159.935140, 160.938830,
161.941220, 162.945360, 163.948280, 164.952980, 152.921230339, 129.963569,
130.957753, 131.954370, 132.949240, 133.946510, 134.941820, 135.939600,
136.935570, 137.933709, 138.92979228, 139.928087607, 140.92493072,
141.923434945, 142.920297509, 143.918816823, 144.916265237, 145.917205817,
146.916746111, 147.918085895, 148.917931238, 149.919701819, 150.919850161,
151.921744534, 152.921230339, 153.922979237, 154.92289326, 155.924752249,
156.925423647, 157.927845302, 158.929088861, 159.931971, 160.933680,
161.937040, 162.939210, 163.942990, 164.945720, 165.949970, 166.953210,
157.924103912, 133.955370, 134.952570, 135.947340, 136.945020, 137.940120,
138.938240, 139.933674, 140.932126, 141.928116, 142.92674951, 143.922963,
144.921709252, 145.918310608, 146.91909442, 147.918114524, 148.919340915,
149.918658876, 150.920348482, 151.919790996, 152.921749543, 153.920865598,
154.922622022, 155.922122743, 156.923960135, 157.924103912, 158.926388658,
159.927054146, 160.929669211, 161.930984751, 162.933990, 163.935860,
164.939380, 165.941600, 166.945570, 167.948360, 168.952870, 158.925346757,
135.961380, 136.955980, 137.953160, 138.948290, 139.945805049, 140.941448,
141.938744, 142.935121, 143.933045, 144.929274, 145.927246584, 146.924044585,
147.924271701, 148.923245909, 149.923659686, 150.923102543, 151.924074438,
152.923434588, 153.924678019, 154.923505236, 155.924747213, 156.924024604,
157.925413137, 158.925346757, 159.927167606, 160.927569919, 161.929488234,
162.930647536, 163.933350838, 164.934880, 165.937991959, 166.940050,
167.943640, 168.946220, 169.950250, 170.953300, 163.929174751, 137.962490,
138.959540, 139.954010, 140.951350, 141.946366, 142.943830, 143.939254,
144.937425, 145.932845369, 146.9310915, 147.927149831, 148.927304787,
149.925585184, 150.926184601, 151.9247183, 152.92576467, 153.924424457,
154.925753775, 155.92428311, 156.925466095, 157.924409487, 158.925739214,
159.925197517, 160.926933364, 161.926798447, 162.928731159, 163.929174751,
164.931703333, 165.932806741, 166.935655462, 167.937128769, 168.940307614,
169.942390, 170.946200, 171.948760, 172.953000, 164.93032207, 139.968539,
140.963098, 141.959770, 142.954610, 143.951480, 144.947200, 145.944640,
146.940056, 147.937718, 148.933774771, 149.933496182, 150.931688142,
151.931713714, 152.930198789, 153.930601579, 154.929103491, 155.929839,
156.928256188, 157.928941007, 158.927711959, 159.928729478, 160.927854776,
161.929095504, 162.928733903, 163.930233507, 164.93032207, 165.932284162,
166.933132633, 167.935515708, 168.936872273, 169.939618929, 170.94146515,
171.944820, 172.947290, 173.951150, 174.954050, 165.930293061, 142.966340,
143.960380, 144.957390, 145.952000, 146.949490, 147.944550, 148.942306,
149.937913839, 150.937448903, 151.935050389, 152.935063492, 153.932783081,
154.933208949, 155.931064698, 156.931916, 157.929893474, 158.930684066,
159.929083292, 160.929995309, 161.928778264, 162.930032749, 163.929200229,
164.930726003, 165.930293061, 166.932048159, 167.932370224, 168.934590364,
169.935464312, 170.938029808, 171.939356113, 172.942400, 173.944230,
174.947770, 175.950080, 176.954050, 168.93421325, 144.970073, 145.966425,
146.960961, 147.957840, 148.952720, 149.949960, 150.94548349, 151.944422,
152.942012112, 153.941567808, 154.939199459, 155.938979933, 156.936973,
157.936979525, 158.934975, 159.935262801, 160.933549, 161.933994682,
162.932651124, 163.93356, 164.932435492, 165.933554131, 166.932851622,
167.934172776, 168.93421325, 169.935801397, 170.93642944, 171.938400044,
172.939603607, 173.942168605, 174.943836853, 175.946994685, 176.949040,
177.952640, 178.955340, 173.938862089, 147.967420, 148.964040, 149.958420,
150.955400769, 151.950288919, 152.949480, 153.946393928, 154.945782332,
155.942818215, 156.942627848, 157.939865617, 158.940050099, 159.937552344,
160.937901678, 161.93576821, 162.936334305, 163.934489416, 164.935279,
165.933882042, 166.934949605, 167.933896895, 168.935189802, 169.934761837,
170.936325799, 171.936381469, 172.938210787, 173.938862089, 174.94127645,
175.942571683, 176.945260822, 177.94664668, 178.950170, 179.952330,
180.956150, 174.940771819, 149.973228, 150.967577, 151.964120,
152.958767331, 153.957522, 154.954316216, 155.953032523, 156.9500983,
157.949313283, 158.946628776, 159.946033, 160.943572, 161.943277288,
162.941179, 163.941339, 164.939406724, 165.939859, 166.93827,
167.938739111, 168.937651439, 169.938474968, 170.937913136, 171.939085669,
172.938930602, 173.94033748, 174.940771819, 175.94268631, 176.943758055,
177.945954559, 178.947327443, 179.94988116, 180.951970, 181.955040,
182.957570, 183.960910, 179.946549953, 152.970690, 153.964860, 154.963390,
155.959364025, 156.958396, 157.954799366, 158.95399487, 159.950684379,
160.950274844, 161.947210498, 162.947089, 163.944367284, 164.944567,
165.94218, 166.9426, 167.940568, 168.941259, 169.939609, 170.940492,
171.939448301, 172.940513, 173.940046178, 174.941509181, 175.941408631,
176.943220651, 177.943698766, 178.945816145, 179.946549953, 180.949101246,
181.950554096, 182.953530439, 183.955446515, 184.958820, 185.960890,
186.964590, 187.966850, 180.947995763, 154.974592, 155.972303,
156.968192445, 157.966699, 158.963018173, 159.961486056, 160.958417,
161.957291859, 162.954330271, 163.953534, 164.950772514, 165.950512,
166.948093, 167.948047, 168.946011, 169.946175, 170.944476, 171.944895,
172.94375, 173.944454, 174.943737, 175.944857, 176.944472403,
177.945778221, 178.945929535, 179.947464831, 180.947995763, 181.950151849,
182.951372616, 183.954007966, 184.955559375, 185.958552023, 186.960530,
187.963700, 188.965830, 189.969230, 183.950931188, 157.974562, 158.972918,
159.968478805, 160.967357, 161.963497417, 162.962523542, 163.958954382,
164.958279949, 165.955027253, 166.954816014, 167.951808394, 168.95177879,
169.949228482, 170.949451, 171.947292, 172.947689, 173.946079, 174.946717,
175.945634, 176.946643, 177.945876236, 178.947070447, 179.946704459,
180.948197248, 181.948204156, 182.950222951, 183.950931188, 184.953419264,
185.954364127, 186.957160466, 187.958489105, 188.961912868, 189.963181378,
190.966600, 191.968170, 186.955753109, 159.982115, 160.977589119,
161.976002, 162.972080535, 163.970323, 164.967088557, 165.965808,
166.962601, 167.961572608, 168.958791096, 169.958220071, 170.955716,
171.955422961, 172.953243, 173.953115, 174.951381, 175.951623, 176.950328,
177.950989, 178.949987641, 179.950789084, 180.950067916, 181.95121008,
182.950819841, 183.952520756, 184.952954982, 185.954986084, 186.955753109,
187.958114438, 188.959229007, 189.961817977, 190.963125242, 191.965960,
192.967470, 193.970420, 191.96148069, 161.984431, 162.982690,
163.978035649, 164.976762, 165.972690753, 166.971547969, 167.967803678,
168.96701927, 169.963577028, 170.963184819, 171.960023303, 172.959808409,
173.957062202, 174.956945835, 175.954806, 176.954965324, 177.953251241,
178.953816017, 179.952378803, 180.953244, 181.952110186, 182.953126102,
183.952489071, 184.954042265, 185.953838158, 186.955750458, 187.955838228,
188.95814747, 189.958447048, 190.960929718, 191.96148069, 192.964151563,
193.965182083, 194.968126661, 195.969639333, 192.96292643, 163.992201,
164.987520, 165.985824, 166.981665156, 167.979881, 168.976294942, 169.974965,
170.971626042, 171.970456, 172.967501739, 173.966861045, 174.964112895,
175.963648688, 176.9613015, 177.961082, 178.959122266, 179.959229446,
180.957625297, 181.958076296, 182.956846458, 183.957476, 184.956698,
185.957946104, 186.957363361, 187.958853121, 188.958718935, 189.960545968,
190.960594046, 191.962605012, 192.96292643, 193.965078378, 194.965979573,
195.968396542, 196.969653285, 197.972280, 198.973804583, 194.964791134,
165.994855, 166.992979, 167.988150742, 168.986715, 169.982495289,
170.981244542, 171.977347128, 172.976444754, 173.972818767, 174.972420552,
175.968944622, 176.968469481, 177.965648724, 178.965363404, 179.963031477,
180.963097285, 181.961170656, 182.961596703, 183.959922251, 184.960619,
185.959350813, 186.960587, 187.959395391, 188.960833686, 189.959931655,
190.961676661, 191.961038005, 192.962987401, 193.962680253, 194.964791134,
195.964951521, 196.967340182, 197.96789279, 198.970593094, 199.971440677,
200.974512868, 201.975740, 196.966568662, 168.998080, 169.996122,
170.991878881, 171.990035, 172.98623738, 173.984761, 174.981274107,
175.980099, 176.976864908, 177.97603192, 178.973212812, 179.972521124,
180.970079048, 181.969617874, 182.967593034, 183.967451524, 184.965789411,
185.965952703, 186.964567541, 187.965323661, 188.963948286, 189.964700339,
190.963704225, 191.964812953, 192.964149715, 193.96536525, 194.96503464,
195.966569813, 196.966568662, 197.968242303, 198.968765193, 199.970725647,
200.97165724, 201.973805838, 202.975154542, 203.977724, 204.979870,
201.970643011, 171.003760, 171.998832686, 172.997242, 173.992863695,
174.99142327, 175.98735458, 176.986279158, 177.982483143, 178.981833861,
179.978266394, 180.977819311, 181.974689964, 182.974449841, 183.971713051,
184.971899086, 185.96936179, 186.969814236, 187.967577049, 188.968190034,
189.966322449, 190.967157105, 191.965634327, 192.966665421, 193.965439409,
194.966720113, 195.965832649, 196.967212908, 197.966769032, 198.968279932,
199.968326004, 200.970302268, 201.970643011, 202.972872484, 203.973493933,
204.976073386, 205.977514066, 206.982588545, 207.985940, 208.991040,
209.994510, 204.974427541, 176.000590, 176.996427286, 177.994897,
178.991089082, 179.989906, 180.986257447, 181.985667104, 182.982192802,
183.981873122, 184.978791305, 185.978325, 186.975905897, 187.976009782,
188.973588428, 189.973877149, 190.971786154, 191.972225, 192.970672,
193.9712, 194.969774335, 195.970481151, 196.969574511, 197.970483495,
198.969877, 199.970962672, 200.970818891, 201.972105808, 202.97234422,
203.973863522, 204.974427541, 205.97611032, 206.977419429, 207.9820187,
208.985358952, 209.990073689, 210.993477, 211.998228, 207.976652071,
178.003830191, 179.002150, 179.997918173, 180.996623958, 181.992671842,
182.991874629, 183.988142339, 184.987609944, 185.984238945, 186.98391837,
187.980874338, 188.980807, 189.978081517, 190.978265, 191.975785171,
192.976173234, 193.97401207, 194.97454205, 195.972774109, 196.973431124,
197.972033959, 198.97291665, 199.971826675, 200.972884511, 201.972159133,
202.973390521, 203.973043589, 204.974481755, 205.974465278, 206.975896887,
207.976652071, 208.98109012, 209.984188527, 210.988736964, 211.991897543,
212.996581499, 213.999805408, 215.004807, 208.980398734, 184.001124,
184.997625, 185.996597625, 186.993157835, 187.992265154, 188.989199012,
189.988295129, 190.985786119, 191.985457954, 192.982959771, 193.98283396,
194.980650737, 195.980666509, 196.978864454, 197.979206, 198.977671961,
199.978131829, 200.977009036, 201.977742324, 202.976876001, 203.977812736,
204.977389366, 205.97849913, 206.978470679, 207.979742196, 208.980398734,
209.984120371, 210.98726946, 211.991285724, 212.994384666, 213.998711539,
215.001769776, 216.006305943, 217.009470, 218.014316, 208.982430435,
187.999422048, 188.998480562, 189.995101185, 190.994574485, 191.991335149,
192.991025275, 193.988185606, 194.988110728, 195.98553458, 196.98565963,
197.983388616, 198.983666063, 199.981798604, 200.982259764, 201.980757541,
202.981420103, 203.980318121, 204.981203322, 205.980481099, 206.981593173,
207.981245702, 208.982430435, 209.982873673, 210.986653154, 211.988867969,
212.99285728, 213.99520135, 214.999419988, 216.001915035, 217.006334796,
218.008973037, 219.013744, 220.016602, 210.987496271, 192.999843112,
193.998725085, 194.996268098, 195.995788077, 196.993189215, 197.992837202,
198.990532254, 199.990351264, 200.988416999, 201.988630236, 202.986941984,
203.987251326, 204.986074483, 205.986667036, 206.985783502, 207.986589977,
208.986173143, 209.98714771, 210.987496271, 211.990744771, 212.992936646,
213.996371733, 214.99865257, 216.002423257, 217.004718822, 218.008694336,
219.011161691, 220.015407682, 221.018050, 222.022330, 223.025190,
222.017577738, 195.005437696, 196.002115223, 197.001584351, 197.998678663,
198.998370297, 199.9956993, 200.995628335, 201.993263492, 202.993386687,
203.99142874, 204.991718799, 205.990214104, 206.990734225, 207.98964247,
208.990414742, 209.989696216, 210.990600523, 211.990703529, 212.993882668,
213.995362554, 214.998745483, 216.00027437, 217.003927675, 218.005601256,
219.009480204, 220.011393981, 221.015536782, 222.017577738, 223.021790,
224.024090, 225.028440, 226.030890, 227.035407, 228.037986, 222.01755173,
199.007258147, 200.00657249, 201.003860867, 202.003372847, 203.000924647,
204.000653204, 204.99859396, 205.998666066, 206.996949414, 207.997138783,
208.995953555, 209.996407738, 210.995536544, 211.996202244, 212.996189081,
213.998971145, 215.000341497, 216.00319799, 217.004631951, 218.007578322,
219.009252149, 220.012327405, 221.014254762, 222.01755173, 223.019735857,
224.023249951, 225.025565414, 226.029386231, 227.031835938, 228.035729,
229.038450228, 230.042510, 231.045440, 232.049772, 228.031070292,
202.009890686, 203.009271619, 204.006499668, 205.00626857, 206.00382727,
207.003798105, 208.00183994, 209.001991373, 210.000494978, 211.000897987,
211.999794499, 213.000383959, 214.000107894, 215.002719834, 216.003533035,
217.006320327, 218.00714023, 219.010085078, 220.011028384, 221.013917338,
222.01537453, 223.018502171, 224.020211821, 225.023611564, 226.025409823,
227.029177842, 228.031070292, 229.034957577, 230.037056394, 231.041220,
232.043638, 233.048060, 234.050704, 227.027752127, 206.01450498,
207.011949748, 208.011551551, 209.009494863, 210.009435986, 211.007734835,
212.007813822, 213.006607643, 214.006901798, 215.006453625, 216.008720075,
217.009346914, 218.011641453, 219.012420389, 220.014762979, 221.015591248,
222.017843851, 223.019137468, 224.021722866, 225.023229585, 226.026098089,
227.027752127, 228.031021112, 229.033015243, 230.036294178, 231.038558786,
232.042027438, 233.044550, 234.048420, 235.051232, 236.055296,
232.038055325, 209.017715682, 210.015075342, 211.014928413, 212.012980288,
213.01301014, 214.01149977, 215.01173033, 216.011062115, 217.013114328,
218.013284499, 219.015536895, 220.015747762, 221.018183674, 222.018468121,
223.020811448, 224.021466895, 225.023951021, 226.024903069, 227.02770407,
228.028741127, 229.03176243, 230.033133843, 231.036304343, 232.038055325,
233.041581843, 234.04360123, 235.047510074, 236.049870, 237.053894,
238.056496, 231.03588399, 212.023204138, 213.02110934, 214.020918417,
215.019185865, 216.019109564, 217.018323986, 218.020041889, 219.019883143,
220.021875303, 221.021877983, 222.023742, 223.023962273, 224.025625738,
225.026130678, 226.027947753, 227.028805072, 228.031051376, 229.032096793,
230.034540754, 231.03588399, 232.038591592, 233.040247277, 234.043308058,
235.045443615, 236.048681284, 237.051145659, 238.05450271, 239.057260,
240.060980, 238.050788247, 217.024368791, 218.023535671, 219.02491916,
220.024723, 221.026399, 222.026086, 223.0277386, 224.027604778,
225.029390717, 226.029338702, 227.031156367, 228.031374006, 229.033505939,
230.033939784, 231.036293704, 232.037156152, 233.039635207, 234.040952088,
235.043929918, 236.045568006, 237.048730184, 238.050788247, 239.054293299,
240.056591988, 241.060330, 242.062931, 237.048173444, 225.033913933,
226.035145, 227.034956789, 228.036180, 229.036263808, 230.037827597,
231.038245085, 232.040108, 233.040740546, 234.042895038, 235.044063267,
236.0465696, 237.048173444, 238.050946405, 239.052939025, 240.056162182,
241.058252431, 242.06164118, 243.064279, 244.067850, 242.058742611,
228.038742328, 229.040150212, 230.039649886, 231.041101107, 232.041187097,
233.042997375, 234.043317076, 235.04528605, 236.046057964, 237.048409658,
238.049559894, 239.052163381, 240.053813545, 241.056851456, 242.058742611,
243.062003092, 244.064203907, 245.067747154, 246.070204627, 247.074070,
243.06138108, 231.045560, 232.046590, 233.046348, 234.047809, 235.047946,
236.049579, 237.049996, 238.051984324, 239.053024479, 240.055300179,
241.056829144, 242.059549159, 243.06138108, 244.064284847, 245.066452114,
246.069774619, 247.072093, 248.075752, 249.078480, 247.07035354,
233.050771232, 234.050159841, 235.051434, 236.051413, 237.052901,
238.053028697, 239.054957, 240.055529539, 241.057653001, 242.058835824,
243.061389114, 244.062752578, 245.065491249, 246.067223662, 247.07035354,
248.072348508, 249.075953413, 250.078356959, 251.082284605, 252.084870,
247.07030708, 235.056580, 236.057330, 237.057003, 238.058281, 239.058279,
240.059759, 241.060230, 242.061981, 243.063007572, 244.065180774,
245.066361616, 246.068672947, 247.07030708, 248.073086, 249.074986657,
250.07831652, 251.080760172, 252.084310, 253.086880, 254.090600,
251.079586788, 237.062070, 238.061410, 239.062422, 240.062302, 241.063726,
242.063701552, 243.065427, 244.066000689, 245.068048612, 246.068805309,
247.071000589, 248.072184861, 249.074853537, 250.076406066, 251.079586788,
252.081625846, 253.085133145, 254.087322909, 255.091046, 256.093440,
252.082978512, 240.068920, 241.068538, 242.069745, 243.069548, 244.070883,
245.071324, 246.072896, 247.073656, 248.075471, 249.076411, 250.078612,
251.079992142, 252.082978512, 253.084824697, 254.088022021, 255.090273122,
256.093598, 257.095979, 258.099520, 257.095104724, 242.073430, 243.074353,
244.074084, 245.075385, 246.075299023, 247.076847, 248.077194714,
249.079034, 250.079521264, 251.081575017, 252.082466855, 253.085185236,
254.08685422, 255.089962202, 256.091773117, 257.095104724, 258.097076,
259.100595, 260.102678, 258.098431319, 245.080829, 246.081886, 247.081635,
248.082823, 249.083013, 250.084420, 251.084839, 252.086560, 253.087280,
254.089656, 255.091082705, 256.094059025, 257.095541368, 258.098431319,
259.100509, 260.103652, 261.105721, 262.108865, 255.093241131, 248.086596,
249.087833, 250.087510, 251.089012, 252.088976521, 253.090678,
254.090955253, 255.093241131, 256.094282666, 257.09687719, 258.098207,
259.101031, 260.102643, 261.105749, 262.107301, 263.110552, 264.112345,
260.105504, 251.094360, 252.095371, 253.095210, 254.096454, 255.096681,
256.098629, 257.099555, 258.101814, 259.102901, 260.105504, 261.106883,
262.109634, 263.111293, 264.114038, 265.115839, 266.119305, 263.112547,
253.100689, 254.100184, 255.101340, 256.101166194, 257.102990,
258.103489, 259.105637, 260.106440, 261.108766556, 262.109925, 263.112547,
264.113985, 265.116704, 266.117956, 267.121529, 268.123644, 255.107398,
255.107398, 256.108127, 257.107722, 258.109231, 259.109610, 260.111300,
261.112056, 262.114084, 263.114988, 264.117404, 265.118601, 266.121029,
267.122377, 268.125445, 269.127460, 270.130712, 259.114500, 258.113168,
259.114500, 260.114422071, 261.116117, 262.116398, 263.118322, 264.118931,
265.121114693, 266.122065, 267.124425, 268.125606, 269.128755, 270.130329,
271.133472, 272.135158, 273.138220, 262.122892, 260.121970, 261.121664,
262.122892, 263.123035, 264.124604, 265.125147, 266.126942, 267.127650,
268.129755, 269.130694, 270.133616, 271.135179, 272.138032, 273.139618,
274.142440, 275.144250, 263.128558, 263.128558, 264.128394885, 265.130085,
266.130097, 267.131789, 268.132162, 269.134056, 270.134650, 271.137657,
272.139052, 273.141986, 274.143131, 275.145952, 276.147208, 277.149841,
265.136151, 265.136151, 266.137299, 267.137307, 268.138728, 269.139055,
270.140657, 271.141139, 272.143738, 273.144913, 274.147492, 275.148647,
276.151156, 277.152420, 278.154812, 279.156193, 281.162061, 267.144341,
268.143795, 269.145124, 270.144720, 271.146062, 272.146317, 273.148863,
274.149492, 275.152176, 276.153034, 277.155647, 278.156469, 279.158861,
280.159795, 281.162061, 272.153615, 272.153615, 273.153682, 274.155713,
275.156142, 276.158493, 277.159519, 278.161604, 279.162468, 280.164473,
281.165372, 282.167486, 283.168415, 283.171792, 277.163943, 278.164312,
279.166546, 280.167039, 281.169286, 282.169765, 283.171792, 284.172384,
285.174105, 283.176451, 283.176451, 284.178080, 285.178732, 286.180481,
287.181045, 285.183698, 285.183698, 286.183855, 287.185599, 288.185689,
289.187279, 287.191186, 287.191186, 288.192492, 289.192715, 290.194141,
291.194384, 292.199786, 289.198862, 290.198590, 291.200011, 292.199786,
291.206564, 291.206564, 292.207549, 293.214670, 293.214670]
# Lookup tables built from the parallel element-data lists defined above
# (_temp_symbol, _temp_mass, _temp_iso_symbol, _temp_iso_mass, _temp_z,
# _temp_element).
el2mass = {sym: mass for sym, mass in zip(_temp_symbol, _temp_mass)}
el2mass["GH"] = 0.  # ghost atoms mapped to mass 0 (note: Cfour gives ghosts mass 100)
eliso2mass = {sym: mass for sym, mass in zip(_temp_iso_symbol, _temp_iso_mass)}  # encompasses el2mass
eliso2mass["GH"] = 0.  # ghost atoms mapped to mass 0 here too (Cfour uses 100)
#eliso2mass["X0"] = 0. # probably needed, just checking
el2z = {sym: z for sym, z in zip(_temp_symbol, _temp_z)}
el2z["GH"] = 0
z2mass = {z: mass for z, mass in zip(_temp_z, _temp_mass)}
z2el = {z: sym for z, sym in zip(_temp_z, _temp_symbol)}
z2element = {z: name for z, name in zip(_temp_z, _temp_element)}
|
andysim/psi4
|
psi4/driver/qcdb/periodictable.py
|
Python
|
gpl-2.0
| 78,238
|
[
"CFOUR",
"Psi4"
] |
1e01e0d48fd3eea5fdde0ab748f30282a37bd18d7bb51d6ac5f9d136973e32ae
|
# Scrape the Rice University course catalog (courses.rice.edu) for a single
# year/term and persist the results to a CSV file and a Postgres table.
#
# Flow: configure -> open a dryscrape (headless WebKit) session -> collect all
# CRNs for the term -> visit each course page and extract fields via XPath ->
# accumulate rows in a pandas DataFrame -> write CSV, then load into Postgres.
#
# NOTE(review): this is Python 2 code (print statements).
import dryscrape
import sqlalchemy
import time
import re
import os
import numpy as np
import pandas as pd
# --- Configuration ---------------------------------------------------------
project_dir = '/Users/mdelhey/rice-scrape/'  # working dir; local 'helpers' module lives here
YEAR_SCRAPE = '2013'
TERM_SCRAPE = 'Spring'
dbuser = 'mdelhey'
dbname = 'ricedb'
dbhost = 'localhost'
tbl_out = 't_courses_raw2'  # destination Postgres table
tbl_action = 'replace' # replace / append / fail
f_out = 'data/courses_tmp.csv'  # CSV snapshot path, relative to project_dir
# Boilerplate
os.chdir(project_dir)
from helpers import try_row_scrape  # imported after chdir -- presumably helpers.py sits in project_dir; confirm
# __file__ is undefined in an interactive session; it is only used as a log prefix.
try: __file__
except: __file__ = 'repl'
# Create pandas df
data_cols = ['courseid', 'yearterm', 'year', 'term', 'crn', 'course', 'title',
             'faculty', 'meeting', 'credits', 'enrolled', 'raw']
data = pd.DataFrame(None, columns=data_cols)
# set up a web scraping session
sess = dryscrape.Session(base_url = 'http://courses.rice.edu/')
# we don't need images
sess.set_attribute('auto_load_images', False)
# visit courses.rice.edu
sess.visit('/')
# visit full course page
print '[%s] Visiting courses.rice.edu (Year: %s, Term: %s)' % (__file__, YEAR_SCRAPE, TERM_SCRAPE)
# Map (year, term) to the catalog's p_term code: Fall of year Y is (Y+1)*100+10,
# Spring is Y*100+20, Summer is Y*100+30 -- presumably a Banner-style term code;
# verify against the catalog. An unrecognized TERM_SCRAPE leaves p_term unset
# and the visit below raises NameError.
if TERM_SCRAPE == 'Fall':
    p_term = str(int(YEAR_SCRAPE) + 1) + '10'
if TERM_SCRAPE == 'Spring':
    p_term = str(int(YEAR_SCRAPE)) + '20'
if TERM_SCRAPE == 'Summer':
    p_term = str(int(YEAR_SCRAPE)) + '30'
sess.visit('/admweb/!SWKSCAT.cat?p_action=QUERY&p_term=%s&p_name=STATIC' % p_term)
#sess.render('tmp.png')
# get a list of all crn's
print "[%s] Getting all CRN's" % (__file__)
classes = []
# First <td> anchor of each catalog table row holds the CRN text.
for c in sess.xpath('//*[@id="container"]/div[3]/table/tbody/tr[*]/td[1]/a'):
    classes.append(c.text())
# NOTE(review): caps the scrape at the first 100 CRNs -- looks like a
# debugging leftover; confirm before a full production run.
classes = classes[0:100]
# time scrape
start_time = time.time()
# Loop through all
print '[%s] Scraping %i classes' % (__file__, len(classes))
for idx,c in enumerate(classes):
    # Progress heartbeat every 50 classes.
    if ((idx % 50) == 0): print '[%s] ... Class %i' % (__file__, idx)
    # get link, navigate to it
    link = '/admweb/!SWKSCAT.cat?p_action=COURSE&p_term=%s&p_crn=%s' % (p_term, c)
    sess.visit(link)
    # grab data: term, course, enrolled, instructors, etc.
    # Start from an all-None row so missing fields stay NaN in the DataFrame.
    row = { i: None for i in data.columns }
    row['yearterm'] = YEAR_SCRAPE + ' ' + TERM_SCRAPE
    row['term'] = TERM_SCRAPE
    row['year'] = YEAR_SCRAPE
    row['crn'] = c
    row['courseid'] = row['yearterm'] + '_' + str(row['crn'])
    # try_row_scrape presumably returns the node text or None on XPath miss -- confirm in helpers.py.
    row['course'] = try_row_scrape('//*[@id="container"]/div[3]/div/table/tbody/tr[1]/td[2]', sess)
    row['title'] = try_row_scrape('//*[@id="container"]/div[3]/div/table/tbody/tr[1]/td[3]', sess)
    row['faculty'] = try_row_scrape('//*[@id="container"]/div[3]/div/table/tbody/tr[2]/td[3]', sess)
    row['meeting'] = try_row_scrape('//*[@id="container"]/div[3]/div/table/tbody/tr[3]/td[3]', sess)
    row['credits'] = try_row_scrape('//*[@id="container"]/div[3]/div/table/tbody/tr[1]/td[4]', sess)
    row['enrolled'] = try_row_scrape('//*[contains(text(), "Enrolled")]', sess)
    # Keep the raw page section as well, for re-parsing without re-scraping.
    row['raw'] = try_row_scrape('//*[@id="container"]/div[3]/div', sess)
    # append row
    data = data.append(row, ignore_index=True)
print '[%s] scrape took %.2f minutes' % (__file__, (time.time() - start_time)/60)
print '[%s] saving csv to %s' % (__file__, f_out)
data.to_csv(f_out, index=False)
print '[%s] saving (action = %s) to postgres (table = %s)' % (__file__, tbl_action, tbl_out)
# Passwordless local connection -- presumably relies on peer/trust auth; confirm.
rdb_con = sqlalchemy.create_engine('postgresql://%s@%s/%s' % (dbuser, dbhost, dbname))
data.to_sql(tbl_out, rdb_con, if_exists = tbl_action, index = False)
|
mdelhey/rice-scrape
|
scrape/scrape_courses.py
|
Python
|
mit
| 3,405
|
[
"VisIt"
] |
9fc31aeac8ea5981309fcaabbd09252ad2b069aed99333409e35468561bcc3cf
|
"""A thorough test of polling PAIR sockets."""
#
# Copyright (c) 2010 Brian E. Granger
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import zmq
# Demo: poll two connected PAIR sockets and check their readiness states.
# Fixed: Python-2-only `print` statements -> print() calls, and str payloads
# -> bytes literals (zmq sockets require bytes on Python 3; b'...' is
# equivalent on Python 2.6+).
print("Running polling tests for PAIR sockets...")
addr = 'tcp://127.0.0.1:5555'
ctx = zmq.Context()
s1 = ctx.socket(zmq.PAIR)
s2 = ctx.socket(zmq.PAIR)
s1.bind(addr)
s2.connect(addr)
# Sleep to allow sockets to connect.
time.sleep(1.0)
poller = zmq.Poller()
poller.register(s1, zmq.POLLIN | zmq.POLLOUT)
poller.register(s2, zmq.POLLIN | zmq.POLLOUT)
# Now make sure that both are send ready (writable, nothing pending to read).
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT
assert socks[s2] == zmq.POLLOUT
# Now do a send on both, wait and test for zmq.POLLOUT|zmq.POLLIN
s1.send(b'msg1')
s2.send(b'msg2')
time.sleep(1.0)
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT | zmq.POLLIN
assert socks[s2] == zmq.POLLOUT | zmq.POLLIN
# Make sure that both are in POLLOUT after recv (receive queues drained).
s1.recv()
s2.recv()
socks = dict(poller.poll())
assert socks[s1] == zmq.POLLOUT
assert socks[s2] == zmq.POLLOUT
poller.unregister(s1)
poller.unregister(s2)
# Wait for everything to finish.
time.sleep(1.0)
print("Finished.")
|
takluyver/pyzmq
|
examples/poll/pair.py
|
Python
|
lgpl-3.0
| 1,813
|
[
"Brian"
] |
4c84cb8f9311d8d12ad5a0eb332d9cca2fede899b612d5257653f5e5290d1821
|
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Okinawa Institute of Science and Technology, Japan.
#
# This script runs on STEPS 2.x http://steps.sourceforge.net
#
# H Anwar, I Hepburn, H Nedelescu, W Chen and E De Schutter
# Stochastic calcium mechanisms cause dendritic calcium spike variability
# J Neuroscience 2013
#
# constants_withampa.py : provides a set of parameters and other constants
# for the synaptically-induced dendritic ca burst model in the above study.
# It is intended that this file is not altered.
#
# Script authors: Haroon Anwar and Iain Hepburn
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import math
# # # # # # # # # # # # # # # # SIMULATION CONTROLS # # # # # # # # # # # # #
EF_DT = 2.0e-5 # The EField dt
NTIMEPOINTS = 25001
TIMECONVERTER = 2.0e-5
NITER = 1
############################PARAMETERS################################
init_pot = -60e-3
TEMPERATURE = 34.0
Q10 = 3
# Faraday constant: unit of FARADAY is C/mol
# Source: http://physics.nist.gov/cgi-bin/cuu/Value?f 24/2/2012
FARADAY = 96485.3365
# Molar Gas Constant: unit of R is J/mol K
# Source: http://physics.nist.gov/cgi-bin/cuu/Value?r 24/2/2012
R = 8.3144621
# Avogadro constant: unit of AVOGADRO is /mol
# Source: http://physics.nist.gov/cgi-bin/cuu/Value?na 24/2/2012
AVOGADRO = 6.02214129e23
# Elementary charge: unit of E_CHARGE is C
# Source: http://physics.nist.gov/cgi-bin/cuu/Value?e 24/2/2012
E_CHARGE = 1.602176565e-19
#FOR MSLO, THERE IS A NEW VALUE FOR Qt wrt to 25 degC
Qt = math.pow(Q10, ((TEMPERATURE-23)/10))
Qt_mslo = math.pow(Q10, ((TEMPERATURE-25)/10))
########## BULK RESISTIVITY ##########
Ra = 235.7*1.0e-2
########## MEMBRANE CAPACITANCE ##########
memb_capac = 1.5e-2
########## CaP channels density & permiability per channel ##########
# CaP_P is permiability per channel (m3/s)
# CaP_ro is channel/surface area (/m2)
# P in Ca Dynamics model is 0.95e-4 cm/s --> 0.95e-6 m/s
CaP_P = 2.5e-20
CaP_ro = 3.8e13
##########CaP channel parameters ####################
#Units (mV)
vhalfm = -29.458
cvm = 8.429
def minf_cap(V):
    """Steady-state activation (m_inf) of the CaP channel at potential V (mV)."""
    vhalfm = -29.458  # half-activation voltage (mV)
    cvm = 8.429       # slope factor (mV)
    vshift = 0.0
    x = (V - vhalfm - vshift) / cvm
    return 1.0 / (1.0 + math.exp(-x))
def tau_cap(V):
    """Activation time constant of the CaP channel, piecewise in V (mV)."""
    vshift = 0.0
    v = V - vshift
    if v >= -40:
        # Gaussian-shaped bump centred near -26.8 mV on a constant floor.
        dv = V + 26.798 - vshift
        return 0.2702 + 1.1622 * math.exp(-dv * dv / 164.19)
    # Slowly varying exponential tail for hyperpolarised potentials.
    return 0.6923 * math.exp(v / 1089.372)
def alpha_cap(V):
    """Forward (opening) rate of the CaP activation gate: m_inf / tau_m."""
    return minf_cap(V) / tau_cap(V)
def beta_cap(V):
    """Backward (closing) rate of the CaP activation gate: (1 - m_inf) / tau_m."""
    return (1.0 - minf_cap(V)) / tau_cap(V)
## Intitial conditions
CaP_m0_p = 0.92402
CaP_m1_p = 0.073988
CaP_m2_p = 0.0019748
CaP_m3_p = 1.7569e-05
########## CaT channels density & permiability per channel ##########
# CaT_P is permiability per channel (m3/s)
# CaT_ro is channel/surface area (/m2)
# P in Ca Dynamics model is 6.2e-6 cm/s -->6.2e-8 m/s
# P in Ca Dynamics model with ampa is 3.24e-6 cm/s --> 3.24e-8 m/s
CaT_P = 1.65e-20
CaT_ro = 1.9636e12
#CaT_ro = 3.7576e12 (previously used value in model with no ampa)
def minf_cat(V):
    """Steady-state activation (m_inf) of the CaT channel at potential V (mV)."""
    vhalfm = -52.0  # half-activation voltage (mV)
    cvm = -5.0      # slope factor (mV); negative sign orients the sigmoid
    vshift = 0.0
    u = (V - vhalfm - vshift) / cvm
    return 1.0 / (1.0 + math.exp(u))
def taum_cat(V):
    """Activation time constant of the CaT channel; flat 1.0 at/below -90 mV."""
    vshift = 0.0
    if V <= -90.0:
        return 1.0
    rise = math.exp((V + 40.0 - vshift) / 9.0)
    fall = math.exp(-(V + 102.0 - vshift) / 18.0)
    return 1.0 + 1.0 / (rise + fall)
def hinf_cat(V):
    """Steady-state inactivation (h_inf) of the CaT channel at potential V (mV)."""
    vhalfh = -72.0  # half-inactivation voltage (mV)
    cvh = 7.0       # slope factor (mV)
    vshift = 0.0
    u = (V - vhalfh - vshift) / cvh
    return 1.0 / (1.0 + math.exp(u))
def tauh_cat(V):
    """Inactivation time constant of the CaT channel at potential V (mV)."""
    vshift = 0.0
    # Kept as 1/exp(...) rather than exp(-...) so floating-point results
    # match the original bit-for-bit.
    denom = math.exp((V + 32.0 - vshift) / 7.0)
    return 15.0 + 1.0 / denom
def alpham_cat(V):
    """Opening rate of the CaT activation gate: m_inf(V) / tau_m(V)."""
    return minf_cat(V) / taum_cat(V)
def betam_cat(V):
    """Closing rate of the CaT activation gate: (1 - m_inf(V)) / tau_m(V)."""
    return (1 - minf_cat(V)) / taum_cat(V)
def alphah_cat(V):
    """Recovery rate of the CaT inactivation gate: h_inf(V) / tau_h(V)."""
    return hinf_cat(V) / tauh_cat(V)
def betah_cat(V):
    """Inactivation rate of the CaT gate: (1 - h_inf(V)) / tau_h(V)."""
    return (1 - hinf_cat(V)) / tauh_cat(V)
## Initial conditions
CaT_m0h0_p = 0.58661
CaT_m1h0_p = 0.23687
CaT_m2h0_p = 0.023912
CaT_m0h1_p = 0.10564
CaT_m1h1_p = 0.042658
CaT_m2h1_p = 0.0043063
########## BK channels density & conductance per channel ##########
# Total conductance = BK_G (conductance/channel) * BK_ro (channel/surface area)
# BK in Ca Dynamics model is 4.25e-2 S/cm2 --> 4.25e2 S/m2
BK_G = 2.1e-10
BK_ro = 2.0238e12
BK_rev = -77e-3
######### BK channel parameters ######################
#Units (1)
Qo = 0.73
Qc = -0.67
#Units (/s)
pf0 = 2.39
pf1 = 5.4918
pf2 = 24.6205
pf3 = 142.4546
pf4 = 211.0220
pb0 = 3936
pb1 = 687.3251
pb2 = 234.5875
pb3 = 103.2204
pb4 = 11.6581
#Units(/M)
k1 = 1.0e6
#Units(/s)
onoffrate = 1.0e3
L0 = 1806
#Units (M)
Kc = 8.63e-6
Ko = 0.6563e-6
# --- Derived BK (mslo) transition rates --------------------------------------
# Ca2+ "on" rates for the closed (c_*) and open (o_*) state ladders.
# The 4./3./2./1. prefactors track the number of unoccupied binding sites
# (states C0..C4 / O0..O4; see the initial-condition fractions defined below).
# All rates carry the temperature correction Qt_mslo.
c_01 = 4.*k1*onoffrate*Qt_mslo
c_12 = 3.*k1*onoffrate*Qt_mslo
c_23 = 2.*k1*onoffrate*Qt_mslo
c_34 = 1.*k1*onoffrate*Qt_mslo
o_01 = 4.*k1*onoffrate*Qt_mslo
o_12 = 3.*k1*onoffrate*Qt_mslo
o_23 = 2.*k1*onoffrate*Qt_mslo
o_34 = 1.*k1*onoffrate*Qt_mslo
# Ca2+ "off" rates: prefactor counts occupied sites (1..4) and the rate is
# scaled by Kc (closed, M) or Ko (open, M) defined above.
c_10 = 1.*Kc*k1*onoffrate*Qt_mslo
c_21 = 2.*Kc*k1*onoffrate*Qt_mslo
c_32 = 3.*Kc*k1*onoffrate*Qt_mslo
c_43 = 4.*Kc*k1*onoffrate*Qt_mslo
o_10 = 1.*Ko*k1*onoffrate*Qt_mslo
o_21 = 2.*Ko*k1*onoffrate*Qt_mslo
o_32 = 3.*Ko*k1*onoffrate*Qt_mslo
o_43 = 4.*Ko*k1*onoffrate*Qt_mslo
# Voltage-dependent closed->open (f_i) and open->closed (b_i) rates for a
# channel with i Ca2+ ions bound: base rate pf_i/pb_i times a Boltzmann
# factor exp(Q * F * V / (R * T)).
# NOTE(review): the lambda parameter is named `mV`, but the expression applies
# FARADAY*mV/(R*T) with no 1e-3 conversion, which is dimensionally consistent
# only if the argument is supplied in volts -- confirm the caller's units.
f_0 = lambda mV: pf0*Qt_mslo*(math.exp((Qo* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
f_1 = lambda mV: pf1*Qt_mslo*(math.exp((Qo* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
f_2 = lambda mV: pf2*Qt_mslo*(math.exp((Qo* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
f_3 = lambda mV: pf3*Qt_mslo*(math.exp((Qo* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
f_4 = lambda mV: pf4*Qt_mslo*(math.exp((Qo* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
b_0 = lambda mV: pb0*Qt_mslo*(math.exp((Qc* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
b_1 = lambda mV: pb1*Qt_mslo*(math.exp((Qc* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
b_2 = lambda mV: pb2*Qt_mslo*(math.exp((Qc* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
b_3 = lambda mV: pb3*Qt_mslo*(math.exp((Qc* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
b_4 = lambda mV: pb4*Qt_mslo*(math.exp((Qc* FARADAY* mV) / (R* (TEMPERATURE + 273.15))))
# Initial conditions
BK_C0_p= 0.99997
BK_C1_p= 4.3619e-07
BK_C2_p= 4.1713e-09
BK_C3_p= 4.4449e-11
BK_C4_p= 6.3132e-14
BK_O0_p= 2.5202e-05
BK_O1_p= 1.1765e-06
BK_O2_p= 6.6148e-08
BK_O3_p= 2.4392e-09
BK_O4_p= 4.0981e-11
########## SK channel density & conductance per channel #############
# Total conductance = SK_G (conductance/channel) * SK_ro (channel/surface area)
# SK in Ca Dynamics model is 3.1e-4 S/cm2 --> 3.1 S/m2
SK_G = 1.0e-11
SK_ro = 31.0e10
SK_rev = -77e-3
######### SK channel parameters ###################
#Units (/s)
invc1 = 80
invc2 = 80
invc3 = 200
invo1 = 1000
invo2 = 100
diro1 = 160
diro2 = 1200
#Units ( /s M)
dirc2 = 200e6
dirc3 = 160e6
dirc4 = 80e6
invc1_t = invc1*Qt
invc2_t = invc2*Qt
invc3_t = invc3*Qt
invo1_t = invo1*Qt
invo2_t = invo2*Qt
diro1_t = diro1*Qt
diro2_t = diro2*Qt
dirc2_t = dirc2*Qt/3.0
dirc3_t = dirc3*Qt/3.0
dirc4_t = dirc4*Qt/3.0
# Intital conditions
SK_C1_p= 0.96256
SK_C2_p= 0.036096
SK_C3_p= 0.0010829
SK_C4_p= 6.4973e-06
SK_O1_p= 0.00017326
SK_O2_p= 7.7967e-05
######## AMPA rate constants ##############
#Total conductance = 20nS, 30nS and 40nS ---> 20e-9 S, 30e-9 S and 40e-9 S
#Single AMPA receptor conductance (Hausser and Roth 1997; Momiyama et al. 2003; Tanaka et al. 2005) - 7-8 pS
#Units (S)
AMPA_G = 7e-12
AMPA_TotalG = 40e-9
#Units (1)
AMPA_receptors = AMPA_TotalG/AMPA_G
#Units (V)
AMPA_rev = 0e3
#Units (/s M)
rb = 13e6
#Units (/s)
ru1 = 0.0059e3
ru2 = 86e3
ro = 2.7e3
rc = 0.2e3
rd = 0.9e3
rr = 0.064e3
######### leak current channel density & conductance per channel ########
# Total conductance = 1e-6 S/cm2 --> 1e-2 S/m2
L_G = 4.0e-14
L_ro = 25.0e10
L_rev = -61e-3
######### Pump parameters ###################
P_f_kcst = 3e9
P_b_kcst = 1.75e4
P_k_kcst = 7.255e4
############################CALCIUM BUFFERING MODEL################################
########## Ca concentrations #########
Ca_oconc = 2e-3
Ca_iconc = 45e-9
########## Mg concentrations #########
Mg_conc = 590e-6
########## Buffer concentrations #############
iCBsf_conc = 27.704e-6
iCBCaf_conc = 2.6372e-6
iCBsCa_conc= 1.5148e-6
iCBCaCa_conc= 0.14420e-6
CBsf_conc= 110.82e-6
CBCaf_conc= 10.549e-6
CBsCa_conc= 6.0595e-6
CBCaCa_conc= 0.57682e-6
PV_conc= 3.2066e-6
PVCa_conc= 16.252e-6
PVMg_conc= 60.541e-6
# Diffusion constant of Calcium
DCST = 0.223e-9
# Diffusion constant of Calbindin (CB)
DCB = 0.028e-9
# Diffusion constant of Parvalbumin (PV)
DPV = 0.043e-9
#iCBsf-fast
iCBsf1_f_kcst = 4.35e7
iCBsf1_b_kcst = 35.8
#iCBsCa
iCBsCa_f_kcst = 0.55e7
iCBsCa_b_kcst = 2.6
#iCBsf_slow
iCBsf2_f_kcst = 0.55e7
iCBsf2_b_kcst = 2.6
#iCBCaf
iCBCaf_f_kcst = 4.35e7
iCBCaf_b_kcst = 35.8
#CBsf-fast
CBsf1_f_kcst = 4.35e7
CBsf1_b_kcst = 35.8
#CBsCa
CBsCa_f_kcst = 0.55e7
CBsCa_b_kcst = 2.6
#CBsf_slow
CBsf2_f_kcst = 0.55e7
CBsf2_b_kcst = 2.6
#CBCaf
CBCaf_f_kcst = 4.35e7
CBCaf_b_kcst = 35.8
#PVca
PVca_f_kcst = 10.7e7
PVca_b_kcst = 0.95
#PVmg
PVmg_f_kcst = 0.8e6
PVmg_b_kcst = 25
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
|
CNS-OIST/STEPS_Example
|
publication_models/API_1/Anwar_J Neurosci_2013/extra/constants_withampa.py
|
Python
|
gpl-2.0
| 9,093
|
[
"Avogadro"
] |
10e500a725629751c1ea30f7c10f3fc08bbf81861821fd4c1b7a422f5310539f
|
"""
A Galaxy wrapper script for corrector-1.0
Peter Li - GigaScience and BGI-HK
"""
import optparse
import os
import shutil
import subprocess
import sys
import tempfile
import fnmatch
def stop_err(msg):
    """Write *msg* to stderr and abort with a non-zero exit status.

    Fix: the original called ``sys.exit()`` with no argument, which exits
    with status 0 and makes a fatal error look like success to the calling
    process (e.g. the Galaxy job runner). Exit with status 1 instead.
    """
    sys.stderr.write(msg)
    sys.exit(1)
def cleanup_before_exit(tmp_dir):
    """Recursively delete the temporary working directory, if it exists.

    A falsy or already-removed path is silently ignored.
    """
    if not tmp_dir:
        return
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
def html_report_from_directory(html_out, dir):
    """Write a minimal HTML index of the files under *dir* to *html_out*.

    Files whose names match ``*pair_*`` are omitted; every other file is
    listed as a relative link. Links are flat (file name only), so the page
    is assumed to live alongside the listed files.
    """
    html_out.write('<html>\n<head>\n</head>\n<body>\n<font face="arial">\n<p>Corrector outputs</p>\n<p/>\n')
    for _dirpath, _subdirs, filenames in os.walk(dir):
        # Link supplementary documents in HTML file
        for fname in filenames:
            if not fnmatch.fnmatch(fname, '*pair_*'):
                html_out.write('<p><a href="%s">%s</a></p>\n' % (fname, fname))
    html_out.write('</font>\n</body>\n</html>\n')
def main():
    """Run Corrector_v1.0 on the read files named in the input file list,
    then copy the corrected forward/reverse reads to the Galaxy-assigned
    output paths and write a list of those paths.

    Python 2 script (print statements, `except Exception, e`).
    """
    thread_num = 4  # fixed; deliberately not exposed on the Galaxy interface
    # Parse command line
    parser = optparse.OptionParser()
    # List of mandatory inputs and params
    parser.add_option("-i", "--filelist", dest="filelist")
    parser.add_option("-r", "--freq", dest="freq")
    parser.add_option("-n", "--kmer_index", dest="kmer_index")
    parser.add_option("-k", "--start_cutoff", dest="start_cutoff")
    parser.add_option("-e", "--end_cutoff", dest="end_cutoff")
    parser.add_option("-d", "--delta", dest="delta")
    parser.add_option("-s", "--seed_length", dest="seed_length")
    # Removed from galaxy interface to keep under own control
    # parser.add_option("-t", "--thread_num", dest="thread_num")
    parser.add_option("", "--file_format", dest="file_format")
    # Outputs for reads
    parser.add_option("", "--corrected_forward", dest="corrected_forward")
    parser.add_option("", "--corrected_reverse", dest="corrected_reverse")
    parser.add_option("", "--corr_filelist", dest="corr_filelist")
    opts, args = parser.parse_args()
    # Temp directory for data processing
    tmp_dir = tempfile.mkdtemp(prefix="tmp-corrector-")
    print tmp_dir
    # Set up command line call
    # NOTE(review): options are interpolated unquoted into a shell string and
    # run with shell=True below -- safe only because Galaxy controls the values.
    cmd = "Corrector_v1.0 -i %s -r %s -n %s -k %s -e %s -d %s -s %s -t %s -f %s -l 1 -g 0" % (opts.filelist, opts.freq, opts.kmer_index, opts.start_cutoff, opts.end_cutoff, opts.delta, opts.seed_length, thread_num, opts.file_format)
    print "Command: ", cmd
    # Execute Corrector
    try:
        tmp_out_file = tempfile.NamedTemporaryFile(dir=tmp_dir).name
        tmp_stdout = open(tmp_out_file, 'w') # Contains Corrector stdout log
        tmp_err_file = tempfile.NamedTemporaryFile(dir=tmp_dir).name
        tmp_stderr = open(tmp_err_file, 'w')
        # Perform Corrector call
        proc = subprocess.Popen(args=cmd, shell=True, cwd=tmp_dir, stdout=tmp_stdout, stderr=tmp_stderr)
        returncode = proc.wait()
        # Read tool stdout into galaxy stdout
        f = open(tmp_out_file)
        lines = f.readlines()
        for line in lines:
            sys.stdout.write(line)
        f.close()
        # get stderr, allowing for case where it's very large
        tmp_stderr = open(tmp_err_file, 'r')
        stderr = ''
        buffsize = 1048576
        try:
            # Read in 1 MiB chunks; stop once a short (or empty) read occurs.
            while True:
                stderr += tmp_stderr.read(buffsize)
                if not stderr or len(stderr) % buffsize != 0:
                    break
        except OverflowError:
            pass
        tmp_stdout.close()
        tmp_stderr.close()
        # Non-zero exit deliberately not raised here; success is judged from
        # the output file size at the end of this function.
        # if returncode != 0:
        #     raise Exception, stderr
    except Exception, e:
        raise Exception, 'Problem performing Corrector process: ' + str(e)
    # Need to move and rename files for galaxy for display multiple files
    filelist = open(opts.filelist)
    # # Read file paths in read.lst
    # Paths are assumed to alternate forward/reverse per pair; pair_index
    # cycles 1 (forward), 2 (reverse), then resets.
    pair_index = 1
    for path in filelist:
        # Read corrected forward and reverse files into outputs
        source = path.rstrip() + ".corr"
        if pair_index == 1:
            corrected_forward_in = open(opts.corrected_forward, 'w')
            file_out = open(source, 'r')
            data = file_out.read()
            corrected_forward_in.write(data)
            corrected_forward_in.close()
            file_out.close()
        if pair_index == 2:
            corrected_reverse_in = open(opts.corrected_reverse, 'w')
            file_out = open(source, 'r')
            data = file_out.read()
            corrected_reverse_in.write(data)
            corrected_reverse_in.close()
            file_out.close()
        pair_index += 1
        if pair_index == 3:
            pair_index = 1
    filelist.close()
    # Create corrected file list
    corrected_files_in = open(opts.corr_filelist, 'w')
    corrected_files_in.write(opts.corrected_forward + "\n")
    corrected_files_in.write(opts.corrected_reverse + "\n")
    corrected_files_in.close()
    # Clean up temp files
    cleanup_before_exit(tmp_dir)
    # Check results in output file
    if os.path.getsize(opts.corrected_forward) > 0:
        sys.stdout.write('Status complete')
    else:
        stop_err("The output is empty")
if __name__ == "__main__":
main()
|
gigascience/galaxy-bgisoap
|
tools/corrector1/corrector1_wrapper.py
|
Python
|
gpl-3.0
| 5,068
|
[
"Galaxy"
] |
f431dcac8ba6e5dfc6685a3328e6cb2225df7196d40df8824b6ff5d0ece87ae3
|
# -*- coding: utf-8 -*-
import os
import logging
import unittest
from riboplot import config, ribocore
# use testing configuration
CFG = config.TestingConfig()
logging.disable(logging.CRITICAL)
# reference_orfs were obtained manually (UGENE, ORF finder)
REFERENCE_ORFS = [{'start': 45, 'stop': 203, 'sequence': 'ATGATTGAGAGGGACGGCCGGGGGCATTCGTATTGCGCCGCTAGAGGTGAAATTCTTGGACCGGCGCAAGACGGACGAAAGCGAAAGCATTTGCCAAGAATGTTTTCATTAATCAAGAACGAAAGTCGGAGGTTCGAAGACGATCAGATACCGTCGTAG'},
{'start': 219, 'stop': 254, 'sequence': 'ATGCCGACCCGCGATCCGGCGGCGTTATTCCCATGA'},
{'start': 251, 'stop': 328, 'sequence': 'ATGACCCGCCGGGCAGCGTGCGGGAAACCACGAGTCTTTGGGTTCCGGGGGGAGTATGGTTGCAAAGCTGAAACTTAA'},
{'start': 306, 'stop': 374, 'sequence': 'ATGGTTGCAAAGCTGAAACTTAAAGGAATTGACGGAAGGGCACCACCAGGAGTGGAGCCTGCGGCTTAA'},
{'start': 465, 'stop': 512, 'sequence': 'ATGGCCGTTCTTAGTTGGTGGAGCGATTTGTCTGGTTCATTCCGATAA'},
{'start': 529, 'stop': 945, 'sequence': 'ATGCTAAATAGTTACGCGGCCCCGCGCGGTCGGCGTCCCAACTTCTTAGAGGGACAAGTGGCGTTCAGCCACGCGAGATGGAGCAATAACAGGTCTGTGATGCCCTTAGATGTCCGGGGCTGCACGCGCGCCACAATGGGCGGATCAACGTGTGCCTACCCTGCGCCGAGAGGCGCGGGTAACCCGTTGAACCCCGCTCGTGATTGGGACTGGGGCTTGAAACTGTTTCCCATCAACGAGGAATTCCCAGTAAGCGCAGGTCATAAGCTTGCGTTGATTAAGTCCCTGCCCTTTGTACACACCGCCCGTCGCTACTACCGATTGGATGGTTTAGTGAGGTCCTCGGATCGGCCCCGCCGGGGCTCCTCGCCGGGCCCTGGCGGAGCGCCGAGAAGACGATCAAACTTGATCCTCTAG'},
{'start': 606, 'stop': 617, 'sequence': 'ATGGAGCAATAA'},
{'start': 638, 'stop': 718, 'sequence': 'ATGTCCGGGGCTGCACGCGCGCCACAATGGGCGGATCAACGTGTGCCTACCCTGCGCCGAGAGGCGCGGGTAACCCGTTGA'},
{'start': 854, 'stop': 862, 'sequence': 'ATGGTTTAG'}]
class BamTestCase(unittest.TestCase):
    """Check if all arguments sent on the command line are valid."""

    def test_is_bam_valid(self):
        """Test if BAM file is valid."""
        valid = ribocore.is_bam_valid(CFG.RIBO_FILE)
        self.assertTrue(valid)
        # test with a FASTA file (which is not BAM)
        self.assertRaises(ValueError, ribocore.is_bam_valid, CFG.TRANSCRIPTOME_FASTA)

    def test_bam_has_index(self):
        """Check if BAM file has an index."""
        # RPF file has an index
        has_index = ribocore.bam_has_index(CFG.RIBO_FILE)
        self.assertTrue(has_index)
        # RNA file doesn't have an index
        # NOTE(review): this test and test_create_bam_index both rely on the
        # RNA fixture shipping WITHOUT an index; test_create_bam_index removes
        # the index it creates to keep that invariant.
        has_index = ribocore.bam_has_index(CFG.RNA_FILE)
        self.assertFalse(has_index)

    def test_create_bam_index(self):
        """Index a BAM file."""
        ribocore.create_bam_index(CFG.RNA_FILE)
        # check if index exists
        has_index = ribocore.bam_has_index(CFG.RNA_FILE)
        self.assertTrue(has_index)
        # remove index (assumes samtools-style '<bam>.bai' naming -- confirm
        # against ribocore.create_bam_index)
        os.remove('{}.bai'.format(CFG.RNA_FILE))
class FastaTestCase(unittest.TestCase):
    """Tests for FASTA validation and record retrieval via ribocore."""

    def test_is_fasta_valid(self):
        """A valid FASTA file can be opened with pysam.FastaFile."""
        self.assertTrue(ribocore.is_fasta_valid(CFG.TRANSCRIPTOME_FASTA))

    def test_get_fasta_records(self):
        """Given a transcriptome fasta file and a transcript name, it should \
        be possible to get the sequence and length of a given transcript.
        """
        # get_fasta_records returns a dict keyed by transcript name whose
        # values carry a 'sequence' entry (as used here).
        record = ribocore.get_fasta_records(CFG.TRANSCRIPTOME_FASTA, [CFG.TRANSCRIPT_NAME])
        self.assertEqual(len(record[CFG.TRANSCRIPT_NAME]['sequence']), CFG.TRANSCRIPT_LENGTH)

    def test_get_fasta_record(self):
        """Get a single FASTA record from a transcriptome."""
        # Singular variant maps transcript name directly to its sequence string.
        record = ribocore.get_fasta_record(fasta_file=CFG.TRANSCRIPTOME_FASTA,
                                           transcript_name=CFG.TRANSCRIPT_NAME)
        self.assertEqual(record[CFG.TRANSCRIPT_NAME], CFG.TRANSCRIPT_SEQUENCE)
        self.assertEqual(len(record[CFG.TRANSCRIPT_NAME]), CFG.TRANSCRIPT_LENGTH)
class RiboCoreTestCase(unittest.TestCase):
def test_get_three_frame_orfs(self):
"""Get ORFs in frames 1, 2 and 3."""
orfs = ribocore.get_three_frame_orfs(sequence=CFG.TRANSCRIPT_SEQUENCE,
starts=['ATG'], stops=['TAG', 'TAA', 'TGA'])
# function should return the same ORFs as reference
self.assertEqual(len(orfs), len(REFERENCE_ORFS))
for item in orfs:
self.assertTrue(item in REFERENCE_ORFS)
def test_get_longest_orf(self):
"""Get the longest ORF from a list."""
# longest ORF in reference is sequence with start 529
longest_orf = ribocore.get_longest_orf(REFERENCE_ORFS)
self.assertEqual(longest_orf['start'], 529)
def test_read_lengths_offsets(self):
"""If multiple read lengths, offsets given check if they are valid
i.e., Each read length must have a corresponding offset.
"""
|
vimalkumarvelayudhan/riboplot
|
tests/test_ribocore.py
|
Python
|
bsd-3-clause
| 4,785
|
[
"pysam"
] |
b212199096329107709a7c2d3ca7ae84b2c681e1712fd6ff50cf6232a7e04cdc
|
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ProvidersGeocodeResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Review fixes applied:
    - __eq__ previously accessed other.__dict__ unconditionally and raised
      AttributeError when compared against None, strings, ints, etc.; it now
      returns False for non-ProvidersGeocodeResponse operands.
    - six.iteritems replaced by dict.items() (equivalent on Python 2 and 3),
      dropping the third-party dependency from this class.
    """
    def __init__(self, meta=None, providers=None):
        """
        ProvidersGeocodeResponse - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Swagger type declarations for serialization (to_dict).
        self.swagger_types = {
            'meta': 'Meta',
            'providers': 'list[Provider]'
        }
        # Mapping from attribute name to JSON key.
        self.attribute_map = {
            'meta': 'meta',
            'providers': 'providers'
        }
        self._meta = meta
        self._providers = providers

    @property
    def meta(self):
        """
        Gets the meta of this ProvidersGeocodeResponse.
        Meta-data

        :return: The meta of this ProvidersGeocodeResponse.
        :rtype: Meta
        """
        return self._meta

    @meta.setter
    def meta(self, meta):
        """
        Sets the meta of this ProvidersGeocodeResponse.
        Meta-data

        :param meta: The meta of this ProvidersGeocodeResponse.
        :type: Meta
        """
        self._meta = meta

    @property
    def providers(self):
        """
        Gets the providers of this ProvidersGeocodeResponse.
        Providers that fit the requested criterion.

        :return: The providers of this ProvidersGeocodeResponse.
        :rtype: list[Provider]
        """
        return self._providers

    @providers.setter
    def providers(self, providers):
        """
        Sets the providers of this ProvidersGeocodeResponse.
        Providers that fit the requested criterion.

        :param providers: The providers of this ProvidersGeocodeResponse.
        :type: list[Provider]
        """
        self._providers = providers

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting any
        nested model objects (anything exposing to_dict) in lists and dicts.
        """
        result = {}
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Non-ProvidersGeocodeResponse operands compare unequal instead of
        raising AttributeError on a missing __dict__.
        """
        return (isinstance(other, ProvidersGeocodeResponse) and
                self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
vericred/vericred-python
|
vericred_client/models/providers_geocode_response.py
|
Python
|
apache-2.0
| 12,870
|
[
"VisIt"
] |
16c723949bf291aced74853c3b293ffac08968a0f65f18064045cdf73ac63b42
|
#### import the simple module from the paraview
from paraview.simple import *
#### disable automatic camera reset on 'Show'
paraview.simple._DisableFirstRenderCameraReset()


def _style_display(disp, array, lut, scale_factor, gaussian_radius,
                   opacity_function=None, opacity_unit_distance=None):
    """Apply the shared display defaults used by every source in this trace.

    The recorded trace sets the same ~19 properties on each of its 17
    display proxies; only the colouring array, lookup table and a few
    size-dependent scalars differ, so those are parameters here.
    """
    disp.Representation = 'Surface'
    disp.ColorArrayName = ['POINTS', array]
    disp.LookupTable = lut
    disp.OSPRayScaleArray = array
    disp.OSPRayScaleFunction = 'PiecewiseFunction'
    disp.SelectOrientationVectors = 'velocity'
    disp.ScaleFactor = scale_factor
    disp.SelectScaleArray = array
    disp.GlyphType = 'Arrow'
    disp.GlyphTableIndexArray = array
    disp.DataAxesGrid = 'GridAxesRepresentation'
    disp.PolarAxes = 'PolarAxesRepresentation'
    if opacity_function is not None:
        # Only the unstructured (volume-capable) sources carry these.
        disp.ScalarOpacityFunction = opacity_function
        disp.ScalarOpacityUnitDistance = opacity_unit_distance
    disp.GaussianRadius = gaussian_radius
    disp.SetScaleArray = ['POINTS', array]
    disp.ScaleTransferFunction = 'PiecewiseFunction'
    disp.OpacityArray = ['POINTS', array]
    disp.OpacityTransferFunction = 'PiecewiseFunction'


def _show_hidden(source, view, array, lut, scale_factor, gaussian_radius,
                 opacity_function=None, opacity_unit_distance=None):
    """Show *source*, apply the shared style, hide its scalar bar and then
    the data itself — the trace keeps only the final tubes visible."""
    disp = Show(source, view)
    _style_display(disp, array, lut, scale_factor, gaussian_radius,
                   opacity_function, opacity_unit_distance)
    disp.SetScalarBarVisibility(view, False)
    Hide(source, view)
    return disp


def _plane_clip(source, scalar, origin, normal, value=None):
    """Create a plane Clip of *source* and show its 3D widget."""
    clip = Clip(Input=source)
    clip.ClipType = 'Plane'
    clip.Scalars = ['POINTS', scalar]
    if value is not None:
        clip.Value = value
    clip.ClipType.Origin = origin
    clip.ClipType.Normal = normal
    # toggle 3D widget visibility (only when running from the GUI)
    Show3DWidgets(proxy=clip.ClipType)
    return clip


def _cylinder_clip(source, scalar, value, center, axis, radius):
    """Create a cylinder Clip of *source* and show its 3D widget."""
    clip = Clip(Input=source)
    clip.ClipType = 'Cylinder'
    clip.Scalars = ['POINTS', scalar]
    clip.Value = value
    clip.ClipType.Center = center
    clip.ClipType.Axis = axis
    clip.ClipType.Radius = radius
    Show3DWidgets(proxy=clip.ClipType)
    return clip


def _plane_slice(source, origin, normal):
    """Create a plane Slice of *source* and show its 3D widget."""
    sl = Slice(Input=source)
    sl.SliceType = 'Plane'
    sl.SliceOffsetValues = [0.0]
    sl.SliceType.Origin = origin
    sl.SliceType.Normal = normal
    Show3DWidgets(proxy=sl.SliceType)
    return sl


# reader 1: time-statistics output (carries TAWSS/TAWSSG/OSI)
xMLUnstructuredGridReader1 = XMLUnstructuredGridReader(FileName=['/raid/home/ksansom/caseFiles/mri/VWI_proj/case4/fluent_dsa/vtk_out/calc_test_node_stats.vtu'])
xMLUnstructuredGridReader1.PointArrayStatus = ['WSS', 'WSSG', 'absolute_pressure', 'TAWSS', 'TAWSSG', 'OSI', 'velocity']

# reader 2: per-node output (component-wise shear/gradient arrays)
xMLUnstructuredGridReader2 = XMLUnstructuredGridReader(FileName=['/raid/home/ksansom/caseFiles/mri/VWI_proj/case4/fluent_dsa/vtk_out/calc_test_node.vtu'])
xMLUnstructuredGridReader2.PointArrayStatus = ['absolute_pressure', 'velocity', 'x_velocity', 'x_wall_shear', 'y_velocity', 'y_wall_shear', 'z_velocity', 'z_wall_shear', 'WSS', 'x_WSS_grad', 'y_WSS_grad', 'z_WSS_grad', 'WSSG']

# get animation scene and sync it with the data's timesteps
animationScene1 = GetAnimationScene()
animationScene1.UpdateAnimationUsingDataTimeSteps()

# get active view
renderView1 = GetActiveViewOrCreate('RenderView')
# uncomment following to set a specific view size
# renderView1.ViewSize = [928, 595]
renderView1.ResetCamera()

# colour/opacity transfer functions for the two fields of interest
oSILUT = GetColorTransferFunction('OSI')
oSIPWF = GetOpacityTransferFunction('OSI')

xMLUnstructuredGridReader1Display = Show(xMLUnstructuredGridReader1, renderView1)
_style_display(xMLUnstructuredGridReader1Display, 'OSI', oSILUT,
               scale_factor=0.002641490660607815,
               gaussian_radius=0.0013207453303039074,
               opacity_function=oSIPWF,
               opacity_unit_distance=0.0006752548306746431)
renderView1.ResetCamera()
xMLUnstructuredGridReader1Display.SetScalarBarVisibility(renderView1, True)

wSSGLUT = GetColorTransferFunction('WSSG')
wSSGPWF = GetOpacityTransferFunction('WSSG')

xMLUnstructuredGridReader2Display = Show(xMLUnstructuredGridReader2, renderView1)
_style_display(xMLUnstructuredGridReader2Display, 'WSSG', wSSGLUT,
               scale_factor=0.002641490660607815,
               gaussian_radius=0.0013207453303039074,
               opacity_function=wSSGPWF,
               opacity_unit_distance=0.0006752548306746431)
xMLUnstructuredGridReader2Display.SetScalarBarVisibility(renderView1, True)

# Show() on an already-shown source returns the existing display proxy.
xMLUnstructuredGridReader2Display = Show(xMLUnstructuredGridReader2, renderView1)
xMLUnstructuredGridReader2Display.SetScalarBarVisibility(renderView1, False)
Hide(xMLUnstructuredGridReader1, renderView1)

# Apply a preset using its name. The duplicate calls reproduce the recorded
# GUI actions verbatim; note this may not work as expected when presets have
# duplicate names.
wSSGLUT.ApplyPreset('Preset', True)
wSSGPWF.ApplyPreset('Preset', True)
wSSGLUT.ApplyPreset('Preset', True)
wSSGPWF.ApplyPreset('Preset', True)
SetActiveSource(xMLUnstructuredGridReader1)
oSILUT.ApplyPreset('Preset', True)
oSIPWF.ApplyPreset('Preset', True)

# surface of the statistics data set
extractSurface1 = ExtractSurface(Input=xMLUnstructuredGridReader1)
extractSurface1Display = Show(extractSurface1, renderView1)
_style_display(extractSurface1Display, 'OSI', oSILUT,
               scale_factor=0.002641490660607815,
               gaussian_radius=0.0013207453303039074)
Hide(xMLUnstructuredGridReader1, renderView1)
extractSurface1Display.SetScalarBarVisibility(renderView1, False)
# Hide the scalar bar for this color map if no visible data is colored by it.
HideScalarBarIfNotNeeded(oSILUT, renderView1)
Hide(extractSurface1, renderView1)
Hide(xMLUnstructuredGridReader1, renderView1)
Hide(xMLUnstructuredGridReader2, renderView1)

# ---- OSI pipeline: four plane clips + one cylinder clip isolate the ROI ----
clip1 = _plane_clip(xMLUnstructuredGridReader1, 'OSI',
                    [0.019185381077859477, 0.008678421124464417, 0.019721650218845817],
                    [-0.9140283519495734, -0.03315688808077104, 0.40429295393952386],
                    value=-8.963760541398452)
clip1Display = _show_hidden(clip1, renderView1, 'OSI', oSILUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=oSIPWF,
                            opacity_unit_distance=0.0004738466035644152)

clip2 = _plane_clip(clip1, 'OSI',
                    [0.016463510161933183, 0.013672129568797606, 0.020951286955119792],
                    [-0.14808092203454595, -0.9422076182376612, -0.30052761048581295],
                    value=-4.334532492423367)
clip2Display = _show_hidden(clip2, renderView1, 'OSI', oSILUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=oSIPWF,
                            opacity_unit_distance=0.0004570996202003559)

clip3 = _plane_clip(clip2, 'OSI',
                    [0.016711021877632924, 0.009538794369297741, 0.02097712830393285],
                    [-0.2022641960827218, 0.8606682429625938, -0.46726798578405937],
                    value=-4.334532492423367)
clip3Display = _show_hidden(clip3, renderView1, 'OSI', oSILUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=oSIPWF,
                            opacity_unit_distance=0.0004945866681605259)

clip4 = _plane_clip(clip3, 'OSI',
                    [0.013133594821495129, 0.009304044349573848, 0.020326088763650343],
                    [0.5663667872989001, 0.8238781757097235, 0.02129351624181657],
                    value=-0.22316894760113207)
clip4Display = _show_hidden(clip4, renderView1, 'OSI', oSILUT,
                            scale_factor=0.0007063730619847775,
                            gaussian_radius=0.00035318653099238875,
                            opacity_function=oSIPWF,
                            opacity_unit_distance=0.00037060974906745676)

clip5 = _cylinder_clip(clip4, 'OSI', -0.22316894760113207,
                       center=[0.018777387893322797, 0.01057777265095785, 0.021172440223998248],
                       axis=[0.36272981418484473, 0.7899369662882306, -0.4943952580605674],
                       radius=0.0021806721886416867)
clip5Display = _show_hidden(clip5, renderView1, 'OSI', oSILUT,
                            scale_factor=0.0005240732803940773,
                            gaussian_radius=0.00026203664019703866,
                            opacity_function=oSIPWF,
                            opacity_unit_distance=0.0003557483835107667)
# Rescale transfer function (recorded between the bar-hide and data-hide
# steps; LUT rescaling is independent of visibility, so the order is kept
# only approximately here)
oSILUT.RescaleTransferFunction(0.0, 0.5)
oSIPWF.RescaleTransferFunction(0.0, 0.5)

slice1 = _plane_slice(clip5,
                      [0.01406339196240306, 0.011922889508895057, 0.02007914092262216],
                      [0.9166910278672833, -0.3166687289041705, 0.2437180246962479])
slice1Display = _show_hidden(slice1, renderView1, 'OSI', oSILUT,
                             scale_factor=0.00044556772336363794,
                             gaussian_radius=0.00022278386168181897)
Hide3DWidgets(proxy=slice1.SliceType)

# tube the slice outline; this is one of the two sources left visible
tube1 = Tube(Input=slice1)
tube1.Scalars = ['POINTS', 'OSI']
tube1.Vectors = ['POINTS', 'velocity']
tube1.Radius = 5.0e-5
tube1.Capping = 0
tube1.NumberofSides = 8
tube1Display = Show(tube1, renderView1)
_style_display(tube1Display, 'OSI', oSILUT,
               scale_factor=0.0004555660299956799,
               gaussian_radius=0.00022778301499783995)
tube1Display.SetScalarBarVisibility(renderView1, False)
SetActiveSource(slice1)
renderView1.ResetCamera()

# ---- WSSG pipeline: mirrors the OSI clips, reusing their geometry ----
# NOTE(review): the recorded trace selects 'OSI' as the clip scalar here even
# though reader 2 does not list an OSI point array; kept verbatim to preserve
# the recorded state — confirm whether 'WSSG' was intended.
clip6 = _plane_clip(xMLUnstructuredGridReader2, 'OSI',
                    clip1.ClipType.Origin, clip1.ClipType.Normal)
clip6Display = _show_hidden(clip6, renderView1, 'WSSG', wSSGLUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=wSSGPWF,
                            opacity_unit_distance=0.0004738466035644152)

clip7 = _plane_clip(clip6, 'WSSG',
                    clip2.ClipType.Origin, clip2.ClipType.Normal,
                    value=clip3.Value)
clip7Display = _show_hidden(clip7, renderView1, 'WSSG', wSSGLUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=wSSGPWF,
                            opacity_unit_distance=0.0004570996202003559)

clip8 = _plane_clip(clip7, 'WSSG',
                    clip3.ClipType.Origin, clip3.ClipType.Normal,
                    value=clip4.Value)
clip8Display = _show_hidden(clip8, renderView1, 'WSSG', wSSGLUT,
                            scale_factor=0.0012495411559939385,
                            gaussian_radius=0.0006247705779969693,
                            opacity_function=wSSGPWF,
                            opacity_unit_distance=0.0004945866681605259)

clip9 = _plane_clip(clip8, 'WSSG',
                    clip4.ClipType.Origin, clip4.ClipType.Normal,
                    value=12858.09077513687)
clip9Display = _show_hidden(clip9, renderView1, 'WSSG', wSSGLUT,
                            scale_factor=0.0007063730619847775,
                            gaussian_radius=0.00035318653099238875,
                            opacity_function=wSSGPWF,
                            opacity_unit_distance=0.00037060974906745676)

clip10 = _cylinder_clip(clip9, 'WSSG', clip5.Value,
                        center=clip5.ClipType.Center,
                        axis=clip5.ClipType.Axis,
                        radius=clip5.ClipType.Radius)
clip10Display = _show_hidden(clip10, renderView1, 'WSSG', wSSGLUT,
                             scale_factor=0.0005240732803940773,
                             gaussian_radius=0.00026203664019703866,
                             opacity_function=wSSGPWF,
                             opacity_unit_distance=0.0003557483835107667)
# the recording rescales the OSI (not WSSG) transfer functions again here
oSILUT.RescaleTransferFunction(0.0, 0.5)
oSIPWF.RescaleTransferFunction(0.0, 0.5)

slice2 = _plane_slice(clip10, slice1.SliceType.Origin, slice1.SliceType.Normal)
slice2Display = _show_hidden(slice2, renderView1, 'WSSG', wSSGLUT,
                             scale_factor=0.00044556772336363794,
                             gaussian_radius=0.00022278386168181897)
Hide3DWidgets(proxy=slice2.SliceType)

# second tube (default capping/sides, unlike tube1)
tube2 = Tube(Input=slice2)
tube2.Scalars = ['POINTS', 'WSSG']
tube2.Vectors = ['POINTS', 'velocity']
tube2.Radius = 4.4556772336363794e-05
tube2Display = Show(tube2, renderView1)
_style_display(tube2Display, 'WSSG', wSSGLUT,
               scale_factor=0.0004555660299956799,
               gaussian_radius=0.00022778301499783995)
tube2Display.SetScalarBarVisibility(renderView1, False)
SetActiveSource(slice2)
renderView1.ResetCamera()

# re-show reader 1 briefly, reload its file from disk, then hide it again
SetActiveSource(xMLUnstructuredGridReader1)
xMLUnstructuredGridReader1Display = Show(xMLUnstructuredGridReader1, renderView1)
xMLUnstructuredGridReader1Display.SetScalarBarVisibility(renderView1, True)
renderView1.ResetCamera()
ReloadFiles(xMLUnstructuredGridReader1)
Hide(xMLUnstructuredGridReader1, renderView1)

#### saving camera placements for all active views
# current camera placement for renderView1
renderView1.CameraPosition = [-0.050332399782713755, 0.031944983002028124, 0.005757153247374316]
renderView1.CameraFocalPoint = [0.020720326341688633, 0.010470669716596603, 0.010644147405400872]
renderView1.CameraViewUp = [0.004949270345919701, 0.2374388307550575, 0.9713898838122179]
renderView1.CameraParallelScale = 0.0030018986837840496
#### uncomment the following to render all views
# RenderAllViews()
# alternatively, if you want to write images, you can use SaveScreenshot(...).
|
kayarre/Tools
|
vtk/paraview_case4_2.py
|
Python
|
bsd-2-clause
| 30,397
|
[
"ParaView"
] |
a5ee0baff2c1046596623d02cac06973464722fb1f4a9462ef4f6b7b255b2c40
|
#!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module metadata consumed by ansible-doc and the plugin loader.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# NOTE(review): the YAML indentation inside the three documentation strings
# had been flattened in this copy of the file, which makes the embedded YAML
# unparseable (ansible-doc). It is restored below to the standard Ansible
# 4-space layout; the textual content is unchanged.
DOCUMENTATION = '''
---
module: avi_scheduler
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Scheduler Avi RESTful Object
description:
    - This module is used to configure Scheduler object
    - more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
    state:
        description:
            - The state that should be applied on the entity.
        default: present
        choices: ["absent","present"]
    backup_config_ref:
        description:
            - Backup configuration to be executed by this scheduler.
            - It is a reference to an object of type backupconfiguration.
    enabled:
        description:
            - Boolean flag to set enabled.
            - Default value when not specified in API or module is interpreted by Avi Controller as True.
    end_date_time:
        description:
            - Scheduler end date and time.
    frequency:
        description:
            - Frequency at which custom scheduler will run.
            - Allowed values are 0-60.
    frequency_unit:
        description:
            - Unit at which custom scheduler will run.
            - Enum options - SCHEDULER_FREQUENCY_UNIT_MIN, SCHEDULER_FREQUENCY_UNIT_HOUR, SCHEDULER_FREQUENCY_UNIT_DAY, SCHEDULER_FREQUENCY_UNIT_WEEK,
            - SCHEDULER_FREQUENCY_UNIT_MONTH.
    name:
        description:
            - Name of scheduler.
        required: true
    run_mode:
        description:
            - Scheduler run mode.
            - Enum options - RUN_MODE_PERIODIC, RUN_MODE_AT, RUN_MODE_NOW.
    run_script_ref:
        description:
            - Control script to be executed by this scheduler.
            - It is a reference to an object of type alertscriptconfig.
    scheduler_action:
        description:
            - Define scheduler action.
            - Enum options - SCHEDULER_ACTION_RUN_A_SCRIPT, SCHEDULER_ACTION_BACKUP.
            - Default value when not specified in API or module is interpreted by Avi Controller as SCHEDULER_ACTION_BACKUP.
    start_date_time:
        description:
            - Scheduler start date and time.
    tenant_ref:
        description:
            - It is a reference to an object of type tenant.
    url:
        description:
            - Avi controller URL of the object.
    uuid:
        description:
            - Unique object identifier of the object.
extends_documentation_fragment:
    - avi
'''

EXAMPLES = """
- name: Example to create Scheduler object
  avi_scheduler:
    controller: 10.10.25.42
    username: admin
    password: something
    state: present
    name: sample_scheduler
"""

RETURN = '''
obj:
    description: Scheduler (api/scheduler) object
    returned: success, changed
    type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    # The Avi SDK helpers are optional; record availability so main() can
    # fail with a clean message instead of an ImportError traceback.
    from ansible.module_utils.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    HAS_AVI = False
def main():
    """Ansible entry point for managing Avi Scheduler objects.

    Builds the module's argument specification, merges in the shared Avi
    connection options, then delegates create/update/delete handling to
    the generic ``avi_ansible_api`` helper.
    """
    # Per-option argument specification; all options are optional strings
    # unless stated otherwise below.
    argument_specs = {
        'state': {'default': 'present', 'choices': ['absent', 'present']},
        'backup_config_ref': {'type': 'str'},
        'enabled': {'type': 'bool'},
        'end_date_time': {'type': 'str'},
        'frequency': {'type': 'int'},
        'frequency_unit': {'type': 'str'},
        'name': {'type': 'str', 'required': True},
        'run_mode': {'type': 'str'},
        'run_script_ref': {'type': 'str'},
        'scheduler_action': {'type': 'str'},
        'start_date_time': {'type': 'str'},
        'tenant_ref': {'type': 'str'},
        'url': {'type': 'str'},
        'uuid': {'type': 'str'},
    }
    # Shared connection options: controller, username, password, tenant, ...
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(argument_spec=argument_specs,
                           supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    # The shared helper performs the actual API calls for the 'scheduler'
    # object type; no fields need special "sensitive" handling here.
    return avi_ansible_api(module, 'scheduler',
                           set([]))


if __name__ == '__main__':
    main()
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/network/avi/avi_scheduler.py
|
Python
|
bsd-3-clause
| 5,098
|
[
"VisIt"
] |
8658e992a6acccedc8e347e11e08fa1204a5ddde766878be5a83f6a9b64959c0
|
import os
from ase.test import NotAvailable
from ase.lattice import bulk
from ase.calculators.calculator import kpts2mp
from ase.calculators.elk import ELK
# Regression test for the ELK calculator's muffin-tin radius (rmt) handling:
# generating species with an absolute rmt (2.1 Bohr) and with a negative
# delta (-0.1 relative to the 2.2 Bohr default) must produce the same
# species and therefore the same total energy.
atoms = bulk('Al', 'bcc', a=4.0)
# save ELK_SPECIES_PATH
ELK_SPECIES_PATH = os.environ.get('ELK_SPECIES_PATH', None)
if ELK_SPECIES_PATH is None:
    raise NotAvailable('ELK_SPECIES_PATH not set.')
# find rmt of the default species
sfile = os.path.join(os.environ['ELK_SPECIES_PATH'], 'elk.in')
assert os.path.exists(sfile)
slines = open(sfile, 'r').readlines()
rmt_orig = {}
for name in ['Al']:
    found = False
    # the species block starts one line above the line containing 'Al'
    for n, line in enumerate(slines):
        if line.find("'" + name + "'") > -1:
            begline = n - 1
    # the block ends at the first empty line after its start
    for n, line in enumerate(slines[begline:]):
        if not line.strip(): # first empty line
            endline = n
            found = True
            break
    assert found
    # split needed because H is defined with comments
    rmt_orig[name] = float(slines[begline + 3].split()[0].strip())
assert rmt_orig['Al'] == 2.2 # 2.2 Bohr default
# test1
# generate species with custom rmt 2.1
rmt = {'Al': 2.1}
label = 'rmt2.1'
atomsrmt = atoms.copy()
os.environ['ELK_SPECIES_PATH'] = ELK_SPECIES_PATH
# tasks=0 only generates the species files; no real SCF work yet
atomsrmt.calc = ELK(tasks=0, label=label, rmt=rmt) # minimal calc
atomsrmt.get_potential_energy()
del atomsrmt.calc
del atomsrmt
# hack ELK_SPECIES_PATH to use custom species
os.environ['ELK_SPECIES_PATH'] = os.path.abspath(label) + '/'
# run calculation
calc = ELK(tasks=0, label=label,
           rgkmax=4.0, kpts=tuple(kpts2mp(atoms, 2.0, even=True)))
atoms.set_calculator(calc)
e1 = atoms.get_potential_energy()
# test2
# generate species with rmt decreased by 0.1 Bohr relative to the default
rmt = {'Al': -0.1}
label = 'rmt0.1m'
atomsrmt = atoms.copy()
os.environ['ELK_SPECIES_PATH'] = ELK_SPECIES_PATH
atomsrmt.calc = ELK(tasks=0, label=label, rmt=rmt) # minimal calc
atomsrmt.get_potential_energy()
del atomsrmt.calc
del atomsrmt
# hack ELK_SPECIES_PATH to use custom species
os.environ['ELK_SPECIES_PATH'] = os.path.abspath(label) + '/'
# run calculation
calc = ELK(tasks=0, label=label,
           rgkmax=4.0, kpts=tuple(kpts2mp(atoms, 2.0, even=True)))
atoms.set_calculator(calc)
e2 = atoms.get_potential_energy()
# restore ELK_SPECIES_PATH
os.environ['ELK_SPECIES_PATH'] = ELK_SPECIES_PATH
# 2.2 - 0.1 == 2.1, so both runs must agree to numerical precision
assert abs(e1 - e2) < 1.0e-4
|
askhl/ase
|
ase/test/elk/Al_rmt.py
|
Python
|
gpl-2.0
| 2,300
|
[
"ASE",
"Elk"
] |
b00529d423dc59d7aca5514bea0292f89968c07501312eb3b51f675747337beb
|
"""
This module contains code for generating toy examples
"""
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
import sys
import parse
import random
from numpy.random import randn
from numpy import ones, concatenate, array, transpose
from esvm.mldata import DatasetFileFASTA, init_datasetfile
from esvm.mldata_arff import DatasetFileARFF
class MotifDataDef(object):
    """Parameter bundle describing how to generate one motif dataset.

    Instances are passed to motifgen()/arffwrite_sequence()/
    fastawrite_sequence(), which read the attributes below.
    """
    motif = ''      # motif string embedded into each sequence
    numseq = 0      # number of sequences to generate
    seqlenmin = 0   # minimum sequence length (inclusive)
    seqlenmax = 0   # maximum sequence length (inclusive)
    posstart = 0    # earliest motif start position (inclusive)
    posend = 0      # latest motif start position (inclusive)
    mutrate = 0.0   # per-character probability of NOT writing the motif char
################################################################################
# data generation functions
def motifgen(motif, numseq, seqlenmin, seqlenmax, posstart, posend, mutrate):
    """Generate sequences with a particular motif at a particular location.
    Also allow a possible mutation rate of the motif.

    Each of the numseq sequences gets a uniform random length in
    [seqlenmin, seqlenmax] and is filled with random lowercase 'acgt'
    characters.  The motif is then overlaid starting at a uniform random
    position in [posstart, posend]; each motif character is skipped
    ("mutated") with probability mutrate, or when it would fall outside
    the sequence.  Sequences are returned upper-cased.

    Returns a (metadata_string, sequence_list) pair.
    """
    metadata = 'motifgen(%s,%d,%d,%d,%d,%d,%1.2f)' % (motif, numseq, seqlenmin, seqlenmax, posstart, posend, mutrate)
    acgt = 'acgt'
    seqlist = []
    for i in range(numseq):
        # random background sequence (renamed from 'str', which shadowed
        # the builtin)
        seqlen = random.randint(seqlenmin, seqlenmax)
        chars = [acgt[random.randint(0, 3)] for l in range(seqlen)]
        # overlay the motif, allowing per-character "mutations"
        pos = random.randint(posstart, posend)
        for l in range(len(motif)):
            if (random.random() >= mutrate) and (pos + l < seqlen) and (pos + l >= 0):
                chars[pos + l] = motif[l]
        seqlist.append(''.join(chars).upper())
    return metadata, seqlist
def cloudgen(numpoint, numfeat, fracpos, width):
    """Generate two Gaussian point clouds, centered around one and minus one.

    A fraction fracpos of the numpoint columns are positive examples
    (label +1, centred at +1); the remainder are negative (label -1,
    centred at -1).  width scales the isotropic Gaussian noise.

    Returns (metadata_string, pointcloud of shape numfeat x numpoint,
    labels of length numpoint).
    """
    numpos = int(round(fracpos * numpoint))
    numneg = numpoint - numpos
    metadata = 'cloudgen(%d,%d,%d,%3.2f)' % (numpos, numneg, numfeat, width)
    # Positive cloud first, then negative, stacked column-wise.
    positives = ones((numfeat, numpos)) + width * randn(numfeat, numpos)
    negatives = -ones((numfeat, numneg)) + width * randn(numfeat, numneg)
    pointcloud = concatenate((positives, negatives), axis=1)
    labels = concatenate((ones(numpos), -ones(numneg)))
    return metadata, pointcloud, labels
################################################################################
# ARFF functions
def arffwrite_real(filename, numpoint, numfeat, fracpos=0.5, width=1.0):
    """Write an ARFF file containing a vectorial dataset"""
    # Generate the two labelled Gaussian clouds, then stream them out
    # through the 'vec' dataset writer.
    metadata, points, labels = cloudgen(numpoint, numfeat, fracpos, width)
    writer = init_datasetfile(filename, 'vec')
    writer.comment = metadata
    writer.dataname = 'pointcloud'
    writer.writelines(points, labels)
def arffwrite_sequence(filename, p, n):
    """Write an ARFF file containing a sequence dataset"""
    # p and n are MotifDataDef-style parameter bundles for the positive
    # and negative class respectively.
    pos_meta, pos_seqs = motifgen(p.motif, p.numseq, p.seqlenmin, p.seqlenmax, p.posstart, p.posend, p.mutrate)
    neg_meta, neg_seqs = motifgen(n.motif, n.numseq, n.seqlenmin, n.seqlenmax, n.posstart, n.posend, n.mutrate)
    labels = concatenate((ones(len(pos_seqs)), -ones(len(neg_seqs))))
    writer = init_datasetfile(filename, 'seq')
    writer.comment = pos_meta + ' ' + neg_meta
    writer.dataname = 'motif'
    writer.writelines(pos_seqs + neg_seqs, labels)
def arffread(kernelname, datafilename):
    """Decide based on kernelname whether to read a sequence or vectorial file.

    Vectorial kernels (gauss/linear/poly/None) read a 'vec' dataset,
    string kernels read 'seq', and the spec2/cumspec2 kernels read 'mseq'.

    Raises ValueError for an unrecognised kernel name.  (The original code
    printed a message and then crashed with a NameError on the undefined
    file handle.)
    """
    if kernelname in ('gauss', 'linear', 'poly', None):
        fp = init_datasetfile(datafilename, 'vec')
    elif kernelname in ('wd', 'localalign', 'localimprove', 'spec', 'cumspec'):
        fp = init_datasetfile(datafilename, 'seq')
    elif kernelname in ('spec2', 'cumspec2'):
        fp = init_datasetfile(datafilename, 'mseq')
    else:
        raise ValueError('Unknown kernel in arffread: %r' % (kernelname,))
    return fp.readlines()
################################################################################
# fasta functions
def fastawrite_sequence(filename, p):
    """Write a FASTA file containing a sequence dataset.

    All generated sequences are labelled positive (+1).  p is a
    MotifDataDef-style parameter bundle.
    """
    # Dropped the unused 'import arff' (the sibling ARFF writers keep the
    # same import commented out).
    metadata, seqlist = motifgen(p.motif, p.numseq, p.seqlenmin, p.seqlenmax, p.posstart, p.posend, p.mutrate)
    labels = ones(len(seqlist))
    fp = init_datasetfile(filename, 'seq')
    fp.writelines(seqlist, labels)
def fastaread(fnamepos,fnameneg=None):
"""Read two fasta files, the first positive, the second negative"""
fpos = init_datasetfile(fnamepos,'seq')
(fa1,lab1) = fpos.readlines()
if fnameneg is not None:
fneg = init_datasetfile(fnameneg,'seq')
(fa2,lab2) = fneg.readlines()
print 'positive: %d, negative %d' % (len(fa1),len(fa2))
all_labels = concatenate((ones(len(fa1)),-ones(len(fa2))))
all_examples = fa1 + fa2
else:
all_examples = fa1
all_labels = ones(len(fa1))
return all_examples, all_labels
|
besser82/shogun
|
applications/easysvm/esvm/datafuncs.py
|
Python
|
bsd-3-clause
| 4,836
|
[
"Gaussian"
] |
99ad70e3863dfb6e73774ce2d6e679d5178ce4b91d73a9f511179e2595b2d26b
|
"""
Author: Ang Ming Liang
Please run the following command before running the script
wget -q https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py
or curl https://raw.githubusercontent.com/sayantanauddy/vae_lightning/main/data.py > data.py
Then, make sure to get your kaggle.json from kaggle.com then run
mkdir /root/.kaggle
cp kaggle.json /root/.kaggle/kaggle.json
chmod 600 /root/.kaggle/kaggle.json
rm kaggle.json
to copy kaggle.json into a folder first
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from pytorch_lightning import LightningModule, Trainer
from data import CelebADataModule
from argparse import ArgumentParser
IMAGE_SIZE = 64       # side length (pixels) every image is resized to
CROP = 128            # centre-crop size applied before resizing; <= 0 disables
DATA_PATH = "kaggle"  # root directory of the CelebA download

# Shared train/val transform: random horizontal flip, optional centre crop,
# resize to IMAGE_SIZE, then conversion to a [0, 1] float tensor.
trans = []
trans.append(transforms.RandomHorizontalFlip())
if CROP > 0:
    trans.append(transforms.CenterCrop(CROP))
trans.append(transforms.Resize(IMAGE_SIZE))
trans.append(transforms.ToTensor())
transform = transforms.Compose(trans)
class VAE(LightningModule):
    """
    Standard VAE with Gaussian Prior and approx posterior.
    """
    def __init__(
        self,
        input_height: int,
        hidden_dims = None,
        in_channels = 3,
        enc_out_dim: int = 512,
        kl_coeff: float = 0.1,
        latent_dim: int = 256,
        lr: float = 1e-4
    ):
        """
        Args:
            input_height: height of the images
            hidden_dims: per-stage channel counts of the conv encoder
                (defaults to [32, 64, 128, 256, 512]); reversed in place
                to build the decoder
            in_channels: number of channels of the input images
            enc_out_dim: nominal encoder output width (kept as a
                hyperparameter; the linear heads are sized from
                hidden_dims[-1] * 4 instead)
            kl_coeff: coefficient for kl term of the loss
            latent_dim: dim of latent space
            lr: learning rate for Adam
        """
        super(VAE, self).__init__()
        self.save_hyperparameters()
        self.lr = lr
        self.kl_coeff = kl_coeff
        self.enc_out_dim = enc_out_dim
        self.latent_dim = latent_dim
        self.input_height = input_height
        modules = []
        if hidden_dims is None:
            hidden_dims = [32, 64, 128, 256, 512]
        # Build Encoder: each stage halves the spatial size
        # (stride-2 conv -> batchnorm -> LeakyReLU).
        for h_dim in hidden_dims:
            modules.append(
                nn.Sequential(
                    nn.Conv2d(in_channels, out_channels=h_dim,
                              kernel_size= 3, stride= 2, padding = 1),
                    nn.BatchNorm2d(h_dim),
                    nn.LeakyReLU())
            )
            in_channels = h_dim
        self.encoder = nn.Sequential(*modules)
        # Latent heads on the flattened encoder output; the *4 factor
        # assumes a 2x2 spatial map (64px input / 2^5 downsampling).
        self.fc_mu = nn.Linear(hidden_dims[-1]*4, latent_dim)
        self.fc_var = nn.Linear(hidden_dims[-1]*4, latent_dim)
        # Build Decoder (mirror of the encoder)
        modules = []
        self.decoder_input = nn.Linear(latent_dim, hidden_dims[-1] * 4)
        hidden_dims.reverse()
        for i in range(len(hidden_dims) - 1):
            modules.append(
                nn.Sequential(
                    nn.ConvTranspose2d(hidden_dims[i],
                                       hidden_dims[i + 1],
                                       kernel_size=3,
                                       stride = 2,
                                       padding=1,
                                       output_padding=1),
                    nn.BatchNorm2d(hidden_dims[i + 1]),
                    nn.LeakyReLU())
            )
        self.decoder = nn.Sequential(*modules)
        # Final upsample back to 3 channels; Sigmoid keeps pixels in [0, 1],
        # matching the ToTensor-scaled inputs.
        self.final_layer = nn.Sequential(
                            nn.ConvTranspose2d(hidden_dims[-1],
                                               hidden_dims[-1],
                                               kernel_size=3,
                                               stride=2,
                                               padding=1,
                                               output_padding=1),
                            nn.BatchNorm2d(hidden_dims[-1]),
                            nn.LeakyReLU(),
                            nn.Conv2d(hidden_dims[-1], out_channels= 3,
                                      kernel_size= 3, padding= 1),
                            nn.Sigmoid())
    @staticmethod
    def pretrained_weights_available():
        # NOTE(review): VAE.pretrained_urls is never defined in this file,
        # so calling this raises AttributeError — confirm where the URL
        # table is supposed to come from.
        return list(VAE.pretrained_urls.keys())
    def from_pretrained(self, checkpoint_name):
        # NOTE(review): also depends on the undefined VAE.pretrained_urls.
        if checkpoint_name not in VAE.pretrained_urls:
            raise KeyError(str(checkpoint_name) + ' not present in pretrained weights.')
        return self.load_from_checkpoint(VAE.pretrained_urls[checkpoint_name], strict=False)
    def forward(self, x):
        """Encode x, draw one latent sample and return the reconstruction."""
        mu, log_var = self.encode(x)
        p, q, z = self.sample(mu, log_var)
        return self.decode(z)
    def encode(self, x):
        """Return (mu, log_var) of the approximate posterior q(z|x)."""
        x = self.encoder(x)
        x = torch.flatten(x, start_dim=1)
        mu = self.fc_mu(x)
        log_var = self.fc_var(x)
        return mu, log_var
    def _run_step(self, x):
        """Full pass used by the loss: returns (z, x_hat, prior p, posterior q)."""
        mu, log_var = self.encode(x)
        p, q, z = self.sample(mu, log_var)
        return z, self.decode(z), p, q
    def decode(self, z):
        """Map a latent sample back to image space."""
        result = self.decoder_input(z)
        # un-flatten to the 2x2 spatial map expected by the conv decoder
        result = result.view(-1, 512, 2, 2)
        result = self.decoder(result)
        result = self.final_layer(result)
        return result
    def sample(self, mu, log_var):
        """Reparameterised draw: returns (prior p, posterior q, sample z)."""
        std = torch.exp(log_var / 2)
        p = torch.distributions.Normal(torch.zeros_like(mu), torch.ones_like(std))
        q = torch.distributions.Normal(mu, std)
        z = q.rsample()
        return p, q, z
    def step(self, batch, batch_idx):
        """Shared train/val computation: loss = recon + kl_coeff * KL."""
        x, y = batch
        z, x_hat, p, q = self._run_step(x)
        recon_loss = F.mse_loss(x_hat, x, reduction='mean')
        # single-sample Monte-Carlo estimate of KL(q || p)
        log_qz = q.log_prob(z)
        log_pz = p.log_prob(z)
        kl = log_qz - log_pz
        kl = kl.mean()
        kl *= self.kl_coeff
        loss = kl + recon_loss
        logs = {
            "recon_loss": recon_loss,
            "kl": kl,
            "loss": loss,
        }
        return loss, logs
    def training_step(self, batch, batch_idx):
        loss, logs = self.step(batch, batch_idx)
        self.log_dict({f"train_{k}": v for k, v in logs.items()}, on_step=True, on_epoch=False)
        return loss
    def validation_step(self, batch, batch_idx):
        loss, logs = self.step(batch, batch_idx)
        self.log_dict({f"val_{k}": v for k, v in logs.items()})
        return loss
    def configure_optimizers(self):
        """Plain Adam over all parameters at the configured learning rate."""
        return torch.optim.Adam(self.parameters(), lr=self.lr)
if __name__ == "__main__":
    # Parse hyperparameters, build the model and train it on CelebA.
    parser = ArgumentParser(description='Hyperparameters for our experiments')
    parser.add_argument('--latent-dim', type=int, default=256, help="size of latent dim for our vae")
    parser.add_argument('--epochs', type=int, default=50, help="num epochs")
    parser.add_argument('--gpus', type=int, default=1, help="gpus, if no gpu set to 0, to run on all gpus set to -1")
    parser.add_argument('--bs', type=int, default=500, help="batch size")
    # BUGFIX: these two were declared type=int, which made argparse reject
    # any fractional command-line value (e.g. "--lr 0.001" raised an error)
    # and contradicted the float defaults and the VAE signature.
    parser.add_argument('--kl-coeff', type=float, default=5, help="kl coeff aka beta term in the elbo loss function")
    parser.add_argument('--lr', type=float, default=0.01, help="learning rate")
    hparams = parser.parse_args()
    m = VAE(input_height=IMAGE_SIZE, latent_dim=hparams.latent_dim, kl_coeff=hparams.kl_coeff, lr=hparams.lr)
    runner = Trainer(gpus=hparams.gpus,
                     max_epochs=hparams.epochs)
    # CelebA is downloaded on first use (requires kaggle.json credentials,
    # see the module docstring).
    dm = CelebADataModule(data_dir=DATA_PATH,
                          target_type='attr',
                          train_transform=transform,
                          val_transform=transform,
                          download=True,
                          batch_size=hparams.bs)
    runner.fit(m, datamodule=dm)
    torch.save(m.state_dict(), "vae-celeba-conv.ckpt")
|
probml/pyprobml
|
vae/standalone/vae_celeba_lightning.py
|
Python
|
mit
| 7,878
|
[
"Gaussian"
] |
2c5634aa58705e0a2db08f94108adc35fae311dee26ed3b711930ed0850a570a
|
import time
import random
def cls():
    """Crudely 'clear' the terminal by printing 200 blank lines."""
    for _ in range(200):
        print("")
def cool():
wn = random.randrange(0,27)
if wn == 1:
return "█ Now under New Management! █" #Avg line to line distance: 56
elif wn == 2:
return "█ Kappa! █" #Unfortunately, We're using prng to generate these "Random" numbers. Seems like we get the same number a couple times.
elif wn == 3:
return "█ Now with more sayings! █"
elif wn == 4:
return "█ Plot twist! █"
elif wn == 5:
return "█ Totally not a virus, Trust me i'm a dolphin! █"
elif wn == 6:
return "█ Now On GitHub! █"
elif wn == 7:
return "█ Unsupported! █"
elif wn == 8:
return "█ 01110111 01101111 01110111 00100001 █"
elif wn == 9:
return "█ >Install Gentoo! █"
elif wn == 10:
return "█ Welcome to the botnet! █"
elif wn == 11:
return "█ Now with Blast Processing! █"
elif wn == 12:
return "█ Your version of Flash Player is out of date! █"
elif wn == 13:
return "█ Uses UTF-8 Encoding! █"
elif wn == 14:
return "█ No Easter Eggs! █"
elif wn == 15:
return "█Pleasehelpmeivebeentrappedwritingwittylinesforthepastmon█"
elif wn == 16:
return "█ Ignore that! █"
elif wn == 17:
return "█ Made With GameMaker! █"
elif wn == 18:
return "█ I'm really hoping that SOEvm will be finished soon █"
elif wn == 19:
return "█ EOF inside string starting at line 891743 █"
elif wn == 20:
return "█ Everything probably still works! █"
elif wn == 21:
return "█Symmet---------------------|----------------------rical!█"
elif wn == 22:
return "█ IT'S SNOWING OUTSIDE GUYS! █"
elif wn == 23:
return "█ a day old meme █"
elif wn == 24:
return "█ Oh, FiddleSticks! █"
elif wn == 25:
return "█ @echo off █"
elif wn == 26:
return "█ Coded in Batch! █"
else:
return "█ Unimplemented! █"
def load():
    """Print the startup banner (with a random tagline) and enter the shell."""
    cls()
    print("▄████████████████████████████████████████████████████████▄")
    print("█▀ ▀█")
    print("█ PySOE DEV █")
    print("█ (Python Simulated Operating System Enviroment) █")
    print("█ v0.03a █")
    print("█ Updated on 12/11/16 █")
    print(cool())
    print("█▄ ▄█")
    print("▀████████████████████████████████████████████████████████▀")
    # pad with blank lines so the prompt sits below the banner
    for i in range(7):
        print("")
    print("Type '-help' to get started.")
    print("")
    funct()
def funct():
    """Read one command from stdin, handle it, then recurse for the next.

    NOTE(review): the shell loops by recursing into funct() after every
    command, so a very long session can hit Python's recursion limit.
    """
    func = input()
    # command listing
    if func == "-listcommands" or func == "-help":
        print("-listcommands: Lists Commands")
        print("-help: Lists Commands")
        print("-time: Shows System Time")
        print("-ver or -version: Shows OS version")
        print("-print or -pr: Prints to display")
        print("-prog or -programs: Lists all available installed programs")
        print("-ext or -extras: Extra side commands")
        print("-inst or -install: Installs a new program into your program list.")
        print("")
        funct()
    elif func == "-time":
        print(time.asctime())
        print("")
        funct()
    # register a new "program" by appending its name to progs.txt
    elif func == "-inst" or func == "-install":
        print("")
        print("Welcome to the Program Installer.")
        print("")
        print("Please make sure you have the program you want to install in the same directory as pysoe_shell.py")
        print("")
        print("What is the name of the program you will install? (Case Sensitive!) or type 'Cancel to cancel.")
        print("")
        inst = input("-inst: ")
        if inst == "cancel" or inst == "Cancel":
            print()
            funct()
        else:
            # NOTE(review): the file is opened with encoding='utf-7' (odd
            # choice) and flushed but never closed — confirm intent.
            f = open("progs.txt","a",encoding='utf-7')
            f.write("\n")
            f.write(inst)
            f.flush()
            print("")
            print("Completed Installation Successfully!")
            print("")
            funct()
    elif func == "-ver" or func == "-version":
        print("PySOE v0.03a Created on 12/11/16")
        print("")
        funct()
    # echo user input back to the display
    elif func == "-print" or func == "-pr":
        print1 = input("-pr: ")
        print("")
        print(print1)
        print("")
        funct()
    # list installed programs and optionally run one via __import__
    elif func == "-prog" or func == "-programs":
        print("")
        print("Your installed programs are:")
        f = open("progs.txt")
        print(f.read())
        print("")
        print("Run a program? Y/N")
        print("")
        progprompt = input(">")
        if progprompt == "y" or progprompt == "Y":
            print("")
            print("Input a program name (Case Sensitive) or type 'Cancel' to cancel.")
            prog = input(">")
            if prog == "cancel" or prog == "Cancel":
                funct()
            else:
                print("")
                print("Now Loading",prog)
                time.sleep(2)
                # importing the module runs it as a "program"
                __import__(prog)
        elif progprompt == "n" or progprompt == "N":
            print("")
            funct()
        else:
            print("Unsupported command!")
            funct()
    elif func == "-ext" or func == "-extras":
        print("")
        print("Extra commands are:")
        print("-beep or -boop: Beep")
        print("-his or -history: History of PYOS")
        print("")
        funct()
    # Windows-only beeper (winsound is imported lazily on first use)
    elif func == "-beep" or func == "-boop":
        import winsound
        print("")
        Freq = input("Enter Frequency in Hertz: ")
        Dur = input("Enter Length in Milliseconds: ")
        winsound.Beep(int(Freq),int(Dur))
        print("")
        print("done")
        print("")
        funct()
    elif func == "-his" or func == "-history":
        print("")
        print(" The History of PySOE (Python Simulated Operating System Environment)")
        print("")
        print("-------------------------------------------------------------------------------------------")
        print(" The first beginnings of PySOE started somewhere around October of 2015")
        print(" The first version didn't have much, just a fake login screen and")
        print(" a fake loading screen. There were only three commands. These commands inclue")
        print(" -help, -commands, and -ver. Those were all carried over. This program was")
        print(" started in my High School's Computer Science club while I was bored")
        print(" This 'OS' is heavily inspired by DOS, PC-DOS, and MS-DOS which is very")
        print(" evident by my functions that I have made. Most of them come from various")
        print(" DOS'. The first program written for PySOE was 'PYGraph' by Carson Goodwin")
        print(" which was not originally written for PySOE, but was adapted after v0.02c was")
        print(" published to GitHub. This is the most important parts of the whole history of PySOE.")
        print(" Hopefully many changes will come to this, most likely repurposing this High School Project")
        print("")
        print("-------------------------------------------------------------------------------------------")
        funct()
    else:
        print("bad sytnax")
        print("")
        funct()
# Entry point: start the fake-OS shell (blocks on interactive input()).
load()
|
LTEGaming/PySOE
|
Main-Dev/pysoe_shell.py
|
Python
|
bsd-3-clause
| 8,782
|
[
"BLAST"
] |
3cac434f080151afadd41a425395cddf7b5745341b00d0c80347e526e5f11fa5
|
#!/usr/bin/python2
""" run bowtie with specified parameter file
"""
import subprocess
from logging import getLogger
import os
import shlex
import argparse
import csv
from glob import glob
from multiprocessing.pool import ThreadPool
logger = getLogger('pijp.bowtie_wrapper')
def build_bowtie_command(fastq_file, index_file, number_of_threads, output_dir, extra_params):
    """Assemble the bowtie2 command line for one FASTQ file.

    The SAM output is named after the FASTQ basename and written into
    output_dir.  -p sets the thread count, -x the index prefix, -U the
    unpaired reads file, -S the SAM destination.
    """
    stem = os.path.splitext(os.path.basename(fastq_file))[0]
    sam_path = os.path.join(output_dir, stem + ".sam")
    return "bowtie2 -p {0} {1} -x {2} -U {3} -S {4} ".format(
        number_of_threads, extra_params, index_file, fastq_file, sam_path)
def run_cmd((cmd,fastq_file)):
    """ Run one bowtie command and return the stats row for the sample.

    Takes a single (cmd, fastq_file) tuple (Python-2 tuple-unpacking
    signature) so it can be mapped directly over a ThreadPool.  The
    alignment summary is parsed from bowtie's stderr; a non-zero exit
    raises AssertionError.  Returns [fastq_file] + stats fields.
    """
    logger.info("ran : " + cmd)
    # shell=True: cmd is a pre-built command string from build_bowtie_command
    pro = subprocess.Popen(cmd, shell=True, stderr = subprocess.PIPE)
    (x, stderr) = pro.communicate()
    assert (pro.returncode == 0 ), "bowtie error %d : %s" % (pro.returncode, stderr)
    new_row = ( [fastq_file] + get_stats(stderr.splitlines()) )
    logger.info("finished : " + cmd)
    return new_row
def main(input_files, index_file, number_of_threads, output_dir, bowtie_report_name,extra_params,procs=10):
    """Align every FASTQ file with bowtie2 (procs in parallel) and write a
    tab-separated per-sample alignment-statistics matrix to
    output_dir/bowtie_report_name.
    """
    report = []  # NOTE(review): never used below — candidate for removal
    ht_col2 = []
    base_names = []
    cmds = []
    # Build one bowtie2 command per input file.
    for fastq_file in input_files:
        base_names += [os.path.splitext(os.path.basename(fastq_file))[0]]
        # we need base_names for heading the matrix file.
        bt2_cmd = build_bowtie_command(fastq_file, index_file, number_of_threads, output_dir, extra_params)
        cmds.append((bt2_cmd,fastq_file))
    ht_col1 = "\n".join(base_names)  # NOTE(review): also unused below
    # Run the alignments concurrently; each result is a per-sample row.
    pool = ThreadPool(int(procs))
    results = pool.map(run_cmd, cmds)
    for res in results:
        htout = "\t".join(res)
        ht_col2.append(htout)
    matrix_header = "\t".join(['#sample','total', 'not_aligned', 'aligned_once', 'multi_aligned', '% mapped']) + "\n"
    matrix = "\n".join(ht_col2)
    filename = os.path.join(output_dir, bowtie_report_name)
    with open(filename, 'wb') as f:
        f.write(matrix_header)
        f.write(matrix)
def get_stats(bt_stderr):
    """Extract alignment counts from bowtie2's stderr summary lines.

    Only the final five lines carry the summary; anything before them is
    warning noise, which is logged as a single count and discarded.
    Returns [total, not_aligned, aligned_once, multi_aligned, percent]
    as strings, or all zeros when no reads were processed.
    """
    if len(bt_stderr) > 5:
        # everything before the 5-line summary is per-read warnings
        logger.info("Bowtie probably had {0} warnings [e.g. read too small]".format(len(bt_stderr)-4))
        bt_stderr = bt_stderr[-5:]
    first_token = lambda line: line.split()[0]
    if int(first_token(bt_stderr[0])) == 0:
        return ['0','0','0','0','0']
    counts = [str(int(first_token(bt_stderr[k]))) for k in range(4)]
    return counts + [first_token(bt_stderr[4])]
|
yanailab/CEL-Seq-pipeline
|
bowtie_wrapper.py
|
Python
|
gpl-3.0
| 3,008
|
[
"Bowtie"
] |
6a849f9529c68686ea38660523fbabd6d0feb90a5b63935b473f4ebb011c29d3
|
# coding: utf-8
from __future__ import unicode_literals
import copy
import glob
import json
import os
from unittest import TestCase
import unittest
from pymatgen import Molecule
from pymatgen.io.qchemio import QcTask, QcInput, QcOutput
__author__ = 'xiaohuiqu'
# Directory holding the molecule test fixtures shipped with pymatgen.
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
                        'test_files', "molecules")
# Methyl chloride: a simple 5-atom test molecule.
coords = [[0.000000, 0.000000, 0.000000],
          [0.000000, 0.000000, 1.089000],
          [1.026719, 0.000000, -0.363000],
          [-0.513360, -0.889165, -0.363000],
          [-0.513360, 0.889165, -0.363000]]
mol = Molecule(["C", "H", "H", "H", "Cl"], coords)
# Linear CdBr2: contains heavy elements for ECP-related tests.
coords2 = [[0.0, 0.0, -2.4],
           [0.0, 0.0, 0.0],
           [0.0, 0.0, 2.4]]
heavy_mol = Molecule(["Br", "Cd", "Br"], coords2)
# A water molecule, used as the second fragment in fragment tests.
coords3 = [[2.632273, -0.313504, -0.750376],
           [3.268182, -0.937310, -0.431464],
           [2.184198, -0.753305, -1.469059]]
water_mol = Molecule(["O", "H", "H"], coords3)
class TestQcTask(TestCase):
    def elementary_io_verify(self, text, qctask):
        """Round-trip qctask through both dict and string form and compare."""
        self.to_and_from_dict_verify(qctask)
        self.from_string_verify(contents=text, ref_dict=qctask.as_dict())
def to_and_from_dict_verify(self, qctask):
"""
Helper function. This function should be called in each specific test.
"""
d1 = qctask.as_dict()
qc2 = QcTask.from_dict(d1)
d2 = qc2.as_dict()
self.assertEqual(d1, d2)
def from_string_verify(self, contents, ref_dict):
qctask = QcTask.from_string(contents)
d2 = qctask.as_dict()
self.assertEqual(ref_dict, d2)
    def test_read_zmatrix(self):
        """Z-matrix molecule input must parse and convert to Cartesians."""
        contents = '''$moLEcule
1 2
S
C 1 1.726563
H 2 1.085845 1 119.580615
C 2 1.423404 1 114.230851 3 -180.000000 0
H 4 1.084884 2 122.286346 1 -180.000000 0
C 4 1.381259 2 112.717365 1 0.000000 0
H 6 1.084731 4 127.143779 2 -180.000000 0
C 6 1.415867 4 110.076147 2 0.000000 0
F 8 1.292591 6 124.884374 4 -180.000000 0
$end
$reM
BASIS = 6-31+G*
EXCHANGE = B3LYP
jobtype = freq
$end
'''
        qctask = QcTask.from_string(contents)
        ans = '''$molecule
1 2
S 0.00000000 0.00000000 0.00000000
C 0.00000000 0.00000000 1.72656300
H -0.94431813 0.00000000 2.26258784
C 1.29800105 -0.00000002 2.31074808
H 1.45002821 -0.00000002 3.38492732
C 2.30733813 -0.00000003 1.36781908
H 3.37622632 -0.00000005 1.55253338
C 1.75466906 -0.00000003 0.06427152
F 2.44231414 -0.00000004 -1.03023099
$end
$rem
jobtype = freq
exchange = b3lyp
basis = 6-31+g*
$end
'''
        # Compare the textual parts exactly, but the coordinate lines
        # numerically (tokens 2-4) to tolerate rounding differences.
        ans_tokens = ans.split('\n')
        ans_text_part = ans_tokens[:2] + ans_tokens[11:]
        ans_coords_part = ans_tokens[2:11]
        converted_tokens = str(qctask).split('\n')
        converted_text_part = converted_tokens[:2] + converted_tokens[11:]
        converted_coords_part = converted_tokens[2:11]
        self.assertEqual(ans_text_part, converted_text_part)
        for ans_coords, converted_coords in zip(ans_coords_part,
                                                converted_coords_part):
            ans_coords_tokens = ans_coords.split()
            converted_coords_tokens = converted_coords.split()
            self.assertEqual(ans_coords_tokens[0], converted_coords_tokens[0])
            xyz1 = ans_coords_tokens[1:]
            xyz2 = converted_coords_tokens[1:]
            for t1, t2 in zip(xyz1, xyz2):
                self.assertTrue(abs(float(t1)-float(t2)) < 0.0001)
    def test_no_mol(self):
        """molecule="READ" must serialise to a literal 'read' directive."""
        ans = '''$comment
Test Methane
$end
$molecule
-1 2
read
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
        qctask = QcTask(molecule="READ", title="Test Methane",
                        exchange="B3LYP", jobtype="SP", charge=-1,
                        spin_multiplicity=2,
                        basis_set="6-31+G*")
        self.assertEqual(str(qctask), ans)
        self.elementary_io_verify(ans, qctask)
    def test_simple_basis_str(self):
        """A single basis-set string serialises into the $rem section."""
        ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
        qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                        jobtype="SP",
                        basis_set="6-31+G*")
        self.assertEqual(str(qctask), ans)
        self.elementary_io_verify(ans, qctask)
    def test_fragmented_molecule(self):
        """Two charged fragments serialise as '--'-separated blocks."""
        mol1 = copy.deepcopy(mol)
        mol1.set_charge_and_spin(1, 2)
        mol2 = copy.deepcopy(water_mol)
        mol2.set_charge_and_spin(-1, 2)
        # overall charge 0, triplet; per-fragment charge/spin follow each '--'
        qctask = QcTask([mol1, mol2], title="Test Fragments", exchange="B3LYP",
                        jobtype="bsse", charge=0, spin_multiplicity=3, basis_set="6-31++G**")
        ans = """$comment
Test Fragments
$end
$molecule
0 3
--
1 2
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
--
-1 2
O 2.63227300 -0.31350400 -0.75037600
H 3.26818200 -0.93731000 -0.43146400
H 2.18419800 -0.75330500 -1.46905900
$end
$rem
jobtype = bsse
exchange = b3lyp
basis = 6-31++g**
$end
"""
        self.assertEqual(str(qctask), ans)
        self.elementary_io_verify(ans, qctask)
    def test_mixed_basis_str(self):
        """Per-atom basis lists produce 'basis = mixed' plus a $basis block,
        and set_basis_set() can switch back and forth."""
        qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                        jobtype="SP",
                        basis_set=[("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"),
                                   ("H", "6-31g*"), ("cl", "6-31+g*")])
        ans_mixed = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = mixed
$end
$basis
C 1
6-311g*
****
H 2
6-31g(d,p)
****
H 3
6-31g(d,p)
****
H 4
6-31g*
****
Cl 5
6-31+g*
****
$end
"""
        self.assertEqual(ans_mixed, str(qctask))
        self.elementary_io_verify(ans_mixed, qctask)
        # switching to a single basis string collapses the $basis block
        qctask.set_basis_set("6-31+G*")
        ans_simple = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
"""
        self.assertEqual(str(qctask), ans_simple)
        # and back again to the mixed per-atom form
        qctask.set_basis_set([("C", "6-311G*"), ("H", "6-31g(d,p)"), ("H", "6-31g(d,p)"),
                              ("H", "6-31g*"), ("cl", "6-31+g*")])
        self.assertEqual(str(qctask), ans_mixed)
        self.elementary_io_verify(ans_mixed, qctask)
    def test_partial_hessian(self):
        """Partial-Hessian input round-trips, and set_partial_hessian_atoms
        rewrites n_sol/$alist (and evidently resets jobtype/phess — see the
        second expected string below)."""
        qcinp1 = QcInput.from_file(os.path.join(test_dir, "partial_hessian.qcinp"))
        ans = """$molecule
0 1
C -1.76827000 0.46495000 0.28695000
O 1.78497000 -0.42034000 -0.39845000
H -0.77736000 0.78961000 0.66548000
H -1.75896000 0.46604000 -0.82239000
H -2.54983000 1.16313000 0.65101000
H -1.98693000 -0.55892000 0.65381000
H 2.14698000 -0.07173000 0.45530000
H 1.25596000 -1.21510000 -0.13726000
$end
$rem
jobtype = freq
exchange = b3lyp
basis = 6-31g*
n_sol = 3
phess = true
$end
$alist
3
7
8
$end
"""
        self.assertEqual(ans, str(qcinp1))
        self.elementary_io_verify(ans, qcinp1.jobs[0])
        # mutate the rem section, then let the setter rebuild the alist
        qcinp1.jobs[0].params["rem"]["jobtype"] = "sp"
        qcinp1.jobs[0].params["rem"]["phess"] = 3
        qcinp1.jobs[0].set_partial_hessian_atoms([2, 3, 4, 5, 6])
        ans = """$molecule
0 1
C -1.76827000 0.46495000 0.28695000
O 1.78497000 -0.42034000 -0.39845000
H -0.77736000 0.78961000 0.66548000
H -1.75896000 0.46604000 -0.82239000
H -2.54983000 1.16313000 0.65101000
H -1.98693000 -0.55892000 0.65381000
H 2.14698000 -0.07173000 0.45530000
H 1.25596000 -1.21510000 -0.13726000
$end
$rem
jobtype = freq
exchange = b3lyp
basis = 6-31g*
n_sol = 5
phess = True
$end
$alist
2
3
4
5
6
$end
"""
        self.assertEqual(ans, str(qcinp1))
    def test_basis2_mixed(self):
        """A per-atom basis2 round-trips; set_basis2 can swap between a
        plain string (rem entry only) and a per-atom $basis2 section."""
        qcinp1 = QcInput.from_file(os.path.join(test_dir, "basis2_mixed.inp"))
        ans = """$molecule
0 1
C -1.76827000 0.46495000 0.28695000
O 1.78497000 -0.42034000 -0.39845000
H -0.77736000 0.78961000 0.66548000
H -1.75896000 0.46604000 -0.82239000
H -2.54983000 1.16313000 0.65101000
H -1.98693000 -0.55892000 0.65381000
H 2.14698000 -0.07173000 0.45530000
H 1.25596000 -1.21510000 -0.13726000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = mixed
basis2 = basis2_mixed
purecart = 1111
$end
$basis
C 1
6-311+g(3df)
****
O 2
aug-cc-pvtz
****
H 3
6-31g*
****
H 4
6-31g*
****
H 5
6-31g*
****
H 6
6-31g*
****
H 7
cc-pvdz
****
H 8
cc-pvdz
****
$end
$basis2
C 1
sto-3g
****
O 2
sto-3g
****
H 3
sto-3g
****
H 4
sto-3g
****
H 5
sto-3g
****
H 6
sto-3g
****
H 7
sto-3g
****
H 8
sto-3g
****
$end
"""
        self.assertEqual(str(qcinp1), ans)
        self.elementary_io_verify(ans, qcinp1.jobs[0])
        # a plain string basis2 moves into $rem and drops the $basis2 block
        basis2 = qcinp1.jobs[0].params["basis2"]
        qcinp2 = copy.deepcopy(qcinp1)
        qcinp2.jobs[0].set_basis2("3-21g")
        self.assertEqual(qcinp2.jobs[0].params["rem"]["basis2"], "3-21g")
        self.assertFalse("basis2" in qcinp2.jobs[0].params)
        # restoring the per-atom list reproduces the original text
        qcinp2.jobs[0].set_basis2(basis2)
        self.assertEqual(str(qcinp2), ans)
def test_aux_basis_str(self):
    """Exercise auxiliary basis formatting: per-element "gen" sections,
    atom-indexed "mixed" sections, and the plain single-name form, plus
    switching between them via the setter methods."""
    ans_gen = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = freq
exchange = xygjos
basis = gen
aux_basis = gen
$end
$aux_basis
C
rimp2-cc-pvdz
****
Cl
rimp2-aug-cc-pvdz
****
H
rimp2-cc-pvdz
****
$end
$basis
C
6-31g*
****
Cl
6-31+g*
****
H
6-31g*
****
$end
'''
    # Element keys are deliberately mixed-case to check normalization.
    qctask = QcTask(mol, title="Test Methane", exchange="xygjos",
                    jobtype="Freq",
                    basis_set={"C": "6-31G*", "h": "6-31g*",
                               "CL": "6-31+g*"},
                    aux_basis_set={"c": "rimp2-cc-pvdz",
                                   "H": "rimp2-cc-pvdz",
                                   "Cl": "rimp2-aug-cc-pvdz"})
    self.assertEqual(str(qctask), ans_gen)
    self.elementary_io_verify(ans_gen, qctask)
    # A per-atom list switches aux_basis to "mixed" with 1-based atom indices.
    qctask.set_auxiliary_basis_set([("C", "aug-cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"),
                                    ("H", "cc-pvdz"), ("cl", "rimp2-aug-cc-pvdz")])
    ans_mixed_aux = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = freq
exchange = xygjos
basis = gen
aux_basis = mixed
$end
$aux_basis
C 1
aug-cc-pvdz
****
H 2
cc-pvdz
****
H 3
cc-pvdz
****
H 4
cc-pvdz
****
Cl 5
rimp2-aug-cc-pvdz
****
$end
$basis
C
6-31g*
****
Cl
6-31+g*
****
H
6-31g*
****
$end
"""
    self.assertEqual(ans_mixed_aux, str(qctask))
    self.elementary_io_verify(ans_mixed_aux, qctask)
    # Plain string names collapse both sections into simple $rem values.
    qctask.set_basis_set("6-31+G*")
    qctask.set_auxiliary_basis_set("rimp2-cc-pvdz")
    ans_simple = """$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = freq
exchange = xygjos
basis = 6-31+g*
aux_basis = rimp2-cc-pvdz
$end
"""
    self.assertEqual(ans_simple, str(qctask))
    self.elementary_io_verify(ans_simple, qctask)
    # Restoring the dict/list forms must regenerate the mixed-aux text.
    qctask.set_basis_set({"C": "6-31G*", "h": "6-31g*",
                          "CL": "6-31+g*"})
    qctask.set_auxiliary_basis_set([("C", "aug-cc-pvdz"), ("H", "cc-pvdz"), ("H", "cc-pvdz"),
                                    ("H", "cc-pvdz"), ("cl", "rimp2-aug-cc-pvdz")])
    self.assertEqual(ans_mixed_aux, str(qctask))
    self.elementary_io_verify(ans_mixed_aux, qctask)
def test_ecp_str(self):
    """ECP and basis dicts (with mixed-case values) are normalized to lower
    case and emitted as paired $basis/$ecp "gen" sections."""
    ans = '''$comment
Test ECP
$end
$molecule
0 1
Br 0.00000000 0.00000000 -2.40000000
Cd 0.00000000 0.00000000 0.00000000
Br 0.00000000 0.00000000 2.40000000
$end
$rem
jobtype = opt
exchange = b3lyp
basis = gen
ecp = gen
$end
$basis
Br
srlc
****
Cd
srsc
****
$end
$ecp
Br
srlc
****
Cd
srsc
****
$end
'''
    # "SrlC" checks that ECP names are lower-cased on output.
    qctask = QcTask(heavy_mol, title="Test ECP", exchange="B3LYP",
                    jobtype="Opt",
                    basis_set={"Br": "srlc", "Cd": "srsc"},
                    ecp={"Br": "SrlC", "Cd": "srsc"})
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_memory(self):
    """set_memory() writes mem_static and mem_total (MB) into $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
mem_static = 500
mem_total = 18000
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_memory(total=18000, static=500)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_max_num_of_scratch_files(self):
    """set_max_num_of_scratch_files() sets max_sub_file_num in $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
max_sub_file_num = 500
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_max_num_of_scratch_files(500)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_max_scf_iterations(self):
    """set_scf_algorithm_and_iterations() sets both scf_algorithm and
    max_scf_cycles in $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
max_scf_cycles = 100
scf_algorithm = diis_gdm
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_scf_algorithm_and_iterations(algorithm="diis_gdm",
                                            iterations=100)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_scf_convergence_threshold(self):
    """set_scf_convergence_threshold() writes the 10^-exponent threshold
    as scf_convergence in $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
scf_convergence = 8
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_scf_convergence_threshold(exponent=8)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_integral_threshold(self):
    """set_integral_threshold() writes thresh into $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
thresh = 14
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_integral_threshold(thresh=14)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_dft_grid(self):
    """set_dft_grid() encodes the (radial, angular) point counts into the
    packed xc_grid $rem value (here 110/590 -> 000110000590)."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
xc_grid = 000110000590
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    # NOTE: "radical_points" is the API's (misspelled) keyword for radial points.
    qctask.set_dft_grid(radical_points=110, angular_points=590)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_scf_initial_guess(self):
    """set_scf_initial_guess() lower-cases the guess name into scf_guess."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
scf_guess = gwh
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_scf_initial_guess("GWH")
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_geom_opt_max_cycles(self):
    """set_geom_max_iterations() sets geom_opt_max_cycles; also covers a
    non-default charge/spin (1, 2) appearing in $molecule."""
    ans = '''$comment
Test Methane
$end
$molecule
1 2
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
geom_opt_max_cycles = 100
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP", charge=1, spin_multiplicity=2,
                    basis_set="6-31+G*")
    qctask.set_geom_max_iterations(100)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_geom_opt_coords_type(self):
    """set_geom_opt_coords_type("cartesian") maps to geom_opt_coords = 0."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
geom_opt_coords = 0
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_geom_opt_coords_type("cartesian")
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_scale_geom_opt_threshold(self):
    """scale_geom_opt_threshold() scales the default convergence tolerances;
    a 0.1 factor yields tol_displacement 120, tol_energy 10, tol_gradient 30."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
geom_opt_tol_displacement = 120
geom_opt_tol_energy = 10
geom_opt_tol_gradient = 30
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.scale_geom_opt_threshold(gradient=0.1, displacement=0.1,
                                    energy=0.1)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_set_geom_opt_use_gdiis(self):
    """set_geom_opt_use_gdiis() enables GDIIS via geom_opt_max_diis = -1."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
geom_opt_max_diis = -1
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.set_geom_opt_use_gdiis()
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_disable_symmetry(self):
    """disable_symmetry() sets sym_ignore = True and symmetry = False."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
sym_ignore = True
symmetry = False
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.disable_symmetry()
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_use_cosmo(self):
    """use_cosmo() sets solvent_method = cosmo with the given dielectric."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
solvent_dielectric = 35.0
solvent_method = cosmo
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.use_cosmo(dielectric_constant=35.0)
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_wrap_comment(self):
    """Long titles are wrapped across lines in the $comment section, while
    embedded <SCF Fix Strategy> JSON payloads must survive verbatim."""
    ans = '''$comment
5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole
singlet neutral B3lYP/6-31+G* geometry optimization
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
    qctask = QcTask(mol, title=" 5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole singlet "
                               "neutral B3lYP/6-31+G* geometry optimization", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
    # A title containing a multi-line <SCF Fix Strategy> JSON block must be
    # preserved line-for-line in the $comment section.
    title = ''' MgBPh42 singlet neutral PBE-D3/6-31+G* geometry optimization
<SCF Fix Strategy>{
"current_method_id": 1,
"methods": [
"increase_iter",
"diis_gdm",
"gwh",
"rca",
"gdm",
"core+gdm"
]
}</SCF Fix Strategy>'''
    ans = '''$comment
MgBPh42 singlet neutral PBE-D3/6-31+G* geometry optimization
<SCF Fix Strategy>{
"current_method_id": 1,
"methods": [
"increase_iter",
"diis_gdm",
"gwh",
"rca",
"gdm",
"core+gdm"
]
}</SCF Fix Strategy>
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
'''
    qctask = QcTask(mol, title=title, exchange="B3LYP", jobtype="SP", basis_set="6-31+G*")
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
    # Wrapped long title combined with an SCF-fix payload: only check that
    # text -> object -> text round-trips (no golden string).
    title = " 5_2_2_methoxyethoxy_ethoxy_6_nitro_1_3_dihydro_2_1_3_benzothiadiazole singlet neutral " \
            "B3lYP/6-31+G* geometry optimization" + \
            '''<SCF Fix Strategy>{
"current_method_id": 1,
"methods": [
"increase_iter",
"diis_gdm",
"gwh",
"rca",
"gdm",
"core+gdm"
]
}</SCF Fix Strategy>'''
    qctask = QcTask(mol, title=title, exchange="B3LYP", jobtype="SP", basis_set="6-31+G*")
    self.elementary_io_verify(str(qctask), qctask)
def test_use_pcm(self):
    """use_pcm() with defaults emits $pcm/$pcm_solvent sections; custom pcm
    and solvent parameters (including multi-row SolventAtom) are sorted and
    formatted, and a radii force field is echoed into $rem."""
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
solvent_method = pcm
$end
$pcm
radii uff
theory ssvpe
vdwscale 1.1
$end
$pcm_solvent
dielectric 78.3553
$end
'''
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.use_pcm()
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
    qctask = QcTask(mol, title="Test Methane", exchange="B3LYP",
                    jobtype="SP",
                    basis_set="6-31+G*")
    qctask.use_pcm(pcm_params={"Radii": "FF",
                               "Theory": "CPCM",
                               "SASrad": 1.5,
                               "HPoints": 1202},
                   solvent_params={"Dielectric": 20.0,
                                   "Temperature": 300.75,
                                   "NSolventAtoms": 2,
                                   "SolventAtom": [[8, 1, 186, 1.30],
                                                   [1, 2, 187, 1.01]]},
                   radii_force_field="OPLSAA")
    # NOTE: "force_fied" below matches the implementation's actual $rem key.
    ans = '''$comment
Test Methane
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
force_fied = oplsaa
solvent_method = pcm
$end
$pcm
hpoints 1202
radii bondi
sasrad 1.5
theory cpcm
vdwscale 1.1
$end
$pcm_solvent
dielectric 20.0
nsolventatoms 2
solventatom 8 1 186 1.30
solventatom 1 2 187 1.01
temperature 300.75
$end
'''
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
def test_ghost_atoms(self):
    """Ghost atoms are written with an "@" prefix in $molecule; also checks
    ghost atoms across molecule fragments and that an explicit spin
    multiplicity is kept when atoms are ghosted."""
    qctask = QcTask(mol, charge=0, spin_multiplicity=1, exchange="B3LYP", ghost_atoms=[2, 4])
    ans = """$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
@H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
@Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-31+g*
$end
"""
    self.assertEqual(str(qctask), ans)
    self.elementary_io_verify(ans, qctask)
    # Multi-fragment (bsse) job with ghost atoms: round-trip only.
    mol1 = copy.deepcopy(mol)
    mol1.set_charge_and_spin(1, 2)
    mol2 = copy.deepcopy(water_mol)
    mol2.set_charge_and_spin(-1, 2)
    qctask = QcTask([mol1, mol2], title="Test Fragments", exchange="B3LYP",
                    jobtype="bsse", charge=0, spin_multiplicity=3, basis_set="6-31++G**",
                    ghost_atoms=[1, 2, 3, 5])
    self.elementary_io_verify(str(qctask), qctask)
    # Ghosting an atom must not reset the declared spin multiplicity.
    qctask = QcTask(mol, charge=0, spin_multiplicity=2, exchange="B3LYP", ghost_atoms=[2])
    self.assertEqual(qctask.spin_multiplicity, 2)
class TestQcInput(TestCase):
    """Tests for multi-job QcInput containers: text serialization with the
    @@@ job separator and dict round-tripping."""

    def test_str_and_from_string(self):
        """A three-job input (opt, freq, sp with "read" molecules) serializes
        with @@@ separators and parses back to an equal object; also checks
        ghost-atom detection when reading an input file."""
        ans = '''$comment
Test Methane Opt
$end
$molecule
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.08900000
H 1.02671900 0.00000000 -0.36300000
H -0.51336000 -0.88916500 -0.36300000
Cl -0.51336000 0.88916500 -0.36300000
$end
$rem
jobtype = opt
exchange = b3lyp
basis = 6-31+g*
$end
@@@
$comment
Test Methane Frequency
$end
$molecule
read
$end
$rem
jobtype = freq
exchange = b3lyp
basis = 6-31+g*
$end
@@@
$comment
Test Methane Single Point Energy
$end
$molecule
read
$end
$rem
jobtype = sp
exchange = b3lyp
basis = 6-311+g(3df,2p)
$end
'''
        qctask1 = QcTask(mol, title="Test Methane Opt", exchange="B3LYP",
                         jobtype="Opt", basis_set="6-31+G*")
        qctask2 = QcTask(molecule="read", title="Test Methane Frequency",
                         exchange="B3LYP", jobtype="Freq", basis_set="6-31+G*")
        qctask3 = QcTask(title="Test Methane Single Point Energy",
                         exchange="B3LYP", jobtype="SP",
                         basis_set="6-311+G(3df,2p)")
        qcinp1 = QcInput(jobs=[qctask1, qctask2, qctask3])
        self.assertEqual(str(qcinp1), ans)
        qcinp2 = QcInput.from_string(ans)
        self.assertEqual(qcinp1.as_dict(), qcinp2.as_dict())
        qcinp_mgbf4 = QcInput.from_file(os.path.join(test_dir, "MgBF4_b_overalpped.qcinp"))
        self.assertEqual(qcinp_mgbf4.jobs[0].ghost_atoms, [0])

    def test_to_and_from_dict(self):
        """as_dict()/from_dict() round-trip preserves a multi-job input."""
        qctask1 = QcTask(mol, title="Test Methane Opt", exchange="B3LYP",
                         jobtype="Opt", basis_set="6-31+G*")
        qctask2 = QcTask(molecule="read", title="Test Methane Frequency",
                         exchange="B3LYP", jobtype="Freq",
                         basis_set="6-31+G*")
        qctask3 = QcTask(title="Test Methane Single Point Energy",
                         exchange="B3LYP", jobtype="SP",
                         basis_set="6-311+G(3df,2p)")
        qcinp1 = QcInput(jobs=[qctask1, qctask2, qctask3])
        d1 = qcinp1.as_dict()
        qcinp2 = QcInput.from_dict(d1)
        d2 = qcinp2.as_dict()
        self.assertEqual(d1, d2)
class TestQcOutput(TestCase):
def test_energy(self):
    """Parse every reference output in qchem_energies/*.qcout and compare
    the extracted (method -> energy) pairs against the JSON baseline."""
    ref_energies_text = '''
{
"hf-rimp2.qcout": {
"RIMP2": -2726.6860779805256,
"SCF": -2721.541435904716
},
"hf_b3lyp.qcout": {
"SCF": -2733.1747178920828
},
"hf_ccsd(t).qcout": {
"CCSD": -2726.7627121001865,
"CCSD(T)": -2726.8283514003333,
"MP2": -2726.685664155242,
"SCF": -2721.5414360843106
},
"hf_cosmo.qcout": {
"SCF": -2721.1752937496067
},
"hf_hf.qcout": {
"SCF": -2721.541435904716
},
"hf_lxygjos.qcout": {
"SCF": -2724.0769973875713,
"XYGJ-OS": -2726.3445157759393
},
"hf_mosmp2.qcout": {
"MOS-MP2": -2725.302538779482,
"SCF": -2721.541435904716
},
"hf_mp2.qcout": {
"MP2": -2726.685661962005,
"SCF": -2721.541435904716
},
"hf_pcm.qcout": {
"SCF": -2720.703940318968
},
"hf_qcisd(t).qcout": {
"QCISD": -2726.7853751012344,
"QCISD(T)": -2726.8346541282745,
"SCF": -2721.5414360843106
},
"hf_riccsd(t).qcout": {
"CCSD": -2726.7641790658904,
"CCSD(T)": -2726.829853468723,
"MP2": -2726.6860802173014,
"SCF": -2721.5414360843106
},
"hf_tpssh.qcout": {
"SCF": -2732.938974944255
},
"hf_xyg3.qcout": {
"SCF": -2728.769906036435,
"XYG3": -2731.0640917605806
},
"hf_xygjos.qcout": {
"SCF": -2724.0769973875713,
"XYGJ-OS": -2726.3447230967517
}
}'''
    ref_energies = json.loads(ref_energies_text)
    parsed_energies = dict()
    # noinspection PyUnresolvedReferences
    for filename in glob.glob(os.path.join(test_dir, "qchem_energies",
                                           "*.qcout")):
        molname = os.path.basename(filename)
        qcout = QcOutput(filename)
        d = dict(qcout.data[0]["energies"])
        parsed_energies[molname] = d
    # Same set of files, then same methods per file, then (approximately)
    # equal energies — floats are compared with assertAlmostEqual.
    self.assertEqual(sorted(ref_energies.keys()),
                     sorted(parsed_energies.keys()))
    mols = sorted(ref_energies.keys())
    for molname in mols:
        self.assertEqual(sorted(ref_energies[molname].keys()),
                         sorted(parsed_energies[molname].keys()))
        methods = sorted(ref_energies[molname].keys())
        for method in methods:
            self.assertAlmostEqual(ref_energies[molname][method],
                                   parsed_energies[molname][method])
def test_unable_to_determine_lambda_in_geom_opt(self):
    """A geometry optimization that fails lambda determination is flagged
    with both the specific error and the generic optimization failure."""
    out_path = os.path.join(test_dir, "unable_to_determine_lambda_in_geom_opt.qcout")
    first_job = QcOutput(out_path).data[0]
    self.assertTrue(first_job['has_error'])
    expected_errors = ['Lamda Determination Failed',
                       'Geometry optimization failed']
    self.assertEqual(first_job['errors'], expected_errors)
def test_geom_opt(self):
    """Full parse of a successful geometry optimization output: per-step SCF
    energies, first/last geometries, gradients, the echoed input deck, the
    graceful-termination flag, and per-step SCF iteration histories."""
    filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout")
    qcout = QcOutput(filename)
    self.assertEqual(qcout.data[0]["jobtype"], "opt")
    # One SCF energy per optimization step.
    ans_energies = [('SCF', -20179.88483906483),
                    ('SCF', -20180.120269846386),
                    ('SCF', -20180.14892206486),
                    ('SCF', -20180.150026022537),
                    ('SCF', -20180.15020789526),
                    ('SCF', -20180.150206202714)]
    self.assertEqual(qcout.data[0]["energies"], ans_energies)
    ans_mol1 = '''Full Formula (H4 C5 S1 O2)
Reduced Formula: H4C5SO2
Charge = -1, Spin Mult = 2
Sites (12)
0 C 0.158839 -0.165379 0.000059
1 C -0.520531 -1.366720 0.000349
2 C -1.930811 -1.198460 -0.000041
3 C -2.297971 0.127429 -0.000691
4 S -0.938312 1.189630 0.000400
5 H -0.014720 -2.325340 0.000549
6 H -2.641720 -2.017721 -0.000161
7 H -3.301032 0.535659 -0.001261
8 C 1.603079 0.076231 -0.000101
9 O 2.131988 1.173581 -0.000330
10 O 2.322109 -1.079218 -0.000021
11 H 3.262059 -0.820188 -0.000171'''
    ans_mol_last = '''Full Formula (H4 C5 S1 O2)
Reduced Formula: H4C5SO2
Charge = -1, Spin Mult = 2
Sites (12)
0 C 0.194695 -0.158362 -0.001887
1 C -0.535373 -1.381241 -0.001073
2 C -1.927071 -1.199274 -0.000052
3 C -2.332651 0.131916 0.000329
4 S -0.942111 1.224916 -0.001267
5 H -0.038260 -2.345185 -0.001256
6 H -2.636299 -2.025939 0.000620
7 H -3.339756 0.529895 0.001288
8 C 1.579982 0.071245 -0.002733
9 O 2.196383 1.165675 -0.000178
10 O 2.352341 -1.114671 0.001634
11 H 3.261096 -0.769470 0.003158'''
    self.assertEqual(qcout.data[0]["molecules"][0].__str__(), ans_mol1)
    self.assertEqual(str(qcout.data[0]["molecules"][-1]), ans_mol_last)
    self.assertFalse(qcout.data[0]["has_error"])
    # One gradient record (per-atom vectors plus max/rms) per opt step.
    ans_gradient = [{'max_gradient': 0.07996,
                     'gradients': [(-0.0623076, -0.0157774, -2.05e-05),
                                   (0.0260287, 0.0289157, -6e-06),
                                   (-0.015738, 0.0103583, 1.87e-05),
                                   (0.0260219, -0.0028, -1.36e-05),
                                   (-0.0043158, -0.0245896, 2.83e-05),
                                   (4.8e-05, 0.000782, 1.3e-06),
                                   (0.0014679, 0.0020277, 3.9e-06),
                                   (0.0010437, -1.29e-05, -1.04e-05),
                                   (0.0799585, 0.0204159, 1e-06),
                                   (-0.0320357, -0.0421461, 2.1e-06),
                                   (-0.0237691, 0.0247526, -4.6e-06),
                                   (0.0035975, -0.0019264, -3e-07)],
                     'rms_gradient': 0.02244},
                    {'max_gradient': 0.02721,
                     'gradients': [(-0.0195677, -0.0008468, -3.2e-06),
                                   (0.0106798, 0.0039494, 1.11e-05),
                                   (-0.0086473, -0.0012624, -8.1e-06),
                                   (0.0065018, 0.0033749, 5e-07),
                                   (0.0002581, -0.0060831, 7.2e-06),
                                   (-0.0004373, -0.000504, 1.4e-06),
                                   (0.0003216, 0.0001059, -9e-07),
                                   (-0.000814, -5.03e-05, 3e-07),
                                   (0.0272109, 0.001408, -2.06e-05),
                                   (-0.0086971, -0.009251, 8.3e-06),
                                   (-0.0080925, 0.0112191, 2.9e-06),
                                   (0.0012838, -0.0020597, 1.1e-06)],
                     'rms_gradient': 0.007037},
                    {'max_gradient': 0.003444,
                     'gradients': [(0.0021606, 0.0013094, -1.68e-05),
                                   (0.0005757, -0.0002616, -1e-05),
                                   (2.73e-05, -0.0002868, 1.5e-05),
                                   (0.0001088, 0.0006944, -1.23e-05),
                                   (0.0006912, -0.0006523, 6.1e-06),
                                   (-0.0004191, -9.32e-05, -1.3e-06),
                                   (0.0002288, 3.98e-05, 1.8e-06),
                                   (-8.99e-05, -0.0002338, -3.2e-06),
                                   (1.95e-05, -0.0034439, 7.08e-05),
                                   (-0.0008228, -9.18e-05, -2.77e-05),
                                   (-0.0018054, 0.0034031, -2.21e-05),
                                   (-0.0006747, -0.0003834, -3e-07)],
                     'rms_gradient': 0.001008},
                    {'max_gradient': 0.002367,
                     'gradients': [(-0.0001646, 0.0006149, 4.17e-05),
                                   (-0.0004516, -0.0003116, 1.28e-05),
                                   (0.0003366, -3.27e-05, -1.59e-05),
                                   (-0.0003164, 0.0001775, 1.37e-05),
                                   (0.0001399, -0.0001201, -6.9e-06),
                                   (-0.0001374, -1.58e-05, 9e-07),
                                   (-1.19e-05, -3.93e-05, -3.3e-06),
                                   (-1.76e-05, -0.0001233, 5.1e-06),
                                   (9.73e-05, -0.0023668, -0.0001609),
                                   (0.0006998, 0.0009023, 6.31e-05),
                                   (-0.0002169, 0.0014874, 4.95e-05),
                                   (4.28e-05, -0.0001724, 2e-07)],
                     'rms_gradient': 0.0005339},
                    {'max_gradient': 0.001246,
                     'gradients': [(-6.88e-05, 0.0001757, -8.32e-05),
                                   (-0.0002264, -0.0001306, -1.93e-05),
                                   (0.0001526, -1.39e-05, 2.05e-05),
                                   (-0.0001401, 3.8e-06, -2.05e-05),
                                   (1.52e-05, 0.0001152, 8e-06),
                                   (2.01e-05, -3.69e-05, -1e-06),
                                   (-3.62e-05, -3.51e-05, 5.5e-06),
                                   (1.01e-05, -1.23e-05, -6.8e-06),
                                   (9.73e-05, -0.0012462, 0.0003246),
                                   (0.0003926, 0.0008331, -0.0001269),
                                   (-0.0002294, 0.000281, -0.0001009),
                                   (1.3e-05, 6.61e-05, 0.0)],
                     'rms_gradient': 0.0002814},
                    {'max_gradient': 0.0006359,
                     'gradients': [(0.0001036, -0.0001339, 0.0001633),
                                   (0.0001003, 6.98e-05, 3.43e-05),
                                   (-8.28e-05, 1.1e-05, -3.31e-05),
                                   (6.2e-05, -0.0001068, 3.41e-05),
                                   (-5.02e-05, 0.0001346, -1.18e-05),
                                   (8.72e-05, -7.3e-06, 1.5e-06),
                                   (-1.7e-05, 4.9e-06, -1.05e-05),
                                   (1.29e-05, 5.9e-05, 1.26e-05),
                                   (-0.0001059, -5.4e-06, -0.0006359),
                                   (-1.48e-05, 0.0002152, 0.0002469),
                                   (-0.0001335, -0.0003534, 0.0001988),
                                   (3.83e-05, 0.0001124, -1e-07)],
                     'rms_gradient': 0.0001535}]
    self.assertEqual(qcout.data[0]["gradients"], ans_gradient)
    # The echoed input deck is parsed back into a QcTask.
    ans_inp = '''$molecule
-1 2
C 0.15884000 -0.16538000 0.00006000
C -0.52053000 -1.36672000 0.00035000
C -1.93081000 -1.19846000 -0.00004000
C -2.29797000 0.12743000 -0.00069000
S -0.93831000 1.18963000 0.00040000
H -0.01472000 -2.32534000 0.00055000
H -2.64172000 -2.01772000 -0.00016000
H -3.30103000 0.53566000 -0.00126000
C 1.60308000 0.07623000 -0.00010000
O 2.13199000 1.17358000 -0.00033000
O 2.32211000 -1.07922000 -0.00002000
H 3.26206000 -0.82019000 -0.00017000
$end
$rem
jobtype = opt
exchange = b3lyp
basis = 6-31+g*
$end
'''
    self.assertEqual(str(qcout.data[0]['input']), ans_inp)
    self.assertTrue(qcout.data[0]['gracefully_terminated'])
    # One (energy, DIIS error) list per optimization step.
    ans_scf_iter = [[(-743.3130310589, 0.0561),
                     (-741.3557302205, 0.00841),
                     (-740.7031048846, 0.0157),
                     (-741.5589873953, 0.00303),
                     (-741.5918010434, 0.00118),
                     (-741.5966923809, 0.000332),
                     (-741.5970287119, 0.000158),
                     (-741.5971282029, 4.38e-05),
                     (-741.5971448077, 2.17e-05),
                     (-741.5971501973, 7.7e-06),
                     (-741.5971533576, 5.05e-06),
                     (-741.5971541122, 2.7e-06),
                     (-741.5971544119, 9.48e-07),
                     (-741.5971544408, 2.61e-07),
                     (-741.5971544436, 1.21e-07),
                     (-741.5971544441, 5.45e-08),
                     (-741.5971544442, 1.77e-08),
                     (-741.5971544442, 7.79e-09)],
                    [(-741.5552794274, 0.00265),
                     (-741.6048574279, 0.000515),
                     (-741.6037290502, 0.000807),
                     (-741.6056978336, 0.000188),
                     (-741.6057976553, 4.78e-05),
                     (-741.6058045572, 1.54e-05),
                     (-741.6058057373, 4.51e-06),
                     (-741.6058061671, 2.91e-06),
                     (-741.6058062822, 8.32e-07),
                     (-741.6058063435, 7.17e-07),
                     (-741.6058063636, 1.97e-07),
                     (-741.6058063662, 5.03e-08),
                     (-741.6058063666, 3.35e-08),
                     (-741.6058063666, 1.24e-08),
                     (-741.6058063666, 5.25e-09)],
                    [(-741.6023833754, 0.0013),
                     (-741.6065067966, 0.000305),
                     (-741.6057886337, 0.000559),
                     (-741.6068434004, 7.61e-05),
                     (-741.6068555361, 3.4e-05),
                     (-741.6068589376, 5.66e-06),
                     (-741.6068591778, 2.95e-06),
                     (-741.60685927, 1.27e-06),
                     (-741.6068592962, 4.82e-07),
                     (-741.6068593106, 3.84e-07),
                     (-741.6068593157, 9.23e-08),
                     (-741.6068593162, 2.49e-08),
                     (-741.6068593163, 1.52e-08),
                     (-741.6068593163, 5.71e-09)],
                    [(-741.6012175391, 0.000209),
                     (-741.6068794773, 7.2e-05),
                     (-741.606851035, 0.000117),
                     (-741.606899078, 1.53e-05),
                     (-741.6068997567, 6.01e-06),
                     (-741.6068998747, 1.68e-06),
                     (-741.6068998849, 5.32e-07),
                     (-741.6068998857, 2.76e-07),
                     (-741.606899886, 6.41e-08),
                     (-741.606899886, 3.08e-08),
                     (-741.606899886, 9.5e-09)],
                    [(-741.6067290885, 0.0001),
                     (-741.6069044268, 2.64e-05),
                     (-741.6068991026, 5.29e-05),
                     (-741.6069065234, 3.51e-06),
                     (-741.6069065452, 2.49e-06),
                     (-741.6069065686, 3.57e-07),
                     (-741.6069065693, 2.59e-07),
                     (-741.6069065696, 7.05e-08),
                     (-741.6069065696, 4.44e-08),
                     (-741.6069065697, 1.52e-08),
                     (-741.6069065697, 8.17e-09)],
                    [(-741.6074251344, 0.000129),
                     (-741.6069044127, 2.43e-05),
                     (-741.6068998551, 4.95e-05),
                     (-741.6069064294, 4.49e-06),
                     (-741.606906478, 2.77e-06),
                     (-741.6069065049, 5.85e-07),
                     (-741.6069065068, 2.74e-07),
                     (-741.6069065073, 6.99e-08),
                     (-741.6069065074, 3.37e-08),
                     (-741.6069065075, 1.89e-08),
                     (-741.6069065075, 7.38e-09)]]
    self.assertEqual(qcout.data[0]['scf_iteration_energies'], ans_scf_iter)
def test_multiple_step_job(self):
    """A three-step (opt -> freq -> sp) output file: job types, thermo
    corrections, frequency modes, the echoed freq input (with ECP), and the
    final SP energy / SCF history."""
    filename = os.path.join(test_dir, "CdBr2.qcout")
    qcout = QcOutput(filename)
    self.assertEqual(len(qcout.data), 3)
    self.assertEqual(qcout.data[0]['jobtype'], 'opt')
    self.assertEqual(qcout.data[1]['jobtype'], 'freq')
    ans_thermo_corr_text = '''
{
"Rotational Enthalpy": 0.025714259,
"Rotational Entropy": 0.000833523586,
"Total Enthalpy": 0.199729978,
"Total Entropy": 0.003218965579,
"Translational Enthalpy": 0.038549707,
"Translational Entropy": 0.001851513374,
"Vibrational Enthalpy": 0.109795116,
"Vibrational Entropy": 0.000533928619,
"ZPE": 0.039330241,
"Zero point vibrational energy": 0.039330241,
"gas constant (RT)": 0.025714259
}'''
    ans_thermo_corr = json.loads(ans_thermo_corr_text)
    self.assertEqual(sorted(qcout.data[1]['corrections'].keys()),
                     sorted(ans_thermo_corr.keys()))
    for k, ref in ans_thermo_corr.items():
        self.assertAlmostEqual(qcout.data[1]['corrections'][k], ref)
    self.assertEqual(len(qcout.data[1]['molecules']), 1)
    ans_mol1 = '''Full Formula (Cd1 Br2)
Reduced Formula: CdBr2
Charge = 0, Spin Mult = 1
Sites (3)
0 Br 0.000000 0.000000 -2.453720
1 Cd 0.000000 0.000000 0.000000
2 Br 0.000000 0.000000 2.453720'''
    self.assertEqual(str(qcout.data[1]['molecules'][0]), ans_mol1)
    self.assertFalse(qcout.data[1]['has_error'])
    # A frequency job reports no gradients.
    self.assertEqual(qcout.data[1]['gradients'], [])
    ans_inp = '''$molecule
read
$end
$rem
jobtype = freq
exchange = b3lyp
basis = gen
ecp = gen
max_scf_cycles = 100
scf_guess = gwh
$end
$basis
Br
srlc
****
Cd
srsc
****
$end
$ecp
Br
srlc
****
Cd
srsc
****
$end
'''
    self.assertEqual(str(qcout.data[1]['input']), ans_inp)
    # Normal modes: one displacement tuple per atom, plus the frequency.
    ans_freq = [{'vib_mode': ((0.17, -0.475, 0.0),
                              (-0.236, 0.659, 0.0),
                              (0.17, -0.475, 0.0)),
                 'frequency': 61.36},
                {'vib_mode': ((-0.475, -0.17, 0.0),
                              (0.659, 0.236, 0.0),
                              (-0.475, -0.17, 0.0)),
                 'frequency': 61.36},
                {'vib_mode': ((0.0, 0.0, 0.707),
                              (0.0, 0.0, 0.0),
                              (0.0, 0.0, -0.707)),
                 'frequency': 199.94},
                {'vib_mode': ((0.0, 0.0, -0.505),
                              (0.0, 0.0, 0.7),
                              (0.0, 0.0, -0.505)),
                 'frequency': 311.74}]
    self.assertEqual(qcout.data[1]['frequencies'], ans_freq)
    self.assertEqual(qcout.data[2]['energies'],
                     [('SCF', -5296.720321211475)])
    ans_scf_iter_ene = [[(-176.9147092199, 0.779),
                         (-156.8236033975, 0.115),
                         (-152.9396694452, 0.157),
                         (-183.2743425778, 0.138),
                         (-182.2994943574, 0.142),
                         (-181.990425533, 0.143),
                         (-182.1690180647, 0.142),
                         (-106.6454708618, 0.239),
                         (-193.8056267625, 0.0432),
                         (-193.0854096948, 0.0455),
                         (-194.6340538334, 0.0062),
                         (-194.6495072245, 0.00205),
                         (-194.6508787796, 0.000189),
                         (-194.6508984743, 2.18e-05),
                         (-194.6508986262, 2.17e-06)]]
    self.assertEqual(qcout.data[2]['scf_iteration_energies'],
                     ans_scf_iter_ene)
def test_solvent_method(self):
    """solvent_method is "NA" for gas-phase outputs and the method name
    (cosmo/pcm) when an implicit solvent model was used."""
    cases = [
        (os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout"), "NA"),
        (os.path.join(test_dir, "qchem_energies", "hf_cosmo.qcout"), "cosmo"),
        (os.path.join(test_dir, "qchem_energies", "hf_pcm.qcout"), "pcm"),
    ]
    for path, expected in cases:
        qcout = QcOutput(path)
        self.assertEqual(qcout.data[0]["solvent_method"], expected)
def test_failed_message(self):
    """Failed jobs carry the expected error lists: an SCF failure also
    implies missing-charge and optimization errors, while a plain geometry
    failure reports only itself."""
    expectations = [
        ("hf.qcout", ['Bad SCF convergence',
                      'Molecular charge is not found',
                      'Geometry optimization failed']),
        ("hf_opt_failed.qcout", ['Geometry optimization failed']),
    ]
    for fname, expected_errors in expectations:
        first_job = QcOutput(os.path.join(test_dir, fname)).data[0]
        self.assertTrue(first_job['has_error'])
        self.assertEqual(first_job['errors'], expected_errors)
def test_abnormal_exit(self):
    """Jobs killed with exit code 134 report the exit code together with
    the parser's inferred causes."""
    cases = [
        ("no_reading.qcout", ['Exit Code 134',
                              'Molecular charge is not found',
                              'No input text',
                              'Bad SCF convergence']),
        ("exit_code_134.qcout", ['Exit Code 134',
                                 'Molecular charge is not found',
                                 'Bad SCF convergence']),
    ]
    for fname, expected_errors in cases:
        first_job = QcOutput(os.path.join(test_dir, fname)).data[0]
        self.assertTrue(first_job['has_error'])
        self.assertEqual(first_job['errors'], expected_errors)
def test_chelp_and_mulliken_charges(self):
    """Both Mulliken and ChElPG per-atom charges are parsed from an output
    that requested ChElPG population analysis."""
    filename = os.path.join(test_dir, 'chelpg_charges.qcout')
    qcout = QcOutput(filename)
    mulliken_charges = [0.393961, -0.281545, 0.066432, 0.019364, -0.186041,
                        -0.16007, 0.315659, 0.30631, 0.064257, 0.056438,
                        -0.17695, 0.16976, -0.13326, -0.131853, -0.178711,
                        0.163697, 0.170148, 0.143329, 0.152702, 0.152929,
                        0.170475, -0.451542, -0.441554, -0.709834,
                        -0.592718, 0.20506, 0.211043, 0.204389, 0.546173,
                        -0.414558, 0.346511]
    self.assertEqual(qcout.data[0]['charges']['mulliken'],
                     mulliken_charges)
    chelpg_charges = [0.399404, -0.277179, -0.057502, -0.110085, -0.07107,
                      -0.274987, 0.475781, 0.423117, -0.054079, -0.101424,
                      -0.05793, 0.115179, -0.116069, -0.10949, -0.06664,
                      0.161442, 0.135438, 0.158081, 0.125881, 0.125324,
                      0.115863, -0.425251, -0.42309, -0.602375, -0.458844,
                      0.140267, 0.139084, 0.139995, 0.698011, -0.487911,
                      0.341061]
    self.assertEqual(qcout.data[0]['charges']['chelpg'], chelpg_charges)
def test_no_message_scf_opt_fail(self):
so_failfile = os.path.join(test_dir, 'scf_opt_no_message_fail.qcout')
so_failqcout = QcOutput(so_failfile)
self.assertTrue(so_failqcout.data[0]['has_error'])
self.assertEqual(so_failqcout.data[0]['errors'],
['Exit Code 134',
'Molecular charge is not found',
'Bad SCF convergence',
'Geometry optimization failed'])
o_failfile = os.path.join(test_dir, 'opt_fail_no_message.qcout')
o_failqcout = QcOutput(o_failfile)
self.assertEqual(o_failqcout.data[0]['errors'],
['Geometry optimization failed'])
s_failfile = os.path.join(test_dir, 'scf_no_message_fail.qcout')
s_failqcout = QcOutput(s_failfile)
self.assertEqual(s_failqcout.data[0]['errors'],
['Exit Code 134',
'Molecular charge is not found',
'Bad SCF convergence'])
so_successfile = os.path.join(test_dir,
'thiophene_wfs_5_carboxyl.qcout')
so_successqcout = QcOutput(so_successfile)
self.assertFalse(so_successqcout.data[0]['has_error'])
def test_negative_eigen(self):
filename = os.path.join(test_dir, "negative_eigen.qcout")
qcout = QcOutput(filename)
self.assertTrue(qcout.data[0]['has_error'])
self.assertEqual(qcout.data[0]["errors"],
['Negative Eigen',
'Molecular charge is not found',
'Bad SCF convergence',
'Geometry optimization failed'])
def test_insufficient_memory(self):
filename = os.path.join(test_dir, "insufficient_memory.qcout")
qcout = QcOutput(filename)
self.assertTrue(qcout.data[0]['has_error'])
self.assertEqual(qcout.data[0]['errors'],
['Insufficient static memory',
'Molecular charge is not found',
'Bad SCF convergence',
'Geometry optimization failed'])
def test_freq_seg_too_small(self):
filename = os.path.join(test_dir, "freq_seg_too_small.qcout")
qcout = QcOutput(filename)
self.assertTrue(qcout.data[0]['has_error'])
self.assertEqual(qcout.data[0]['errors'],
['Freq Job Too Small',
'Exit Code 134'])
def test_not_enough_total_memory(self):
filename = os.path.join(test_dir, "not_enough_total_memory.qcout")
qcout = QcOutput(filename)
self.assertTrue(qcout.data[1]['has_error'])
self.assertEqual(qcout.data[1]["errors"],
['Not Enough Total Memory',
'Exit Code 134'])
def test_killed(self):
filename = os.path.join(test_dir, "killed.qcout")
qcout = QcOutput(filename)
self.assertFalse(qcout.data[0]["has_error"])
self.assertTrue(qcout.data[1]["has_error"])
self.assertEqual(qcout.data[1]["errors"],
['Killed',
'Molecular charge is not found',
'Bad SCF convergence'])
def test_gdm_scf(self):
filename = os.path.join(test_dir, "gmd_scf.qcout")
qcout = QcOutput(filename)
self.assertTrue(qcout.data[0]['has_error'])
self.assertEqual(qcout.data[0]['errors'],
['Exit Code 134',
'Bad SCF convergence',
'Geometry optimization failed'])
self.assertEqual(len(qcout.data[0]['scf_iteration_energies']), 2)
self.assertEqual(len(qcout.data[0]['scf_iteration_energies'][-1]), 192)
self.assertAlmostEqual(qcout.data[0]['scf_iteration_energies'][-1][-1][0],
-1944.945908459, 5)
def test_crazy_scf_values(self):
filename = os.path.join(test_dir, "crazy_scf_values.qcout")
qcout = QcOutput(filename)
ans = [(-28556254.06737586, 6.49e-06),
(-28556254.067382727, 9.45e-06),
(-28556254.067382865, 6.14e-06)]
self.assertEqual(qcout.data[0]["scf_iteration_energies"][-1][-3:], ans)
    def test_crowd_gradient_number(self):
        """Gradients are parsed even when large values crowd column boundaries.

        Some rows contain components of very different magnitudes (e.g. rows
        13, 16 and 20 below); the parser must still split them correctly.
        """
        filename = os.path.join(test_dir, "crowd_gradient_number.qcout")
        qcout = QcOutput(filename)
        # Expected (x, y, z) gradient per atom, in atom order.
        self.assertEqual(qcout.data[0]['gradients'][0]['gradients'],
                         [(-0.0307525, 0.0206536, -0.0396255),
                          (0.0008938, -0.000609, 0.0082746),
                          (0.042143, -0.0240514, 0.0380298),
                          (-0.0843578, 0.0002757, 0.0884924),
                          (0.0356689, -0.0444656, -0.0710646),
                          (-0.0190554, -0.0308886, -0.0297994),
                          (0.0470543, -0.0263915, -0.0690973),
                          (-0.0297801, 0.0296872, -0.0104344),
                          (0.0504581, -0.0014272, 0.0262245),
                          (-0.0927323, 0.0750046, 0.0128003),
                          (0.0183242, -0.0084638, 0.0127388),
                          (-0.0083989, 0.0111579, -0.0002461),
                          (-0.0316941, 267.34455, 878.3493251),
                          (0.017459, 0.0487124, -0.0276365),
                          (-0.3699134, 0.0110442, 0.0260809),
                          (0.363931, 0.24044, 0.5192852),
                          (0.026669, -0.0284192, -0.0347528),
                          (0.0047475, 0.0049706, 0.0148794),
                          (-0.077804, 0.003402, 0.000852),
                          (-6772.1697035, -267.4471902, -878.585931),
                          (-0.0029556, -0.0616073, -0.0180577),
                          (-0.0001915, 0.0021213, 0.0006193),
                          (0.0320436, -0.0073456, -0.01509),
                          (0.0155112, -0.0035725, 0.0015675),
                          (-0.0034309, 0.0170739, 0.0074455),
                          (-0.0088735, -0.0129874, 0.0092329),
                          (-0.0271963, -0.0258714, 0.0246954),
                          (0.0025065, 0.0062934, 0.0209733),
                          (0.0152829, -0.0080239, -0.018902),
                          (0.0461304, 0.0071952, 0.0012227),
                          (-0.0272755, -0.0280053, 0.0325455),
                          (0.0122118, 0.027816, -0.0167773),
                          (0.0168893, -0.0014211, 0.0039917),
                          (-0.0048723, 0.0026667, -0.0159952),
                          (-0.1840467, -0.1425887, -0.3235801),
                          (0.015975, -0.0922797, 0.0640925),
                          (0.0267234, 0.1031154, -0.0299014),
                          (-0.0175591, 0.0081813, -0.0165425),
                          (0.0119225, 0.0113174, 0.0154056),
                          (0.0138491, 0.0083436, 0.0188022),
                          (-0.0151146, -0.0015971, -0.0054462)])
    def test_nbo_charges(self):
        """NBO charges are parsed for small, anionic and very large systems."""
        filename = os.path.join(test_dir, "quinoxaline_anion.qcout")
        qcout = QcOutput(filename)
        # Per-atom NBO charges, in atom order.
        ans = [-0.29291, -0.29807, 0.12715, 0.12715, -0.29807, -0.29291,
               0.21284, 0.22287, 0.22287, 0.21284, -0.10866, -0.10866,
               0.19699, -0.5602, -0.5602, 0.19699]
        self.assertEqual(qcout.data[0]["charges"]["nbo"], ans)
        filename = os.path.join(test_dir, "tfsi_nbo.qcout")
        qcout = QcOutput(filename)
        ans = [2.2274, 2.23584, -0.94183, -0.94575, -0.94719, -0.9423,
               0.86201, 0.85672, -0.35698, -0.35373, -0.35782, -0.35647,
               -0.35646, -0.35787, -1.26555]
        self.assertEqual(qcout.data[0]["charges"]["nbo"], ans)
        # A large ("crowded") system exercises the parser on many atoms.
        filename = os.path.join(test_dir, "crowd_nbo_charges.qcout")
        qcout = QcOutput(filename)
        self.assertEqual(
            qcout.data[0]["charges"]["nbo"],
            [-0.33917, -0.6104, -0.15912, -0.17751, -0.61817, -0.3357, 0.24671,
             0.19942, 0.19325, 0.2362, 0.23982, 0.21985, 0.2305, 0.20444,
             0.23179, 0.20491, 0.85965, -0.59655, -0.59561, -0.14789, -0.13859,
             -0.32712, -0.33359, 0.21602, 0.22383, 0.2123, 0.22759, 0.2507,
             0.20098, 0.18631, 0.24945, 0.19709, 0.20274, -0.34831, -0.56307,
             -0.14572, -0.1431, -0.55866, -0.3572, 0.22695, 0.21983, 0.1963,
             0.20977, 0.22298, 0.20875, 0.21081, 0.19586, 0.24708, 0.20067,
             -0.34288, -0.55793, -0.16806, -0.15609, -0.56464, -0.34695,
             0.22555, 0.20417, 0.206, 0.20825, 0.22409, 0.25415, 0.20977,
             0.18976, 0.24647, 0.1993, -0.33605, -0.59395, -0.15985, -0.18024,
             -0.60646, -0.32742, 0.22909, 0.19347, 0.21872, 0.2203, 0.23518,
             0.25185, 0.23523, 0.18666, 0.22737, 0.2205, -0.35902, -0.56138,
             -0.14552, -0.14903, -0.55491, -0.3493, 0.22826, 0.21789, 0.19075,
             0.20898, 0.21343, 0.21715, 0.20794, 0.19695, 0.2429, 0.18482,
             -0.33943, -0.55659, -0.16437, -0.14503, -0.56155, -0.34131,
             0.22339, 0.20483, 0.19376, 0.23395, 0.20784, 0.2096, 0.21945,
             0.19192, 0.23089, 0.20493, -0.32963, -0.56949, -0.1446, -0.15244,
             -0.55482, -0.34848, 0.22802, 0.20471, 0.19704, 0.20744, 0.22332,
             0.2206, 0.20734, 0.18871, 0.22907, 0.20741, -0.33856, -0.564,
             -0.16575, -0.17422, -0.56032, -0.3426, 0.22585, 0.20169, 0.20529,
             0.20836, 0.21329, 0.25353, 0.23374, 0.19306, 0.23582, 0.20196,
             -0.34069, -0.56522, -0.17228, -0.17503, -0.55505, -0.34264,
             0.22696, 0.19604, 0.20515, 0.23964, 0.2437, 0.2111, 0.21204,
             0.19975, 0.2347, 0.18835, -0.34324, -0.55184, -0.16086, -0.15907,
             -0.56319, -0.3384, 0.23866, 0.19808, 0.19728, 0.20205, 0.24698,
             0.21416, 0.20398, 0.20475, 0.2265, 0.20141, -0.34339, -0.56344,
             -0.14955, -0.14878, -0.55906, -0.34506, 0.23937, 0.20027, 0.19671,
             0.2085, 0.21693, 0.22164, 0.20863, 0.20703, 0.22889, 0.1916])
def test_simple_aimd(self):
filename = os.path.join(test_dir, "h2o_aimd.qcout")
qcout = QcOutput(filename)
self.assertEqual(len(qcout.data[0]["molecules"]), 11)
def test_homo_lumo(self):
filename = os.path.join(test_dir, "quinoxaline_anion.qcout")
qcout = QcOutput(filename)
for a, b in zip(qcout.data[0]["HOMO/LUMOs"][-1],
[1.00682120282, 2.80277253758]):
self.assertAlmostEqual(a, b, 5)
filename = os.path.join(test_dir, "qchem_energies", "hf_ccsd(t).qcout")
qcout = QcOutput(filename)
self.assertEqual(qcout.data[0]["HOMO/LUMOs"], [[-17.74182227672, 5.2245857011200005],
[-17.74182227672, 5.2245857011200005]])
filename = os.path.join(test_dir, "crowd_gradient_number.qcout")
qcout = QcOutput(filename)
self.assertEqual(qcout.data[0]["HOMO/LUMOs"], [[-5.74160199446, -4.544301104620001],
[-4.9796832463800005, -4.2993986498800005],
[-4.7619921755, -3.8095937404000004]])
def test_bsse(self):
filename = os.path.join(test_dir, "bsse.qcout")
qcout = QcOutput(filename)
self.assertAlmostEqual(qcout.data[0]["bsse"], -0.164210762949, 5)
self.assertEqual(qcout.data[0]["jobtype"], "bsse")
def test_hirshfeld_charge(self):
filename = os.path.join(test_dir, "hirshfeld_population.qcout")
qcout = QcOutput(filename)
self.assertEqual(qcout.data[0]["charges"]["hirshfeld"],
[-0.286309, 0.143134, 0.143176])
self.assertFalse(qcout.data[0]["has_error"])
def test_ghost_atoms(self):
filename = os.path.join(test_dir, "ghost_atoms.qcout")
qcout = QcOutput(filename)
elements = [a.specie.symbol for a in qcout.data[-1]["molecules"][-1].sites]
self.assertEqual(elements, ['O', 'H', 'H', 'C', 'H', 'H', 'H', 'H'])
filename = os.path.join(test_dir, "MgBF4_b_overalpped.qcout")
qcout = QcOutput(filename)
self.assertEqual(qcout.data[0]["input"].ghost_atoms, [0])
def test_final_energy(self):
filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout")
qcout = QcOutput(filename)
self.assertEqual(qcout.final_energy, -20180.150206202714)
    def test_final_structure(self):
        """final_structure's str() matches the expected pymatgen text exactly."""
        filename = os.path.join(test_dir, "thiophene_wfs_5_carboxyl.qcout")
        qcout = QcOutput(filename)
        # Expected str() of the final structure; compared verbatim.
        ans = '''Full Formula (H4 C5 S1 O2)
Reduced Formula: H4C5SO2
Charge = -1, Spin Mult = 2
Sites (12)
0 C 0.194695 -0.158362 -0.001887
1 C -0.535373 -1.381241 -0.001073
2 C -1.927071 -1.199274 -0.000052
3 C -2.332651 0.131916 0.000329
4 S -0.942111 1.224916 -0.001267
5 H -0.038260 -2.345185 -0.001256
6 H -2.636299 -2.025939 0.000620
7 H -3.339756 0.529895 0.001288
8 C 1.579982 0.071245 -0.002733
9 O 2.196383 1.165675 -0.000178
10 O 2.352341 -1.114671 0.001634
11 H 3.261096 -0.769470 0.003158'''
        self.assertEqual(qcout.final_structure.__str__(), ans)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
Dioptas/pymatgen
|
pymatgen/io/tests/test_qchemio.py
|
Python
|
mit
| 71,957
|
[
"pymatgen"
] |
a33bf0539f3e676063b9f278dc14c3d41d6e77e27e7462628afac1ae07085a3c
|
import os
import numpy
from pyproj import Proj
from netCDF4 import Dataset
from trefoil.netcdf.variable import CoordinateVariable, BoundsCoordinateVariable
from trefoil.netcdf.variable import SpatialCoordinateVariable, SpatialCoordinateVariables
from trefoil.geometry.bbox import BBox
def test_coordinate_variable_length():
    """len() of a CoordinateVariable matches the length of its backing array."""
    values = numpy.arange(10)
    assert len(CoordinateVariable(values)) == values.shape[0]
def test_range_functions():
    """Exercise indices_for_range / slice_by_range on ascending, descending,
    oversized and non-overlapping value ranges."""
    data = numpy.arange(10)
    variable = CoordinateVariable(data)
    value_range = (2, 5)
    indices = variable.indices_for_range(*value_range)
    assert indices == value_range
    # The slice includes both endpoints: values 2..5 -> data[2:6].
    assert numpy.array_equal(variable.slice_by_range(*value_range), data[2:6])
    # Test values in reverse order
    data = data[::-1]
    variable = CoordinateVariable(data)
    indices = variable.indices_for_range(*value_range)
    size = len(variable) - 1
    # For descending data the index pair is mirrored around the array's end.
    assert indices == (size - value_range[1], size - value_range[0])
    assert numpy.array_equal(variable.slice_by_range(*value_range), data[4:8])
    # Test value range much larger than data
    value_range = (-100, 100)
    variable = CoordinateVariable(numpy.arange(1, 11))
    indices = variable.indices_for_range(*value_range)
    # An oversized range clamps to the full extent of the variable.
    assert indices == (0, len(variable) - 1)
    data = numpy.arange(20,40)
    variable = CoordinateVariable(data)
    # Test out of range
    assert variable.indices_for_range(0, 10) == (0, 0)
    assert numpy.array_equal(variable.slice_by_range(0, 10), numpy.array([]))
    #Test partial overlap
    assert numpy.array_equal(variable.slice_by_range(10, 30), numpy.arange(20, 31))
    assert variable.indices_for_range(40, 50) == (variable.values.size-1, variable.values.size-1)
    assert numpy.array_equal(variable.slice_by_range(40, 50), numpy.array([]))
def test_window_for_bbox():
    """A bbox strictly inside the grid trims one cell from every edge."""
    grid = SpatialCoordinateVariables.from_bbox(
        BBox([-124, 82, -122, 90], Proj(init='epsg:4326')), 20, 20)
    window = grid.get_window_for_bbox(BBox([-123.9, 82.4, -122.1, 89.6]))
    assert window.x_slice == slice(1, 19)
    assert window.y_slice == slice(1, 19)
def test_BoundsCoordinateVariable():
    """Bounds variables round-trip through a netCDF dataset."""
    bounds = numpy.array(((0, 1), (1, 2)))
    outvarname = 'test_bounds'
    outfilename = 'test.nc'
    try:
        with Dataset(outfilename, 'w') as target_ds:
            BoundsCoordinateVariable(bounds).add_to_dataset(target_ds, outvarname)
            # Both the bounds dimension and the variable's own dimension exist.
            for expected_dim in ('_bnds', outvarname):
                assert expected_dim in target_ds.dimensions
            assert outvarname in target_ds.variables
            assert numpy.array_equal(target_ds.variables[outvarname][:], bounds)
    finally:
        if os.path.exists(outfilename):
            os.remove(outfilename)
def test_SpatialCoordinateVariable():
    """Edges are derived for ascending and descending coordinates, and the
    variable round-trips through a netCDF dataset."""
    # Ascending: edges extend half a cell beyond each end.
    ascending = SpatialCoordinateVariable(numpy.arange(10))
    assert numpy.array_equal(ascending.edges, numpy.arange(11) - 0.5)
    # Descending: edges are produced in descending order too.
    variable = SpatialCoordinateVariable(numpy.arange(9, -1, -1))
    assert numpy.array_equal(variable.edges, numpy.arange(10, -1, -1) - 0.5)
    outvarname = 'lat'
    outfilename = 'test.nc'
    try:
        with Dataset(outfilename, 'w') as target_ds:
            variable.add_to_dataset(target_ds, outvarname)
            assert outvarname in target_ds.dimensions
            assert outvarname in target_ds.variables
            assert numpy.array_equal(target_ds.variables[outvarname][:],
                                     variable.values)
    finally:
        if os.path.exists(outfilename):
            os.remove(outfilename)
def test_SpatialCoordinateVariables_bbox():
    """from_bbox followed by .bbox reproduces the original bounding box."""
    original = BBox((10.5, 5, 110.5, 55), projection=Proj(init='EPSG:4326'))
    coords = SpatialCoordinateVariables.from_bbox(original, 10, 5)
    assert coords.bbox.as_list() == original.as_list()
def test_SpatialCoordinateVariables_slice_by_bbox():
    """Slicing by bbox keeps only cells inside, preserving descending y."""
    proj = Proj(init='EPSG:4326')
    coords = SpatialCoordinateVariables(
        SpatialCoordinateVariable(numpy.arange(10)),         # x (lon)
        SpatialCoordinateVariable(numpy.arange(19, -1, -1)), # y (lat), descending
        proj)
    subset = coords.slice_by_bbox(BBox((1.75, 3.7, 6.2, 16.7), proj))
    assert numpy.array_equal(subset.x.values, numpy.arange(2, 6))
    assert subset.x.values[0] == 2
    assert subset.x.values[-1] == 5
    assert subset.y.values[0] == 16
    assert subset.y.values[-1] == 4
def test_SpatialCoordinateVariables_add_to_dataset():
    """Both coordinate variables are written as dimensions and variables."""
    lat = SpatialCoordinateVariable(numpy.arange(19, -1, -1))
    lon = SpatialCoordinateVariable(numpy.arange(10))
    coords = SpatialCoordinateVariables(lon, lat, Proj(init='EPSG:4326'))
    outfilename = 'test.nc'
    try:
        with Dataset(outfilename, 'w') as target_ds:
            coords.add_to_dataset(target_ds, 'lon', 'lat')
            # Check lat first, then lon, as the original assertions did.
            for varname, coord in (('lat', lat), ('lon', lon)):
                assert varname in target_ds.dimensions
                assert varname in target_ds.variables
                assert len(target_ds.dimensions[varname]) == coord.values.size
                assert numpy.array_equal(coord.values,
                                         target_ds.variables[varname][:])
    finally:
        if os.path.exists(outfilename):
            os.remove(outfilename)
|
consbio/clover
|
trefoil/netcdf/tests/test_variable.py
|
Python
|
bsd-3-clause
| 5,577
|
[
"NetCDF"
] |
9bda6e7cfd9d046212d2c2991b9c9d4a916bb20fc9bf971f6d0ce0556118bb1f
|
""" Test for StorageManagement clients
"""
# pylint: disable=protected-access,missing-docstring,invalid-name
import unittest
from mock import MagicMock, patch
from DIRAC import S_OK, S_ERROR
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import getFilesToStage
from DIRAC.DataManagementSystem.Client.test.mock_DM import dm_mock
import errno
# StorageElement stubs: each mock fixes getFileMetadata() and getStatus()
# to model one scenario for getFilesToStage().
# SE1: tape SE, file known but not accessible.
mockObjectSE1 = MagicMock()
mockObjectSE1.getFileMetadata.return_value = S_OK( {'Successful':{'/a/lfn/1.txt':{'Accessible':False}},
                                                    'Failed':{}} )
mockObjectSE1.getStatus.return_value = S_OK( {'DiskSE': False, 'TapeSE':True} )
# SE2: tape SE, file cached and accessible.
mockObjectSE2 = MagicMock()
mockObjectSE2.getFileMetadata.return_value = S_OK( {'Successful':{'/a/lfn/2.txt':{'Cached':1, 'Accessible':True}},
                                                    'Failed':{}} )
mockObjectSE2.getStatus.return_value = S_OK( {'DiskSE': False, 'TapeSE':True} )
# SE3: tape SE, metadata query fails with a generic error string.
mockObjectSE3 = MagicMock()
mockObjectSE3.getFileMetadata.return_value = S_OK( {'Successful':{},
                                                    'Failed':{'/a/lfn/2.txt': 'error'}} )
mockObjectSE3.getStatus.return_value = S_OK( {'DiskSE': False, 'TapeSE':True} )
# SE4: tape SE, metadata query fails with an ENOENT-style message.
mockObjectSE4 = MagicMock()
mockObjectSE4.getFileMetadata.return_value = S_OK( {'Successful':{},
                                                    'Failed':{'/a/lfn/2.txt':
                                                              S_ERROR( errno.ENOENT, '' )['Message']}} )
mockObjectSE4.getStatus.return_value = S_OK( {'DiskSE': False, 'TapeSE':True} )
# SE5: disk SE, file not accessible.
mockObjectSE5 = MagicMock()
mockObjectSE5.getFileMetadata.return_value = S_OK( {'Successful':{'/a/lfn/1.txt':{'Accessible':False}},
                                                    'Failed':{}} )
mockObjectSE5.getStatus.return_value = S_OK( {'DiskSE': True, 'TapeSE':False} )
# SE6: tape SE, file known but not cached (offline on tape).
mockObjectSE6 = MagicMock()
mockObjectSE6.getFileMetadata.return_value = S_OK( {'Successful':{'/a/lfn/2.txt':{'Cached':0, 'Accessible':False}},
                                                    'Failed':{}} )
mockObjectSE6.getStatus.return_value = S_OK( {'DiskSE': False, 'TapeSE':True} )
# DMSHelpers stub: maps any SE to the site 'mySite'.
mockObjectDMSHelper = MagicMock()
mockObjectDMSHelper.getLocalSiteForSE.return_value = S_OK( 'mySite' )
mockObjectDMSHelper.getSitesForSE.return_value = S_OK( ['mySite'] )
class ClientsTestCase( unittest.TestCase ):
  """ Base class for the clients test cases
  """
  def setUp( self ):
    # Local import so DIRAC's global state is only touched when tests run.
    from DIRAC import gLogger
    gLogger.setLevel( 'DEBUG' )
  def tearDown( self ):
    pass
#############################################################################
class StorageManagerSuccess( ClientsTestCase ):
  """ getFilesToStage() scenarios driven by the module-level SE mocks """
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE1 )
  def test_getFilesToStage_withFilesToStage( self, _patch, _patched ):
    """ Test where the StorageElement mock will return files offline
    """
    res = getFilesToStage( ['/a/lfn/1.txt'], checkOnlyTapeSEs = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], [] )
    # Either replica SE may be chosen, so accept both possibilities.
    self.assertIn( res['Value']['offlineLFNs'], [{'SE1':['/a/lfn/1.txt']},
                                                 {'SE2':['/a/lfn/1.txt']}] )
    self.assertEqual( res['Value']['absentLFNs'], {} )
    self.assertEqual( res['Value']['failedLFNs'], [] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE2 )
  def test_getFilesToStage_noFilesToStage( self, _patch, _patched ):
    """ Test where the StorageElement mock will return files online
    """
    res = getFilesToStage( ['/a/lfn/2.txt'], checkOnlyTapeSEs = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], ['/a/lfn/2.txt'] )
    self.assertEqual( res['Value']['offlineLFNs'], {} )
    self.assertEqual( res['Value']['absentLFNs'], {} )
    self.assertEqual( res['Value']['failedLFNs'], [] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE3 )
  def test_getFilesToStage_seErrors( self, _patch, _patched ):
    """ Test where the StorageElement will return failure
    """
    res = getFilesToStage( ['/a/lfn/2.txt'], checkOnlyTapeSEs = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], [] )
    self.assertEqual( res['Value']['offlineLFNs'], {} )
    self.assertEqual( res['Value']['absentLFNs'], {} )
    # A generic SE error puts the LFN in failedLFNs, not absentLFNs.
    self.assertEqual( res['Value']['failedLFNs'], ['/a/lfn/2.txt'] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE4 )
  def test_getFilesToStage_noSuchFile( self, _patch, _patched ):
    """ Test where the StorageElement will return file is absent
    """
    res = getFilesToStage( ['/a/lfn/2.txt'], checkOnlyTapeSEs = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], [] )
    self.assertEqual( res['Value']['offlineLFNs'], {} )
    # An ENOENT-style failure classifies the LFN as absent, with the reason.
    self.assertEqual( res['Value']['absentLFNs'], {'/a/lfn/2.txt': 'No such file or directory ( 2 : File not at SE2)'} )
    self.assertEqual( res['Value']['failedLFNs'], [] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE5 )
  def test_getFilesToStage_fileInaccessibleAtDisk( self, _patch, _patched ):
    """ Test where the StorageElement will return file is unavailable at a Disk SE
    """
    res = getFilesToStage( ['/a/lfn/1.txt'], checkOnlyTapeSEs = False )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], [] )
    self.assertEqual( res['Value']['offlineLFNs'], {} )
    self.assertEqual( res['Value']['absentLFNs'], {} )
    self.assertEqual( res['Value']['failedLFNs'], ['/a/lfn/1.txt'] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE2 )
  def test_getFilesToStage_tapeSEOnly_1( self, _patch, _patched ):
    """ Test where the StorageElement will return file is available
    """
    res = getFilesToStage( ['/a/lfn/2.txt'], checkOnlyTapeSEs = True )
    self.assertTrue( res['OK'] )
    self.assertEqual( res['Value']['onlineLFNs'], ['/a/lfn/2.txt'] )
    self.assertEqual( res['Value']['offlineLFNs'], {} )
    self.assertEqual( res['Value']['absentLFNs'], {} )
    self.assertEqual( res['Value']['failedLFNs'], [] )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.DataManager", return_value = dm_mock )
  @patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.StorageElement", return_value = mockObjectSE6 )
  def test_getFilesToStage_tapeSEOnly_2( self, _patch, _patched ):
    """ Test where the StorageElement will return file is at offline at tape
    """
    # Pin the SE selection to make the expected offlineLFNs deterministic.
    with patch( "DIRAC.StorageManagementSystem.Client.StorageManagerClient.random.choice", new=MagicMock( return_value='SERandom' )):
      res = getFilesToStage( ['/a/lfn/2.txt'], checkOnlyTapeSEs = True )
      self.assertTrue( res['OK'] )
      self.assertEqual( res['Value']['onlineLFNs'], [] )
      self.assertEqual( res['Value']['offlineLFNs'], {'SERandom': ['/a/lfn/2.txt']} )
      self.assertEqual( res['Value']['absentLFNs'], {} )
      self.assertEqual( res['Value']['failedLFNs'], [] )
if __name__ == '__main__':
  # Build and run the full suite: base fixture checks plus the success cases.
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( ClientsTestCase )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( StorageManagerSuccess ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )
|
andresailer/DIRAC
|
StorageManagementSystem/Client/test/Test_Client_StorageManagementSystem.py
|
Python
|
gpl-3.0
| 8,186
|
[
"DIRAC"
] |
4baab3197a4c1fd4632401e3ea3926c040fdb05f87d7b527b4d35905839226f2
|
"""
@file bible/model.py
@author Brian Kim
@brief definition of bible objects
Language, Version, Chapter, Verse
"""
# The "flask.ext" namespace was removed in Flask 1.0; the extension must be
# imported from its own package instead.
from flask_sqlalchemy import SQLAlchemy

# Shared SQLAlchemy handle used by every model in this module.
db = SQLAlchemy()
class Language(db.Model):
    """A language in which versions and books of the bible exist."""
    abbr = db.Column(db.String(5), primary_key=True)
    name = db.Column(db.String(32))

    def __init__(self, abbr, name):
        self.abbr = abbr
        self.name = name

    def __repr__(self):
        return '<Language %s>' % self

    def __str__(self):
        return '%s (%s)' % (self.name, self.abbr)

    def __iter__(self):
        # (key, value) pairs so dict(language) yields a plain mapping.
        yield (u'abbr', self.abbr)
        yield (u'name', self.name)
class Version(db.Model):
    """A translation/version of the bible, belonging to a Language."""
    # FIX: language.abbr is a String(5) primary key, so this foreign-key
    # column must also be a string (it was declared db.Integer before).
    lang_id = db.Column(db.String(5), db.ForeignKey('language.abbr'))
    lang = db.relationship('Language',backref=db.backref('versions',lazy='dynamic'))
    abbr = db.Column(db.String(5), primary_key=True)
    name = db.Column(db.String(32))
    def chapters(self):
        # NOTE(review): Language defines no chapters() in this module, and
        # Chapter's backref named 'chapters' also targets this class -- the
        # intended behavior here should be confirmed.
        return self.lang.chapters()
    def __init__(self,lang,abbr,name):
        self.lang = lang
        self.abbr = abbr
        self.name = name
    def __repr__(self):
        return '<Translation %s>' % self
    def __str__(self):
        return '%s (%s)' % (self.name,self.abbr)
    def __iter__(self):
        # (key, value) pairs so dict(version) yields a plain mapping.
        yield (u'abbr', self.abbr)
        yield (u'name', self.name)
        yield (u'lang', self.lang.abbr)
class Book(db.Model):
    """A book of the bible in a given language, with its chapter count."""
    # FIX: language.abbr is a String(5) primary key, so this foreign-key
    # column must also be a string (it was declared db.Integer before).
    lang_id = db.Column(db.String(5), db.ForeignKey('language.abbr'))
    lang = db.relationship('Language',backref=db.backref('books',lazy='dynamic'))
    abbr = db.Column(db.String(5), primary_key=True)
    name = db.Column(db.String(32))
    numch = db.Column(db.Integer)  # number of chapters in the book
    def __init__(self,lang,abbr,name,numch):
        self.lang = lang
        self.abbr = abbr
        self.name = name
        self.numch = numch
    def __repr__(self):
        return '<Book %s>' % self
    def __str__(self):
        return '%s (%s)' % (self.name,self.abbr)
    def __iter__(self):
        # (key, value) pairs so dict(book) yields a plain mapping.
        yield (u'abbr', self.abbr)
        yield (u'name', self.name)
        yield (u'lang', self.lang.abbr)
        yield (u'numch', self.numch)
class Chapter(db.Model):
    """A numbered chapter of a Book within a specific Version."""
    id = db.Column(db.Integer,primary_key=True)
    version_id = db.Column(db.String(5), db.ForeignKey('version.abbr'))
    book_id = db.Column(db.String(5), db.ForeignKey('book.abbr'))
    number = db.Column(db.Integer)  # chapter number within the book
    version = db.relationship('Version',backref=db.backref('chapters',lazy='dynamic'))
    book = db.relationship('Book',backref=db.backref('chapters',lazy='dynamic'))
    def __init__(self,v,b,num):
        self.version = v
        self.book = b
        self.number = num
    def __repr__(self):
        return '<Chapter %s>' % self
    def __str__(self):
        return '%s %s %i' % (self.version.abbr,self.book.abbr,self.number)
    def __iter__(self):
        # (key, value) pairs so dict(chapter) serializes the chapter,
        # including all verses via the 'verses' backref defined on Verse.
        yield (u'version',self.version.abbr)
        yield (u'book',self.book.abbr)
        yield (u'number',self.number)
        yield (u'verses',[ dict(x) for x in self.verses.all() ])
class Verse(db.Model):
    """A single verse of text within a Chapter."""
    id = db.Column(db.Integer,primary_key=True)
    ch_id = db.Column(db.Integer, db.ForeignKey('chapter.id'))
    number = db.Column(db.Integer)  # verse number within the chapter
    # NOTE(review): String's convert_unicode flag was deprecated and later
    # removed in SQLAlchemy 1.4 -- confirm against the pinned version.
    text = db.Column(db.String(1024,convert_unicode=True))
    chapter = db.relationship('Chapter',backref=db.backref('verses',lazy='dynamic'))
    def __init__(self,ch,num,txt):
        self.chapter = ch
        self.number = num
        self.text = txt
    def __repr__(self):
        return '<Verse %s>' % self
    def __str__(self):
        return '%s:%i' % (self.chapter,self.number)
    def __iter__(self):
        # (key, value) pairs so dict(verse) yields a serializable mapping.
        yield (u'chapter',str(self.chapter))
        yield (u'number',self.number)
        yield (u'text',self.text)
|
briansan/bible
|
bible/model.py
|
Python
|
bsd-2-clause
| 3,446
|
[
"Brian"
] |
349910da542da8e2499d991122aacc19940b8352490d84d7ce1d478b99c02742
|
#modified to work with python3
##############################
import operator
import random
from collections import defaultdict
from functools import reduce
###Stats and math functions
def weighted_sampler(pop_dict):
    """randomly sample a dictionary's keys based on weights stored as values example:
    m = {'a':3, 'b':2, 'c':5}
    samps = [weighted_sampler(m) for _ in range(1000)]
    #samps should be a ~ 300, b ~ 200, and c ~ 500
    >>> samps.count('a')
    304
    >>> samps.count('b')
    211
    >>> samps.count('c')
    485
    of course, being a random sampler your results will vary"""
    keys = sorted(pop_dict.keys())
    remaining = random.random() * sum(pop_dict[k] for k in keys)
    for key in keys:
        remaining -= pop_dict[key]
        if remaining < 0:
            return key
    # Guard against floating-point rounding leaving `remaining` >= 0 after the
    # loop (the previous version fell through and returned None in that case).
    return keys[-1]
def choose(n, k):
    '''implements binomial coefficient function
    see: https://en.wikipedia.org/wiki/Binomial_coefficient
    performance not tested on really large values

    Uses exact integer arithmetic: after each step the accumulator equals
    C(n, b+1), so a*(n-b) is always evenly divisible by (b+1) and floor
    division never truncates.  (The previous version used Python 3 true
    division and returned an inexact float.)'''
    return reduce(lambda a, b: a * (n - b) // (b + 1), range(k), 1)
def sampler(pop, size, replacement=False):
    '''a quick re-implementation of the python random sampler that
    allows for sampling with or without replacement (pythons builtin
    only allows without replacement)'''
    if not replacement:
        return random.sample(pop, size)
    return [random.choice(pop) for _ in range(size)]
def rank(x):
    '''returns the sample rank of the elements in a list

    Ties are broken by original position (stable), exactly as the previous
    dict-based implementation did; this version needs only one sort and no
    helper function.'''
    ranks = [0] * len(x)
    # Sort positions by (value, position); the sorted position of index i
    # is its rank.
    for sample_rank, original_index in enumerate(
            sorted(range(len(x)), key=lambda i: (x[i], i))):
        ranks[original_index] = sample_rank
    return ranks
def order(x):
    '''returns the sample indeces that would return the list in sorted order
    ie:
    x = (4,3,406,5)
    sorted(x) == [x[i] for i in order(x)]

    Ties are broken by original position (stable), matching the previous
    dict-plus-double-sort implementation in a single sort with no helper.'''
    return sorted(range(len(x)), key=lambda i: (x[i], i))
###Useful functions for bioinformatics
###NOTE: biopython offers more robust versions, but sometimes you just
def revcom(s):
    '''returns the reverse complement of a DNA sequence string
    only accepts ACGT, upper or lowercase'''
    # Complementing is per-character, so translating first and then
    # reversing gives the same result as reverse-then-translate.
    complement_map = str.maketrans('atcgATCG', 'tagcTAGC')
    return s.translate(complement_map)[::-1]
def get_fasta(file_name):
    '''read a properly formated fasta and return a dict
    with key=readname and value=sequence
    reads the whole file in

    FIX: the file handle is now closed deterministically (the previous
    version opened the file in a comprehension and never closed it).'''
    sequences = {}
    current = None
    with open(file_name, 'r') as fh:
        for raw_line in fh:
            line = raw_line.strip()
            if line.startswith('>'):
                current = line[1:]
                sequences[current] = []
            else:
                sequences[current].append(line)
    # Join each read's lines into a single sequence string.
    return {name: ''.join(parts) for name, parts in sequences.items()}
def get_fasta_buffer(file_name):
    '''An efficient fasta reader that is buffered and therefore
    useful for big fasta files. It returns each fasta one by
    as a tuple -> (name, sequence).

    FIXES: the file is closed when the generator is exhausted or closed
    (previously the handle leaked); an empty file no longer raises
    NameError; records with an empty sequence are yielded instead of
    being silently dropped.'''
    current_name = None
    current_seq = []
    with open(file_name) as fh:
        for line in fh:
            if line.startswith('>'):
                # Emit the previous record (if any) before starting a new one.
                if current_name is not None:
                    yield (current_name, ''.join(current_seq))
                current_name = line[1:].strip()
                current_seq = []
            else:
                current_seq.append(line.strip())
        if current_name is not None:
            yield (current_name, ''.join(current_seq))
def print_fasta(s):
    """Return a generator of FASTA-formatted records ('>' + name + newline +
    sequence) from a name -> sequence dict.  (Was a lambda assigned to a
    name, which PEP 8 (E731) discourages; behavior is unchanged.)"""
    return ('>' + name + '\n' + s[name] for name in s)
###Set functions
def intersection(sets):
    """Get the intersection of all input sets"""
    # Coerce any non-set inputs to sets, then fold with set.intersection.
    pool = [s if type(s) == type(set()) else set(s) for s in sets]
    return reduce(set.intersection, pool)
def union(sets):
    """Get the union of all input sets"""
    # Coerce any non-set inputs to sets, then fold with set.union.
    pool = [s if type(s) == type(set()) else set(s) for s in sets]
    return reduce(set.union, pool)
def join(seqs):
    """Concatenate any input sequences that support concatenation,
    folding them left-to-right with operator.concat."""
    return reduce(operator.concat, seqs)
#Misc
def get_file(filename, splitchar = 'NA', buffered = False):
    """Read a delimited text file as a list (or generator) of token lists.

    splitchar: the delimiter; the sentinel 'NA' means "any whitespace"
               (str.split with no argument).
    buffered:  if True, return a lazy generator (the file stays open while
               it is consumed); otherwise return a fully-read list.

    FIX: in the eager (non-buffered) path the file handle is now closed
    (the previous version opened it inside a comprehension and leaked it).
    """
    def split_line(line):
        # One place for the delimiter logic instead of four branches.
        return line.strip().split() if splitchar == 'NA' else line.strip().split(splitchar)

    if buffered:
        return (split_line(line) for line in open(filename))
    with open(filename) as fh:
        return [split_line(line) for line in fh]
def sort_dict_by_val(aDict):
    '''returns a list of (key, value) tuples sorted by value, then key'''
    return sorted(aDict.items(), key=lambda kv: (kv[1], kv[0]))
def pairwise(li):
    '''a convienience function that produces all pairwise comparisons from a list'''
    # Yield (earlier, later) for every unordered pair, preserving list order.
    for i, first in enumerate(li):
        for second in li[i + 1:]:
            yield (first, second)
def count_all(xlist, proportions=False):
    '''Count all the items in a list, return a dict
    with the item as key and counts as value.
    If proportions are set to True, the values
    are the proportions not counts'''
    counts = defaultdict(int)
    for item in xlist:
        counts[item] += 1
    if not proportions:
        return counts
    total = float(sum(counts.values()))
    return {item: n / total for item, n in counts.items()}
|
flag0010/python_common
|
old_versions/common3.py
|
Python
|
mit
| 5,604
|
[
"Biopython"
] |
a65757b5ff5feda69ac7d6d513a87c7e8a94d8f641efacc326cd004f99c4a07b
|
from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
from h2o.estimators.gbm import H2OGradientBoostingEstimator
def cv_cars_gbm():
    """Pyunit test of H2O GBM cross-validation behaviour on the cars data.

    Exercises, in order: reproducibility of "Modulo" fold assignment,
    non-reproducibility of "Random" fold assignment, explicit fold
    columns, keep_cross_validation_predictions, boundary cases
    (leave-one-out, nfolds=0), and expected failure modes.  Requires a
    running H2O cluster via the pyunit harness.
    """
    # read in the dataset and construct training set (and validation set)
    cars = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))

    # choose the type model-building exercise (multinomial classification or regression). 0:regression, 1:binomial,
    # 2:multinomial
    # NOTE(review): pinned to the binomial case; the random choice is disabled.
    problem = 1 #random.sample(list(range(3)),1)[0]

    # pick the predictors and response column, along with the correct distribution
    predictors = ["displacement","power","weight","acceleration","year"]
    if problem == 1 :
        response_col = "economy_20mpg"
        distribution = "bernoulli"
        cars[response_col] = cars[response_col].asfactor()
    elif problem == 2 :
        response_col = "cylinders"
        distribution = "multinomial"
        cars[response_col] = cars[response_col].asfactor()
    else :
        response_col = "economy"
        distribution = "gaussian"

    print("Distribution: {0}".format(distribution))
    print("Response column: {0}".format(response_col))

    ## cross-validation
    # 1. check that cv metrics are the same over repeated "Modulo" runs
    nfolds = random.randint(3,10)
    gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds,
                                        distribution=distribution,
                                        ntrees=5,
                                        fold_assignment="Modulo")
    gbm1.train(x=predictors, y=response_col, training_frame=cars)
    gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds,
                                        distribution=distribution,
                                        ntrees=5,
                                        fold_assignment="Modulo")
    gbm2.train(x=predictors, y=response_col, training_frame=cars)
    pyunit_utils.check_models(gbm1, gbm2, True)

    # 2. check that cv metrics are different over repeated "Random" runs
    nfolds = random.randint(3,10)
    gbm1 = H2OGradientBoostingEstimator(nfolds=nfolds,
                                        distribution=distribution,
                                        ntrees=5,
                                        fold_assignment="Random")
    gbm1.train(x=predictors, y=response_col, training_frame=cars)
    gbm2 = H2OGradientBoostingEstimator(nfolds=nfolds,
                                        distribution=distribution,
                                        ntrees=5,
                                        fold_assignment="Random")
    gbm2.train(x=predictors, y=response_col, training_frame=cars)
    # Random fold assignment should (almost surely) produce different models,
    # so an AssertionError from check_models is the expected outcome here.
    try:
        pyunit_utils.check_models(gbm1, gbm2, True)
        assert False, "Expected models to be different over repeated Random runs"
    except AssertionError:
        assert True

    # 3. folds_column
    # The fold column is appended to `cars` and reused by the later
    # error-case sections (fold_column + nfolds / fold_assignment).
    num_folds = random.randint(2,5)
    fold_assignments = h2o.H2OFrame([[random.randint(0,num_folds-1)] for f in range(cars.nrow)])
    fold_assignments.set_names(["fold_assignments"])
    cars = cars.cbind(fold_assignments)
    gbm = H2OGradientBoostingEstimator(distribution=distribution,
                                       ntrees=5,
                                       keep_cross_validation_predictions=True)
    gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
    # NOTE(review): assumes every fold index 0..num_folds-1 was drawn at
    # least once -- statistically near-certain since nrow >> num_folds.
    num_cv_models = len(gbm._model_json['output']['cross_validation_models'])
    assert num_cv_models==num_folds, "Expected {0} cross-validation models, but got " \
                                     "{1}".format(num_folds, num_cv_models)
    # Smoke-check that the CV model ids resolve; the handles themselves
    # are intentionally unused.
    cv_model1 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][0]['name'])
    cv_model2 = h2o.get_model(gbm._model_json['output']['cross_validation_models'][1]['name'])

    # 4. keep_cross_validation_predictions
    # gbm1 here is still the "Random"-fold model from step 2, which was
    # trained WITHOUT keep_cross_validation_predictions.
    cv_predictions = gbm1._model_json['output']['cross_validation_predictions']
    assert cv_predictions is None, "Expected cross-validation predictions to be None, but got {0}".format(cv_predictions)
    cv_predictions = gbm._model_json['output']['cross_validation_predictions']
    assert len(cv_predictions)==num_folds, "Expected the same number of cross-validation predictions " \
                                           "as folds, but got {0}".format(len(cv_predictions))

    ## boundary cases
    # 1. nfolds = number of observations (leave-one-out cross-validation)
    gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow, distribution=distribution,ntrees=5,
                                       fold_assignment="Modulo")
    gbm.train(x=predictors, y=response_col, training_frame=cars)

    # 2. nfolds = 0
    gbm1 = H2OGradientBoostingEstimator(nfolds=0, distribution=distribution, ntrees=5)
    gbm1.train(x=predictors, y=response_col,training_frame=cars)
    # check that this is equivalent to no nfolds
    gbm2 = H2OGradientBoostingEstimator(distribution=distribution, ntrees=5)
    gbm2.train(x=predictors, y=response_col, training_frame=cars)
    pyunit_utils.check_models(gbm1, gbm2)

    # 3. cross-validation and regular validation attempted
    gbm = H2OGradientBoostingEstimator(nfolds=random.randint(3,10),
                                       ntrees=5,
                                       distribution=distribution)
    gbm.train(x=predictors, y=response_col, training_frame=cars, validation_frame=cars)

    ## error cases
    # Each case expects the backend to reject the request; h2o surfaces
    # server-side rejections as EnvironmentError.
    # 1. nfolds == 1 or < 0
    try:
        gbm = H2OGradientBoostingEstimator(nfolds=random.sample([-1,1],1)[0],
                                           ntrees=5,
                                           distribution=distribution)
        gbm.train(x=predictors, y=response_col, training_frame=cars)
        assert False, "Expected model-build to fail when nfolds is 1 or < 0"
    except EnvironmentError:
        assert True

    # 2. more folds than observations
    try:
        gbm = H2OGradientBoostingEstimator(nfolds=cars.nrow+1,
                                           distribution=distribution,
                                           ntrees=5,
                                           fold_assignment="Modulo")
        gbm.train(x=predictors, y=response_col, training_frame=cars)
        assert False, "Expected model-build to fail when nfolds > nobs"
    except EnvironmentError:
        assert True

    # 3. fold_column and nfolds both specified
    try:
        gbm = H2OGradientBoostingEstimator(nfolds=3, ntrees=5, distribution=distribution)
        gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
        assert False, "Expected model-build to fail when fold_column and nfolds both specified"
    except EnvironmentError:
        assert True

    # 4. fold_column and fold_assignment both specified
    try:
        gbm = H2OGradientBoostingEstimator(ntrees=5, fold_assignment="Random", distribution=distribution)
        gbm.train(x=predictors, y=response_col, training_frame=cars, fold_column="fold_assignments")
        assert False, "Expected model-build to fail when fold_column and fold_assignment both specified"
    except EnvironmentError:
        assert True
if __name__ == "__main__":
    # Run under the pyunit harness when executed directly.
    pyunit_utils.standalone_test(cv_cars_gbm)
else:
    # NOTE(review): the test also runs on plain import -- this is the
    # h2o pyunit convention, but surprising; confirm before reusing.
    cv_cars_gbm()
|
YzPaul3/h2o-3
|
h2o-py/tests/testdir_algos/gbm/pyunit_cv_cars_gbm.py
|
Python
|
apache-2.0
| 7,071
|
[
"Gaussian"
] |
34aa8d062d85b2fba451351db4f1711176d997aa280c448ad1bd6904c2c1cdd6
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Validator classes validate data before their GUI representation is closed
by the user. The validator class also takes care of informing the user where
the data is invalid.
This prevents the user from entering invalid data into the model or flushing
it to the database."""
|
kurtraschke/camelot
|
camelot/admin/validator/__init__.py
|
Python
|
gpl-2.0
| 1,346
|
[
"VisIt"
] |
afcbb20db1d318f71b9eff2e128e075dd3e69ce56ad163164f6f6a5ecf6a542d
|
"""
Tests for the Piwik template tags and filters.
"""
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.template import Context
from django.test.utils import override_settings
from analytical.templatetags.piwik import PiwikNode
from analytical.tests.utils import TagTestCase
from analytical.utils import AnalyticalException
@override_settings(PIWIK_DOMAIN_PATH='example.com', PIWIK_SITE_ID='345')
class PiwikTagTestCase(TagTestCase):
    """
    Tests for the ``piwik`` template tag.

    The class-level settings provide a valid domain/site-id pair; each
    test either checks the rendered tracking snippet for exact
    substrings or overrides the settings to exercise validation
    failures (AnalyticalException raised at node construction).
    """

    def test_tag(self):
        """Rendered tag contains tracker URL, site id and noscript image."""
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue('"//example.com/"' in r, r)
        self.assertTrue("_paq.push(['setSiteId', 345]);" in r, r)
        self.assertTrue('img src="//example.com/piwik.php?idsite=345"'
                        in r, r)

    def test_node(self):
        """Rendering the node directly yields the same snippet."""
        r = PiwikNode().render(Context({}))
        self.assertTrue('"//example.com/";' in r, r)
        self.assertTrue("_paq.push(['setSiteId', 345]);" in r, r)
        self.assertTrue('img src="//example.com/piwik.php?idsite=345"'
                        in r, r)

    @override_settings(PIWIK_DOMAIN_PATH='example.com/piwik',
                       PIWIK_SITE_ID='345')
    def test_domain_path_valid(self):
        """A domain with a path component is accepted and rendered."""
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue('"//example.com/piwik/"' in r, r)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:1234',
                       PIWIK_SITE_ID='345')
    def test_domain_port_valid(self):
        """A domain with an explicit port is accepted and rendered."""
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue('"//example.com:1234/";' in r, r)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:1234/piwik',
                       PIWIK_SITE_ID='345')
    def test_domain_port_path_valid(self):
        """A domain with both port and path is accepted and rendered."""
        r = self.render_tag('piwik', 'piwik')
        self.assertTrue('"//example.com:1234/piwik/"' in r, r)

    @override_settings(PIWIK_DOMAIN_PATH=None)
    def test_no_domain(self):
        """Missing domain setting is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_SITE_ID=None)
    def test_no_siteid(self):
        """Missing site id setting is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_SITE_ID='x')
    def test_siteid_not_a_number(self):
        """A non-numeric site id is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='http://www.example.com')
    def test_domain_protocol_invalid(self):
        """A domain carrying a protocol prefix is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com/')
    def test_domain_slash_invalid(self):
        """A trailing slash on the domain is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:123:456')
    def test_domain_multi_port(self):
        """Multiple port separators are rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:')
    def test_domain_incomplete_port(self):
        """A colon with no port number is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:/piwik')
    def test_domain_uri_incomplete_port(self):
        """A colon with no port before a path is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(PIWIK_DOMAIN_PATH='example.com:12df')
    def test_domain_port_invalid(self):
        """A non-numeric port is rejected."""
        self.assertRaises(AnalyticalException, PiwikNode)

    @override_settings(ANALYTICAL_INTERNAL_IPS=['1.1.1.1'])
    def test_render_internal_ip(self):
        """Requests from internal IPs get the snippet commented out."""
        req = HttpRequest()
        req.META['REMOTE_ADDR'] = '1.1.1.1'
        context = Context({'request': req})
        r = PiwikNode().render(context)
        self.assertTrue(r.startswith(
            '<!-- Piwik disabled on internal IP address'), r)
        self.assertTrue(r.endswith('-->'), r)

    def test_uservars(self):
        """Custom variables from the context are pushed with their scope
        (scope defaults to 'page' when omitted)."""
        context = Context({'piwik_vars': [(1, 'foo', 'foo_val'),
                                          (2, 'bar', 'bar_val', 'page'),
                                          (3, 'spam', 'spam_val', 'visit')]})
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik custom variable rendering. Expected:\n%s\nIn:\n%s'
        for var_code in ['_paq.push(["setCustomVariable", 1, "foo", "foo_val", "page"]);',
                         '_paq.push(["setCustomVariable", 2, "bar", "bar_val", "page"]);',
                         '_paq.push(["setCustomVariable", 3, "spam", "spam_val", "visit"]);']:
            self.assertIn(var_code, r, msg % (var_code, r))

    @override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
    def test_default_usertrack(self):
        """With auto-identify, the authenticated user's name is tracked."""
        context = Context({
            'user': User(username='BDFL', first_name='Guido', last_name='van Rossum')
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    def test_piwik_usertrack(self):
        """An explicit piwik_identity context value is tracked."""
        context = Context({
            'piwik_identity': 'BDFL'
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    def test_analytical_usertrack(self):
        """The generic analytical_identity context value is tracked too."""
        context = Context({
            'analytical_identity': 'BDFL'
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nNot found:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertIn(var_code, r, msg % (var_code, r))

    @override_settings(ANALYTICAL_AUTO_IDENTIFY=True)
    def test_disable_usertrack(self):
        """piwik_identity=None overrides auto-identify and disables tracking."""
        context = Context({
            'user': User(username='BDFL', first_name='Guido', last_name='van Rossum'),
            'piwik_identity': None
        })
        r = PiwikNode().render(context)
        msg = 'Incorrect Piwik user tracking rendering.\nFound:\n%s\nIn:\n%s'
        var_code = '_paq.push(["setUserId", "BDFL"]);'
        self.assertNotIn(var_code, r, msg % (var_code, r))

    @override_settings(PIWIK_DISABLE_COOKIES=True)
    def test_disable_cookies(self):
        """PIWIK_DISABLE_COOKIES adds the disableCookies push."""
        r = PiwikNode().render(Context({}))
        self.assertTrue("_paq.push(['disableCookies']);" in r, r)
|
pjdelport/django-analytical
|
analytical/tests/test_tag_piwik.py
|
Python
|
mit
| 6,215
|
[
"VisIt"
] |
87a64819cb5d3cf0f4e925132d7d136c29aacf81cbec05cfa0f1dc1efff34481
|
from behave import when
from django.core.management import execute_from_command_line
from selenium import webdriver
def before_all(context):
    """Behave hook: start a Firefox browser and load fixtures before any
    feature runs."""
    context.browser = webdriver.Firefox()
    context.browser.implicitly_wait(1)  # seconds to poll for elements
    # NOTE(review): assumes a server is already listening on 8081 -- the
    # hook does not start one; confirm against the CI setup.
    context.server_url = 'http://localhost:8081'
    # Populate the test database via Django's loaddata management command.
    execute_from_command_line(['manage.py', 'loaddata',
                               'test_initial_data.json'])
def after_all(context):
    """Behave hook: shut the shared browser down after all features."""
    context.browser.quit()
@when(u'I visit the "{link}" page')
def get_url(context, link=''):
    """Navigate the shared browser to *link*, resolved against the test
    server's base URL (leading/trailing slashes on the link are trimmed)."""
    target = u'/'.join([context.server_url, link.strip('/')])
    context.browser.get(target)
# def before_feature(context, feature):
# pass
# def after_feature(context, feature):
# pass
|
evili/django-jenkins-plugin
|
src/test/resources/org/jenkinsci/plugins/django/simple_test_project/bdd_tests/features/environment.py
|
Python
|
apache-2.0
| 697
|
[
"VisIt"
] |
6bd4324987fe2841ab49b0fb02e333571e24f0e801830050b4c04e8731bdd85b
|
"""
Views for user API
"""
from django.shortcuts import redirect
from django.utils import dateparse
from rest_framework import generics, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from opaque_keys.edx.keys import UsageKey
from opaque_keys import InvalidKeyError
from courseware.access import is_mobile_available_for_user
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from courseware.views import get_current_child, save_positions_recursively_up
from student.models import CourseEnrollment, User
from xblock.fields import Scope
from xblock.runtime import KeyValueStore
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from .serializers import CourseEnrollmentSerializer, UserSerializer
from .. import errors
from ..utils import mobile_view, mobile_course_access
@mobile_view(is_user=True)
class UserDetail(generics.RetrieveAPIView):
    """
    **Use Case**

        Get information about the specified user and
        access other resources the user has permissions for.

        Users are redirected to this endpoint after logging in.

        You can use the **course_enrollments** value in
        the response to get a list of courses the user is enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}

    **Response Values**

        * id: The ID of the user.
        * username: The username of the currently logged in user.
        * email: The email address of the currently logged in user.
        * name: The full name of the currently logged in user.
        * course_enrollments: The URI to list the courses the currently logged
          in user is enrolled in.
    """
    # Joined up front so the serializer does not trigger extra queries
    # when it touches the profile or enrollments.
    queryset = (
        User.objects.all()
        .select_related('profile', 'course_enrollments')
    )
    serializer_class = UserSerializer
    lookup_field = 'username'
@mobile_view(is_user=True)
class UserCourseStatus(views.APIView):
    """
    **Use Case**

        Get or update the ID of the module that the specified user last visited in the specified course.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id}

        PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id}
            body:
                last_visited_module_id={module_id}
                modification_date={date}

            The modification_date is optional. If it is present, the update will only take effect
            if the modification_date is later than the modification_date saved on the server.

    **Response Values**

        * last_visited_module_id: The ID of the last module visited by the user in the course.
        * last_visited_module_path: The ID of the modules in the path from the
          last visited module to the course module.
    """

    http_method_names = ["get", "patch"]

    def _last_visited_module_path(self, request, course):
        """
        Returns the path from the last module visited by the current user in the given course up to
        the course module. If there is no such visit, the first item deep enough down the course
        tree is used.
        """
        # depth=2 caches the course tree down to section level only.
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)

        course_module = get_module_for_descriptor(request.user, request, course, field_data_cache, course.id)

        # Walk course -> chapter -> section, stopping wherever no child
        # position is recorded; the path is built root-first then reversed
        # so the deepest module ends up at index 0.
        path = [course_module]
        chapter = get_current_child(course_module, min_depth=2)
        if chapter is not None:
            path.append(chapter)
            section = get_current_child(chapter, min_depth=1)
            if section is not None:
                path.append(section)

        path.reverse()
        return path

    def _get_course_info(self, request, course):
        """
        Returns the course status
        """
        path = self._last_visited_module_path(request, course)
        # NOTE(review): `unicode` is Python 2 only; this module predates
        # the py3 migration.
        path_ids = [unicode(module.location) for module in path]
        return Response({
            "last_visited_module_id": path_ids[0],
            "last_visited_module_path": path_ids,
        })

    def _update_last_visited_module_id(self, request, course, module_key, modification_date):
        """
        Saves the module id if the found modification_date is less recent than the passed modification date
        """
        field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
            course.id, request.user, course, depth=2)
        try:
            module_descriptor = modulestore().get_item(module_key)
        except ItemNotFoundError:
            return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
        module = get_module_for_descriptor(request.user, request, module_descriptor, field_data_cache, course.id)

        if modification_date:
            # Compare against the stored course-level user state; a stale
            # client date must not overwrite a newer server-side position.
            key = KeyValueStore.Key(
                scope=Scope.user_state,
                user_id=request.user.id,
                block_scope_id=course.location,
                field_name=None
            )
            student_module = field_data_cache.find(key)
            if student_module:
                original_store_date = student_module.modified
                if modification_date < original_store_date:
                    # old modification date so skip update
                    return self._get_course_info(request, course)

        save_positions_recursively_up(request.user, request, field_data_cache, module)
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def get(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Get the ID of the module that the specified user last visited in the specified course.
        """
        return self._get_course_info(request, course)

    @mobile_course_access(depth=2)
    def patch(self, request, course, *args, **kwargs):  # pylint: disable=unused-argument
        """
        Update the ID of the module that the specified user last visited in the specified course.
        """
        module_id = request.DATA.get("last_visited_module_id")
        modification_date_string = request.DATA.get("modification_date")

        modification_date = None
        if modification_date_string:
            modification_date = dateparse.parse_datetime(modification_date_string)
            # Only timezone-aware dates are accepted.
            if not modification_date or not modification_date.tzinfo:
                return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

        if module_id:
            try:
                module_key = UsageKey.from_string(module_id)
            except InvalidKeyError:
                return Response(errors.ERROR_INVALID_MODULE_ID, status=400)

            return self._update_last_visited_module_id(request, course, module_key, modification_date)
        else:
            # The arguments are optional, so if there's no argument just succeed
            return self._get_course_info(request, course)
@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course: honor or
          certified.
        * is_active: Whether the course is currently active; true or false.
        * certificate: Information about the user's earned certificate in the course.
            * url: URL to the downloadable version of the certificate, if exists.
        * course: A collection of data about the course:
            * course_about: The URI to get the data for the course About page.
            * course_updates: The URI to get data for course updates.
            * number: The course number.
            * org: The organization that created the course.
            * video_outline: The URI to get the list of all vides the user can
              access in the course.
            * id: The unique ID of the course.
            * subscription_id: A unique "clean" (alphanumeric with '_') ID of the course.
            * latest_updates: Reserved for future use.
            * end: The end date of the course.
            * name: The name of the course.
            * course_handouts: The URI to get data for course handouts.
            * start: The data and time the course starts.
            * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        # Newest enrollment first; the mobile-availability check cannot be
        # expressed in SQL, so it is applied in Python after the query.
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        return [
            enrollment for enrollment in enrollments
            if enrollment.course and is_mobile_available_for_user(self.request.user, enrollment.course)
        ]
@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect to the currently-logged-in user's info page
    """
    current_username = request.user.username
    return redirect("user-detail", username=current_username)
|
dkarakats/edx-platform
|
lms/djangoapps/mobile_api/users/views.py
|
Python
|
agpl-3.0
| 9,370
|
[
"VisIt"
] |
c3197a749acf29bafed67be21e7e279ab5939da697f28aea0bd11514896c938a
|
########################################################################
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Utilities for managing DIRAC configuration:
getCEsFromCS
getUnusedGridCEs
getUnusedGridSEs
getSiteUpdates
getSEUpdates
"""
__RCSID__ = "$Id$"
import re
import socket
from urlparse import urlparse
from DIRAC import gConfig, gLogger, S_OK, S_ERROR
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import getBdiiCEInfo, getBdiiSEInfo, ldapService
from DIRAC.Core.Utilities.SitesDIRACGOCDBmapping import getDIRACSiteName, getDIRACSesForHostName
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOs, getVOOption
from DIRAC.ConfigurationSystem.Client.PathFinder import getDatabaseSection
def getGridVOs():
  """ Get all the VOMS VO names served by this DIRAC service

      :return: S_OK( list of VOMS VO names ) or the S_ERROR from getVOs
  """
  result = getVOs()
  if not result['OK']:
    # Propagate the registry lookup failure unchanged.
    return result
  # Keep only VOs that define a VOMSName option.  The redundant else
  # branch and the manual accumulator loop of the original are gone.
  voNames = [vomsVO for vomsVO in
             (getVOOption(vo, "VOMSName") for vo in result['Value'])
             if vomsVO]
  return S_OK(voNames)
def getCEsFromCS():
  """ Get all the CEs defined in the CS

      :return: S_OK( list of CE names ) or the S_ERROR from the CS query
  """
  knownCEs = []
  result = gConfig.getSections('/Resources/Sites')
  if not result['OK']:
    return result
  grids = result['Value']

  for grid in grids:
    result = gConfig.getSections('/Resources/Sites/%s' % grid)
    if not result['OK']:
      return result
    sites = result['Value']

    for site in sites:
      result = gConfig.getOptionsDict('/Resources/Sites/%s/%s' % (grid, site))
      if not result['OK']:
        # Bug fix: the original indexed ['Value'] without checking 'OK',
        # which raises KeyError when the CS query fails instead of
        # returning the error to the caller.
        return result
      # The CE option is a comma-separated list of CE names.
      ces = List.fromChar(result['Value'].get('CE', ''))
      knownCEs += ces

  return S_OK(knownCEs)
def getSEsFromCS(protocol='srm'):
  """ Get all the SEs defined in the CS

      :param str protocol: only keep SE access points using this protocol;
                           the special value 'any' matches every protocol
      :return: S_OK( dict mapping host name -> list of SE names ) or S_ERROR
  """
  knownSEs = {}
  result = gConfig.getSections('/Resources/StorageElements')
  if not result['OK']:
    return result

  for se in result['Value']:
    seSection = '/Resources/StorageElements/%s' % se
    accessResult = gConfig.getSections(seSection)
    if not accessResult['OK']:
      continue
    for access in accessResult['Value']:
      seProtocol = gConfig.getValue(cfgPath(seSection, access, 'Protocol'), '')
      # Guard clause replaces the original match/else-continue branching.
      if protocol != 'any' and seProtocol.lower() != protocol.lower():
        continue
      host = gConfig.getValue(cfgPath(seSection, access, 'Host'), '')
      knownSEs.setdefault(host, []).append(se)

  return S_OK(knownSEs)
def getGridCEs(vo, bdiiInfo=None, ceBlackList=None, hostURL=None, glue2=False):
  """ Get all the CEs available for a given VO and having queues in Production state

      :param str vo: VO name used for the BDII query
      :param dict bdiiInfo: pre-fetched BDII CE data; queried with
                            getBdiiCEInfo when None
      :param list ceBlackList: CE names to exclude from the result
      :param str hostURL: alternative BDII host to query
      :param bool glue2: query the GLUE2 schema instead of GLUE1
      :return: S_OK( dict site -> { ce: info dict } ); the raw BDII data
               is also attached to the result under the key 'BdiiInfo'
  """
  knownCEs = set()
  if ceBlackList is not None:
    knownCEs = knownCEs.union(set(ceBlackList))

  ceBdiiDict = bdiiInfo
  if bdiiInfo is None:
    result = getBdiiCEInfo(vo, host=hostURL, glue2=glue2)
    if not result['OK']:
      return result
    ceBdiiDict = result['Value']

  siteDict = {}
  for site in ceBdiiDict:
    # Only CEs not already known (blacklisted) are considered.
    siteCEs = set(ceBdiiDict[site]['CEs'].keys())
    newCEs = siteCEs - knownCEs
    if not newCEs:
      continue

    ceFullDict = {}
    for ce in newCEs:
      ceDict = {}
      ceInfo = ceBdiiDict[site]['CEs'][ce]
      ceType = 'Unknown'
      ceDict['Queues'] = []
      for queue in ceInfo['Queues']:
        queueStatus = ceInfo['Queues'][queue].get('GlueCEStateStatus', 'UnknownStatus')
        if 'production' in queueStatus.lower():
          # The CE type is taken from whichever Production queue is seen last.
          ceType = ceInfo['Queues'][queue].get('GlueCEImplementationName', '')
          ceDict['Queues'].append(queue)
      if not ceDict['Queues']:
        # Skip CEs without a single queue in Production state.
        continue

      ceDict['CEType'] = ceType
      ceDict['GOCSite'] = site
      ceDict['CEID'] = ce
      systemName = ceInfo.get('GlueHostOperatingSystemName', 'Unknown')
      systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', 'Unknown')
      systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', 'Unknown')
      ceDict['System'] = (systemName, systemVersion, systemRelease)

      ceFullDict[ce] = ceDict

    siteDict[site] = ceFullDict

  result = S_OK(siteDict)
  # Extra key on the S_OK dict carrying the raw BDII data.
  result['BdiiInfo'] = ceBdiiDict
  return result
def getSiteUpdates(vo, bdiiInfo=None, log=None):
  """ Get all the necessary updates for the already defined sites and CEs

      Diffs the current CS contents for every DIRAC site mapped to a BDII
      site against the BDII-published values, collecting
      (section, option, currentValue, newValue) tuples for everything
      that should change.

      :param str vo: VO name used for the BDII query
      :param dict bdiiInfo: pre-fetched BDII CE data; queried when None
      :param log: logger to use (gLogger when None)
      :return: S_OK( set of 4-tuples ) or S_ERROR
  """
  def addToChangeSet(entry, changeSet):
    """ Inner function to update changeSet with entry (a tuple)

        :param tuple entry: entry to add to changeSet
        :param set changeSet: set collecting stuff to change
    """
    _section, _option, value, new_value = entry
    # Only record a change when the BDII value is non-empty and differs.
    if new_value and new_value != value:
      changeSet.add(entry)

  if log is None:
    log = gLogger

  ceBdiiDict = bdiiInfo
  if bdiiInfo is None:
    result = getBdiiCEInfo(vo)
    if not result['OK']:
      return result
    ceBdiiDict = result['Value']

  changeSet = set()
  for site in ceBdiiDict:
    result = getDIRACSiteName(site)
    if not result['OK']:
      # BDII site with no DIRAC name mapping: nothing to update.
      continue
    siteNames = result['Value']
    for siteName in siteNames:
      siteSection = cfgPath('/Resources', 'Sites', siteName.split('.')[0], siteName)
      result = gConfig.getOptionsDict(siteSection)
      if not result['OK']:
        continue
      siteDict = result['Value']
      # Current CS values
      coor = siteDict.get('Coordinates', 'Unknown')
      mail = siteDict.get('Mail', 'Unknown').replace(' ', '')
      description = siteDict.get('Description', 'Unknown')
      description = description.replace(' ,', ',')
      longitude = ceBdiiDict[site].get('GlueSiteLongitude', '').strip()
      latitude = ceBdiiDict[site].get('GlueSiteLatitude', '').strip()

      # Current BDII value
      newcoor = ''
      if longitude and latitude:
        newcoor = "%s:%s" % (longitude, latitude)
      newmail = ceBdiiDict[site].get('GlueSiteSysAdminContact', '').replace('mailto:', '').strip()
      newdescription = ceBdiiDict[site].get('GlueSiteDescription', '').strip()
      newdescription = ", ".join([line.strip() for line in newdescription.split(",")])

      # Adding site data to the changes list
      addToChangeSet((siteSection, 'Coordinates', coor, newcoor), changeSet)
      addToChangeSet((siteSection, 'Mail', mail, newmail), changeSet)
      addToChangeSet((siteSection, 'Description', description, newdescription), changeSet)

      ces = gConfig.getValue(cfgPath(siteSection, 'CE'), [])
      for ce in ces:
        ceSection = cfgPath(siteSection, 'CEs', ce)
        ceDict = {}
        result = gConfig.getOptionsDict(ceSection)
        if result['OK']:
          ceDict = result['Value']
        else:
          # CE known to the CS site option but has no section yet.
          if ceBdiiDict[site]['CEs'].get(ce, None):
            log.notice("Adding new CE", "%s to site %s/%s" % (ce, siteName, site))
        ceInfo = ceBdiiDict[site]['CEs'].get(ce, None)
        if ceInfo is None:
          # NOTE(review): dead assignment -- ceType is overwritten below
          # and never used before the continue.
          ceType = ceDict.get('CEType', '')
          continue

        # Current CS CE info
        arch = ceDict.get('architecture', 'Unknown')
        OS = ceDict.get('OS', 'Unknown')
        si00 = ceDict.get('SI00', 'Unknown')
        ceType = ceDict.get('CEType', 'Unknown')
        ram = ceDict.get('MaxRAM', 'Unknown')
        submissionMode = ceDict.get('SubmissionMode', 'Unknown')

        # Current BDII CE info
        newarch = ceBdiiDict[site]['CEs'][ce].get('GlueHostArchitecturePlatformType', '').strip()
        systemName = ceInfo.get('GlueHostOperatingSystemName', '').strip()
        systemVersion = ceInfo.get('GlueHostOperatingSystemVersion', '').strip()
        systemRelease = ceInfo.get('GlueHostOperatingSystemRelease', '').strip()
        newOS = ''
        if systemName and systemVersion and systemRelease:
          newOS = '_'.join((systemName, systemVersion, systemRelease))
        newsi00 = ceInfo.get('GlueHostBenchmarkSI00', '').strip()
        # The CE type is taken from the first queue that publishes one.
        newCEType = 'Unknown'
        for queue in ceInfo['Queues']:
          queueDict = ceInfo['Queues'][queue]
          newCEType = queueDict.get('GlueCEImplementationName', '').strip()
          if newCEType:
            break
        if newCEType == 'ARC-CE':
          newCEType = 'ARC'

        newSubmissionMode = None
        if newCEType in ['ARC', 'CREAM']:
          newSubmissionMode = "Direct"
        newRAM = ceInfo.get('GlueHostMainMemoryRAMSize', '').strip()
        # Protect from unreasonable values
        if newRAM and int(newRAM) > 150000:
          newRAM = ''

        # Adding CE data to the change list
        addToChangeSet((ceSection, 'architecture', arch, newarch), changeSet)
        addToChangeSet((ceSection, 'OS', OS, newOS), changeSet)
        addToChangeSet((ceSection, 'SI00', si00, newsi00), changeSet)
        addToChangeSet((ceSection, 'CEType', ceType, newCEType), changeSet)
        addToChangeSet((ceSection, 'MaxRAM', ram, newRAM), changeSet)
        # SubmissionMode is only ever set, never overwritten.
        if submissionMode == "Unknown" and newSubmissionMode:
          addToChangeSet((ceSection, 'SubmissionMode', submissionMode, newSubmissionMode), changeSet)

        queues = ceInfo['Queues'].keys()
        for queue in queues:
          queueInfo = ceInfo['Queues'][queue]
          queueStatus = queueInfo['GlueCEStateStatus']
          queueSection = cfgPath(ceSection, 'Queues', queue)
          queueDict = {}
          result = gConfig.getOptionsDict(queueSection)
          if result['OK']:
            queueDict = result['Value']
          else:
            if queueStatus.lower() == "production":
              log.notice("Adding new queue", "%s to CE %s" % (queue, ce))
            else:
              # Unknown queue that is not in Production: ignore it.
              continue

          # Current CS queue info
          maxCPUTime = queueDict.get('maxCPUTime', 'Unknown')
          si00 = queueDict.get('SI00', 'Unknown')
          maxTotalJobs = queueDict.get('MaxTotalJobs', 'Unknown')

          # Current BDII queue info
          # All-4s / all-9s values are BDII placeholders for "unset".
          newMaxCPUTime = queueInfo.get('GlueCEPolicyMaxCPUTime', '')
          if newMaxCPUTime == "4" * len(newMaxCPUTime) or newMaxCPUTime == "9" * len(newMaxCPUTime):
            newMaxCPUTime = ''
          wallTime = queueInfo.get('GlueCEPolicyMaxWallClockTime', '')
          if wallTime == "4" * len(wallTime) or wallTime == "9" * len(wallTime):
            wallTime = ''
          # Heuristic: cap the CPU time at 80% of the wall-clock limit.
          if wallTime and int(wallTime) > 0:
            if not newMaxCPUTime:
              newMaxCPUTime = str(int(0.8 * int(wallTime)))
            else:
              if int(wallTime) <= int(newMaxCPUTime):
                newMaxCPUTime = str(int(0.8 * int(wallTime)))
          newSI00 = ''
          caps = queueInfo.get('GlueCECapability', [])
          if isinstance(caps, basestring):
            # NOTE(review): basestring -- this module is Python 2 code.
            caps = [caps]
          for cap in caps:
            if 'CPUScalingReferenceSI00' in cap:
              newSI00 = cap.split('=')[-1]

          # Adding queue info to the CS
          addToChangeSet((queueSection, 'maxCPUTime', maxCPUTime, newMaxCPUTime), changeSet)
          addToChangeSet((queueSection, 'SI00', si00, newSI00), changeSet)
          # First-time defaults: half the CPUs (max 1000) and 10% waiting.
          if maxTotalJobs == "Unknown":
            newTotalJobs = min(1000, int(int(queueInfo.get('GlueCEInfoTotalCPUs', 0)) / 2))
            newWaitingJobs = max(2, int(newTotalJobs * 0.1))
            newTotalJobs = str(newTotalJobs)
            newWaitingJobs = str(newWaitingJobs)
            addToChangeSet((queueSection, 'MaxTotalJobs', '', newTotalJobs), changeSet)
            addToChangeSet((queueSection, 'MaxWaitingJobs', '', newWaitingJobs), changeSet)

          # Updating eligible VO list
          VOs = set()
          if queueDict.get('VO', ''):
            VOs = set([q.strip() for q in queueDict.get('VO', '').split(',') if q])
          if vo not in VOs:
            VOs.add(vo)
            VOs = list(VOs)
            newVOs = ','.join(VOs)
            addToChangeSet((queueSection, 'VO', '', newVOs), changeSet)

  return S_OK(changeSet)
def getGridSEs(vo, bdiiInfo=None, seBlackList=None):
  """ Get all the SEs available for a given VO.

      :param str vo: virtual organization name
      :param dict bdiiInfo: BDII SE information as returned by getBdiiSEInfo;
                            if None, it is fetched from the BDII
      :param seBlackList: iterable of SE names to exclude from the result
      :return: S_OK with a dictionary {site: {gridSE: infoDict}}; the returned
               structure also carries the raw BDII data under the 'BdiiInfo' key
  """
  seBdiiDict = bdiiInfo
  if bdiiInfo is None:
    result = getBdiiSEInfo(vo)
    if not result['OK']:
      return result
    seBdiiDict = result['Value']

  # SEs that must be skipped (explicitly blacklisted by the caller)
  knownSEs = set()
  if seBlackList is not None:
    knownSEs = knownSEs.union(set(seBlackList))

  siteDict = {}
  for site in seBdiiDict:
    for gridSE, seDict in seBdiiDict[site]['SEs'].items():
      if gridSE in knownSEs:
        continue
      siteDict.setdefault(site, {})
      # GlueSAAccessControlBaseRule may be a single string or a list of 'VO:<name>' rules
      rules = seDict['GlueSAAccessControlBaseRule']
      if not isinstance(rules, list):
        rules = [rules]
      voList = [re.sub('^VO:', '', rule) for rule in rules]
      siteDict[site][gridSE] = {'GridSite': seDict['GlueSiteUniqueID'],
                                'BackendType': seDict['GlueSEImplementationName'],
                                'Description': seDict.get('GlueSEName', '-'),
                                'VOs': voList
                                }

  result = S_OK(siteDict)
  result['BdiiInfo'] = seBdiiDict
  return result
def getGridSRMs(vo, bdiiInfo=None, srmBlackList=None, unUsed=False):
  """ Get SRM service endpoints from the BDII together with the matching SE records.

      :param str vo: virtual organization name
      :param dict bdiiInfo: BDII SE information as returned by getBdiiSEInfo;
                            if None, it is fetched from the BDII
      :param srmBlackList: iterable of SRM host names to skip
      :param bool unUsed: if True, keep only SRMs that are not yet registered
                          as DIRAC SEs
      :return: S_OK with {site: {srmHost: {'SRM': srmRecord, 'SE': seRecord}}}
  """
  # SRM endpoints are always discovered via LDAP; bdiiInfo is only used for the SE part below
  result = ldapService(serviceType='SRM', vo=vo)
  if not result['OK']:
    return result
  srmBdiiDict = result['Value']
  knownSRMs = set()
  if srmBlackList is not None:
    knownSRMs = knownSRMs.union(set(srmBlackList))
  siteSRMDict = {}
  for srm in srmBdiiDict:
    srm = dict(srm)
    endPoint = srm.get('GlueServiceEndpoint', '')
    srmHost = ''
    if endPoint:
      srmHost = urlparse(endPoint).hostname
    if not srmHost:
      continue
    if srmHost in knownSRMs:
      continue
    if unUsed:
      result = getDIRACSesForHostName(srmHost)
      if not result['OK']:
        return result
      diracSEs = result['Value']
      if diracSEs:
        # If it is a known SRM and only new SRMs are requested, continue
        continue
    site = srm.get('GlueForeignKey', '').replace('GlueSiteUniqueID=', '')
    siteSRMDict.setdefault(site, {})
    siteSRMDict[site][srmHost] = srm
  # SE information: use the supplied BDII snapshot if given, otherwise query the BDII
  if bdiiInfo is None:
    result = getBdiiSEInfo(vo)
    if not result['OK']:
      return result
    seBdiiDict = dict(result['Value'])
  else:
    seBdiiDict = dict(bdiiInfo)
  # Keep only SRMs for which a matching SE record exists at the same site
  srmSeDict = {}
  for site in siteSRMDict:
    srms = siteSRMDict[site].keys()
    for srm in srms:
      if seBdiiDict.get(site, {}).get('SEs', {}).get(srm, {}):
        srmSeDict.setdefault(site, {})
        srmSeDict[site].setdefault(srm, {})
        srmSeDict[site][srm]['SRM'] = siteSRMDict[site][srm]
        srmSeDict[site][srm]['SE'] = seBdiiDict[site]['SEs'][srm]
  return S_OK(srmSeDict)
def getSRMUpdates(vo, bdiiInfo=None):
  """ Compare SRM/SE information stored in the CS with the current BDII content
      and return the set of CS changes needed to bring them in sync.

      :param str vo: virtual organization name
      :param dict bdiiInfo: BDII SE information; fetched from the BDII if None
      :return: S_OK with a set of (section, option, oldValue, newValue) tuples
  """
  changeSet = set()

  def addToChangeSet(entry, changeSet):
    # Register the change only if a new value exists and differs from the CS one
    _section, _option, value, new_value = entry
    if new_value and new_value != value:
      changeSet.add(entry)
  result = getGridSRMs(vo, bdiiInfo=bdiiInfo)
  if not result['OK']:
    return result
  srmBdiiDict = result['Value']
  result = getSEsFromCS()
  if not result['OK']:
    return result
  seDict = result['Value']
  # VO list from the CS; fall back to just the requested VO if unavailable
  result = getVOs()
  if result['OK']:
    csVOs = set(result['Value'])
  else:
    csVOs = set([vo])
  for seHost, diracSE in seDict.items():
    seSection = '/Resources/StorageElements/%s' % diracSE[0]
    # Look up existing values first
    description = gConfig.getValue(cfgPath(seSection, 'Description'), 'Unknown')
    backend = gConfig.getValue(cfgPath(seSection, 'BackendType'), 'Unknown')
    vos = gConfig.getValue(cfgPath(seSection, 'VO'), 'Unknown').replace(' ', '')
    size = gConfig.getValue(cfgPath(seSection, 'TotalSize'), 'Unknown')
    # Look up current BDII values
    srmDict = {}
    seBdiiDict = {}
    for site in srmBdiiDict:
      if seHost in srmBdiiDict[site]:
        srmDict = srmBdiiDict[site][seHost]['SRM']
        seBdiiDict = srmBdiiDict[site][seHost]['SE']
        break
    if not srmDict or not seBdiiDict:
      continue
    newDescription = seBdiiDict.get('GlueSEName', 'Unknown')
    newBackend = seBdiiDict.get('GlueSEImplementationName', 'Unknown')
    newSize = seBdiiDict.get('GlueSESizeTotal', 'Unknown')
    addToChangeSet((seSection, 'Description', description, newDescription), changeSet)
    addToChangeSet((seSection, 'BackendType', backend, newBackend), changeSet)
    addToChangeSet((seSection, 'TotalSize', size, newSize), changeSet)
    # Evaluate VOs if no space token defined, otherwise this is VO specific
    spaceToken = ''
    # probe AccessProtocol.1 .. AccessProtocol.9 for an SRM protocol entry
    for i in range(1, 10):
      protocol = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'Protocol'), '')
      if protocol.lower() == 'srm':
        spaceToken = gConfig.getValue(cfgPath(seSection, 'AccessProtocol.%d' % i, 'SpaceToken'), '')
        break
    if not spaceToken:
      bdiiVOs = srmDict.get('GlueServiceAccessControlBaseRule', [])
      bdiiVOs = set([re.sub('^VO:', '', rule) for rule in bdiiVOs])
      # only VOs known both to the CS and to the BDII are eligible
      seVOs = csVOs.intersection(bdiiVOs)
      newVOs = ','.join(seVOs)
      addToChangeSet((seSection, 'VO', vos, newVOs), changeSet)
  return S_OK(changeSet)
def getDBParameters(fullname):
  """ Retrieve MySQL database connection parameters from the CS.

      The per-database section is looked up first; for each option not found
      there, the common section /Systems/Databases is tried next. Host, User,
      Password and DBName are mandatory: a missing value results in S_ERROR.
      Port is optional and defaults to the MySQL standard port 3306.

      :param str fullname: should be of the form <System>/<DBname>
      :return: S_OK with a dictionary with the keys 'Host', 'Port', 'User',
               'Password' and 'DBName', or S_ERROR if a mandatory option
               cannot be found
  """
  cs_path = getDatabaseSection(fullname)
  parameters = {}

  result = gConfig.getOption(cs_path + '/Host')
  if not result['OK']:
    # No host name found, try at the common place
    result = gConfig.getOption('/Systems/Databases/Host')
    if not result['OK']:
      return S_ERROR('Failed to get the configuration parameter: Host')
  dbHost = result['Value']
  # Check if the host is the local one and then set it to 'localhost' to use
  # a socket connection
  if dbHost != 'localhost':
    localHostName = socket.getfqdn()
    if localHostName == dbHost:
      dbHost = 'localhost'
  parameters['Host'] = dbHost

  # Mysql standard port is kept as the default if no Port option is found
  dbPort = 3306
  result = gConfig.getOption(cs_path + '/Port')
  if not result['OK']:
    # No individual port number found, try at the common place
    result = gConfig.getOption('/Systems/Databases/Port')
  if result['OK']:
    dbPort = int(result['Value'])
  parameters['Port'] = dbPort

  result = gConfig.getOption(cs_path + '/User')
  if not result['OK']:
    # No individual user name found, try at the common place
    result = gConfig.getOption('/Systems/Databases/User')
    if not result['OK']:
      return S_ERROR('Failed to get the configuration parameter: User')
  dbUser = result['Value']
  parameters['User'] = dbUser

  result = gConfig.getOption(cs_path + '/Password')
  if not result['OK']:
    # No individual password found, try at the common place
    result = gConfig.getOption('/Systems/Databases/Password')
    if not result['OK']:
      return S_ERROR('Failed to get the configuration parameter: Password')
  dbPass = result['Value']
  parameters['Password'] = dbPass

  # DBName has no common fallback: it is always database specific
  result = gConfig.getOption(cs_path + '/DBName')
  if not result['OK']:
    return S_ERROR('Failed to get the configuration parameter: DBName')
  dbName = result['Value']
  parameters['DBName'] = dbName

  return S_OK(parameters)
def getElasticDBParameters(fullname):
  """ Retrieve Elasticsearch database parameters from the CS.

      Each option is looked up in the database-specific section first and in
      the common /Systems/NoSQLDatabases section next. All options are
      optional: missing values are logged as warnings and replaced by
      defaults (Host: 'localhost', Port: None, User/Password: None,
      SSL: True).

      :param str fullname: should be of the form <System>/<DBname>
      :return: S_OK with a dictionary with the keys 'Host', 'Port', 'User',
               'Password' and 'SSL'
  """
  cs_path = getDatabaseSection(fullname)
  parameters = {}

  def lookupOption(option):
    # Database-specific value wins; otherwise fall back to the common section
    res = gConfig.getOption(cs_path + '/' + option)
    if not res['OK']:
      res = gConfig.getOption('/Systems/NoSQLDatabases/' + option)
    return res

  res = lookupOption('Host')
  if res['OK']:
    dbHost = res['Value']
  else:
    gLogger.warn("Failed to get the configuration parameter: Host. Using localhost")
    dbHost = 'localhost'
  # Use a socket connection when the configured host is the local machine
  if dbHost != 'localhost' and socket.getfqdn() == dbHost:
    dbHost = 'localhost'
  parameters['Host'] = dbHost

  res = lookupOption('Port')
  if res['OK']:
    dbPort = int(res['Value'])
  else:
    gLogger.warn("No configuration parameter set for Port, assuming URL points to right location")
    dbPort = None
  parameters['Port'] = dbPort

  res = lookupOption('User')
  if res['OK']:
    dbUser = res['Value']
  else:
    gLogger.warn("Failed to get the configuration parameter: User. Assuming no user/password is provided/needed")
    dbUser = None
  parameters['User'] = dbUser

  res = lookupOption('Password')
  if res['OK']:
    dbPass = res['Value']
  else:
    gLogger.warn("Failed to get the configuration parameter: Password. Assuming no user/password is provided/needed")
    dbPass = None
  parameters['Password'] = dbPass

  res = lookupOption('SSL')
  if res['OK']:
    ssl = res['Value'].lower() not in ('false', 'no', 'n')
  else:
    gLogger.warn("Failed to get the configuration parameter: SSL. Assuming SSL is needed")
    ssl = True
  parameters['SSL'] = ssl

  return S_OK(parameters)
|
andresailer/DIRAC
|
ConfigurationSystem/Client/Utilities.py
|
Python
|
gpl-3.0
| 22,730
|
[
"DIRAC"
] |
10fa5dd1293a86c5aeffc22e3fd9593353b88f9c0c7839a87cbd4923ab179f3b
|
import base64
import json
import os
from contextlib import closing
from urllib.parse import urlparse, parse_qs
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.timezone import datetime
from django.shortcuts import reverse
from django.test import override_settings
from django.test.client import RequestFactory
from badgrsocialauth.models import Saml2Configuration, Saml2Account
from badgrsocialauth.views import auto_provision, saml2_client_for, create_saml_config_for
from badgrsocialauth.utils import set_session_authcode, set_session_badgr_app, userdata_from_saml_assertion
from badgeuser.models import CachedEmailAddress, BadgeUser
from mainsite.models import BadgrApp
from mainsite.tests import BadgrTestCase
from mainsite import TOP_DIR
from mainsite.utils import set_url_query_params
from saml2 import config, saml, BINDING_SOAP, BINDING_HTTP_REDIRECT, BINDING_HTTP_POST
from saml2.authn_context import authn_context_class_ref
# TODO: Revert to library code once library is fixed for python3
# from saml2.metadata import create_metadata_string
from badgrsocialauth.saml2_utils import create_metadata_string
from saml2.saml import AuthnContext, AuthnStatement, NAME_FORMAT_URI, NAMEID_FORMAT_PERSISTENT, \
NAME_FORMAT_BASIC, AUTHN_PASSWORD_PROTECTED
from saml2.server import Server
from saml2.s_utils import MissingValue
class SAML2Tests(BadgrTestCase):
    """Integration tests for the badgrsocialauth SAML2 login flow.

    Exercises SP metadata generation, the OAuth-to-SAML redirect, account
    auto-provisioning (including e-mail conflicts and case variants), and
    full signed authn round-trips against an in-process pysaml2 IdP built
    from the test fixture files in ``apps/badgrsocialauth/testfiles``.
    """

    def setUp(self):
        super(SAML2Tests, self).setUp()
        self.test_files_path = os.path.join(TOP_DIR, 'apps', 'badgrsocialauth', 'testfiles')
        self.idp_metadata_for_sp_config_path = os.path.join(self.test_files_path, 'idp-metadata-for-saml2configuration.xml')
        with open(self.idp_metadata_for_sp_config_path, 'r') as f:
            metadata_xml = f.read()
        self.config = Saml2Configuration.objects.create(
            metadata_conf_url="http://example.com",
            slug="saml2.test",
            cached_metadata=metadata_xml
        )
        self.badgr_app = BadgrApp.objects.create(
            ui_login_redirect="https://example.com",
            ui_signup_failure_redirect='https://example.com/fail'
        )
        self.badgr_app.is_default = True
        self.badgr_app.save()
        # Test IdP keypair used for signing assertions/metadata
        self.ipd_cert_path = os.path.join(self.test_files_path, 'idp-test-cert.pem')
        self.ipd_key_path = os.path.join(self.test_files_path, 'idp-test-key.pem')
        self.sp_acs_location = 'http://localhost:8000/account/saml2/{}/acs/'.format(self.config.slug)

    def _skip_if_xmlsec_binary_missing(self):
        # Signing tests require the external xmlsec1 binary; skip if not configured
        xmlsec_binary_path = getattr(settings, 'XMLSEC_BINARY_PATH', None)
        if xmlsec_binary_path is None:
            self.skipTest("SKIPPING: In order to test XML Signing, XMLSEC_BINARY_PATH to xmlsec1 must be configured.")

    def _initiate_login(self, idp_name, badgr_app, user=None):
        # Sets a BadgrApp in the session for later redirect, allows setting of a session authcode
        url = set_url_query_params(reverse('socialaccount_login'), provider=idp_name)
        if user is not None:
            # For an authenticated user, go through the "connect account" preflight instead
            self.client.force_authenticate(user=user)
            preflight_response = self.client.get(
                reverse('v2_api_user_socialaccount_connect') + '?provider={}'.format(idp_name)
            )
            location = urlparse(preflight_response.data['result']['url'])
            url = '?'.join([location.path, location.query])  # strip server info from location
        return self.client.get(url, HTTP_REFERER=badgr_app.ui_login_redirect)

    def test_signed_authn_request_option_creates_signed_metadata(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        with override_settings(
                SAML_KEY_FILE=self.ipd_key_path,
                SAML_CERT_FILE=self.ipd_cert_path):
            saml_client, config = saml2_client_for(self.config)
            self.assertTrue(saml_client.authn_requests_signed)
            self.assertNotEqual(saml_client.sec.sec_backend, None)

    def test_signed_authn_request_option_returns_self_posting_form_populated_with_signed_metadata(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        with override_settings(
                SAML_KEY_FILE=self.ipd_key_path,
                SAML_CERT_FILE=self.ipd_cert_path):
            authn_request = self.config
            url = '/account/sociallogin?provider=' + authn_request.slug
            redirect_url = '/account/saml2/' + authn_request.slug + '/'
            response = self.client.get(url, follow=True)
            intermediate_url, intermediate_url_status = response.redirect_chain[0]
            # login redirect to saml2 login
            self.assertEqual(intermediate_url, redirect_url)
            self.assertEqual(intermediate_url_status, 302)
            # self populated form generated with metadata file from self.ipd_metadata_path
            self.assertEqual(response.status_code, 200)
            # changing attribute location of element md:SingleSignOnService necessitates updating this value
            self.assertIsNot(
                response.content.find(b'<form action="https://example.com/saml2/idp/SSOService.php" method="post">'), -1)
            self.assertIsNot(
                response.content.find(b'<input type="hidden" name="SAMLRequest" value="'), -1)

    def test_create_saml2_client(self):
        Saml2Configuration.objects.create(metadata_conf_url="http://example.com", cached_metadata="<xml></xml>", slug="saml2.test2")
        client = saml2_client_for("saml2.test2")
        self.assertNotEqual(client, None)

    def test_oauth_to_saml2_redirection_flow(self):
        resp = self.client.get('/account/sociallogin?provider=' + self.config.slug)
        self.assertEqual(resp.status_code, 302)
        self.assertEqual(resp.url, '/account/saml2/{}/'.format(self.config.slug))

    def test_login_with_registered_saml2_account(self):
        email = "test123@example.com"
        first_name = "firsty"
        last_name = "lastington"
        new_user = BadgeUser.objects.create(
            email=email,
            first_name=first_name,
            last_name=last_name,
        )
        # Auto verify emails
        cached_email = CachedEmailAddress.objects.get(email=email)
        cached_email.verified = True
        cached_email.save()
        Saml2Account.objects.create(config=self.config, user=new_user, uuid=email)
        badgr_app = BadgrApp.objects.create(ui_login_redirect="example.com", cors='example.com')
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)

    def test_login_with_unregistered_saml2_account(self):
        email = "test456@example.com"
        first_name = "firsty"
        last_name = "lastington"
        badgr_app = self.badgr_app
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        # follow the redirect chain until the authToken appears in the URL
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)

    def test_login_with_email_variant(self):
        email = "testemail@example.com"
        first_name = "firsty"
        last_name = "lastington"
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        # Same address with different case: expect an authcode confirmation round-trip
        email = "testEMAIL@example.com"
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertIn("authcode", resp.url)

    def test_saml2_login_with_conflicts(self):
        email = "test8679@example.com"
        email2 = "test234425@example.com"
        first_name = "firsty"
        last_name = "lastington"
        idp_name = self.config.slug
        badgr_app = self.badgr_app
        # email does not exist
        resp = auto_provision(
            None, ["different425@example.com"], first_name, last_name, self.config
        )
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        self.assertEqual(Saml2Account.objects.all().count(), 1)
        email_address = CachedEmailAddress.objects.get(email='different425@example.com')
        self.assertTrue(email_address.verified)
        self.assertTrue(email_address.primary)
        # email exists, but is unverified
        BadgeUser.objects.create(
            email=email,
            first_name=first_name,
            last_name=last_name,
            send_confirmation=False
        )
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        email_address = CachedEmailAddress.objects.get(email=email)
        self.assertTrue(email_address.verified)
        self.assertTrue(email_address.primary)
        # Can auto provision again
        resp = auto_provision(None, [email], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        # email exists, but is verified
        BadgeUser.objects.create(
            email=email2,
            first_name=first_name,
            last_name=last_name,
            send_confirmation=False
        )
        cachedemail = CachedEmailAddress.objects.get(email=email2)
        cachedemail.verified = True
        cachedemail.save()
        saml_account_count = Saml2Account.objects.count()
        self._initiate_login(idp_name, badgr_app)
        resp = auto_provision(None, [email2], first_name, last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authError=Could+not", resp.url)
        self.assertIn(self.config.slug, resp.url)
        self.assertEqual(saml_account_count, Saml2Account.objects.count(), "A Saml2Account must not have been created.")
        resp = self.client.get(resp.url)
        self.assertIn(self.config.slug, resp.url, "Query params are included in the response all the way back to the UI")

    def test_add_samlaccount_to_existing_user(self):
        # An existing authenticated user connects a SAML account via the authcode flow
        email = 'exampleuser@example.com'
        test_user = self.setup_user(
            email=email,
            token_scope='rw:profile rw:issuer rw:backpack'
        )
        preflight_response = self.client.get(
            reverse('v2_api_user_socialaccount_connect') + '?provider={}'.format(self.config.slug)
        )
        self.assertEqual(preflight_response.status_code, 200)
        location = urlparse(preflight_response.data['result']['url'])
        authcode = parse_qs(location.query)['authCode'][0]
        location = '?'.join([location.path, location.query])
        # the location now includes an auth code
        self.client.logout()
        response = self.client.get(location)
        self.assertEqual(response.status_code, 302)
        # NOTE(review): response._headers is a private Django attribute (removed in Django 3.2+)
        location = response._headers['location'][1]
        response = self.client.get(location)
        self.assertEqual(response.status_code, 302)
        # Can auto provision again
        rf = RequestFactory()
        fake_request = rf.post(
            reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
            {'saml_assertion': 'very fake'}
        )
        fake_request.session = dict()
        set_session_authcode(fake_request, authcode)
        resp = auto_provision(
            fake_request, [email], test_user.first_name, test_user.last_name, self.config
        )
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        account = Saml2Account.objects.get(user=test_user)

    def get_idp_config(self, meta=None):
        """Build a pysaml2 IdP configuration dict for the in-process test IdP.

        :param meta: SP metadata XML string to load inline; when None, the
                     local fixture metadata files are used instead.
        """
        metadata_sp_1 = os.path.join(self.test_files_path, 'metadata_sp_1.xml')
        metadata_sp_2 = os.path.join(self.test_files_path, 'metadata_sp_2.xml')
        vo_metadata = os.path.join(self.test_files_path, 'vo_metadata.xml')
        attribute_map_dir = os.path.join(self.test_files_path, 'attributemaps')
        BASE = "http://localhost:8088"
        local_metadata = {"local": [metadata_sp_1, metadata_sp_2, vo_metadata]}
        metadata_source = local_metadata if meta is None else {'inline': [meta]}
        return {
            "entityid": "urn:mace:example.com:saml:roland:idp",
            "name": "Rolands IdP",
            "service": {
                "idp": {
                    "endpoints": {
                        "single_sign_on_service": [
                            ("%s/sso" % BASE, BINDING_HTTP_REDIRECT)],
                        "single_logout_service": [
                            ("%s/slo" % BASE, BINDING_SOAP),
                            ("%s/slop" % BASE, BINDING_HTTP_POST)]
                    },
                    "policy": {
                        "default": {
                            "lifetime": {"minutes": 15},
                            "attribute_restrictions": None,  # means all I have
                            "name_form": NAME_FORMAT_URI,
                        },
                        # per-SP policy for our own ACS endpoint
                        self.sp_acs_location: {
                            "lifetime": {"minutes": 5},
                            "nameid_format": NAMEID_FORMAT_PERSISTENT,
                        },
                        "https://example.com/sp": {
                            "lifetime": {"minutes": 5},
                            "nameid_format": NAMEID_FORMAT_PERSISTENT,
                            "name_form": NAME_FORMAT_BASIC
                        }
                    },
                },
            },
            "debug": 1,
            "key_file": self.ipd_key_path,
            "cert_file": self.ipd_cert_path,
            "xmlsec_binary": getattr(settings, 'XMLSEC_BINARY_PATH', None),
            "metadata": metadata_source,
            "attribute_map_dir": attribute_map_dir,
            "organization": {
                "name": "Exempel AB",
                "display_name": [("Exempel AB", "se"), ("Example Co.", "en")],
                "url": "http://www.example.com/roland",
            },
            "contact_person": [
                {
                    "given_name": "John",
                    "sur_name": "Smith",
                    "email_address": ["john.smith@example.com"],
                    "contact_type": "technical",
                },
            ],
        }

    def get_authn_response(self, idp_config, identity):
        """Produce a signed SAML AuthnResponse for *identity* from the test IdP."""
        with closing(SamlServer(idp_config)) as server:
            name_id = server.ident.transient_nameid(
                "urn:mace:example.com:saml:roland:idp", "id12")
            authn_context_ref = authn_context_class_ref(AUTHN_PASSWORD_PROTECTED)
            authn_context = AuthnContext(authn_context_class_ref=authn_context_ref)
            locality = saml.SubjectLocality()
            locality.address = "172.31.25.30"
            authn_statement = AuthnStatement(
                subject_locality=locality,
                authn_instant=datetime.now().isoformat(),
                authn_context=authn_context,
                session_index="id12"
            )
            return server.create_authn_response(
                identity,
                "id12",  # in_response_to
                self.sp_acs_location,  # consumer_url. config.sp.endpoints.assertion_consumer_service:["acs_endpoint"]
                self.sp_acs_location,  # sp_entity_id
                name_id=name_id,
                sign_assertion=True,
                sign_response=True,
                authn_statement=authn_statement
            )

    def test_saml2_create_account(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        with override_settings(SAML_KEY_FILE=self.ipd_key_path, SAML_CERT_FILE=self.ipd_cert_path):
            saml2config = self.config
            sp_config = config.SPConfig()
            sp_config.load(create_saml_config_for(saml2config))
            sp_metadata = create_metadata_string('', config=sp_config, sign=True)
            idp_config = self.get_idp_config(sp_metadata)
            identity = {"eduPersonAffiliation": ["staff", "member"],
                        "surName": ["Jeter"], "givenName": ["Derek"],
                        "mail": ["foo@gmail.com"],
                        "title": ["shortstop"]}
            authn_response = self.get_authn_response(idp_config, identity)
            base64_encoded_response_metadata = base64.b64encode(authn_response.encode('utf-8'))
            base_64_utf8_response_metadata = base64_encoded_response_metadata.decode('utf-8')
            response = self.client.post(
                reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
                {'SAMLResponse': base_64_utf8_response_metadata}
            )
            self.assertEqual(response.status_code, 302)
            location = response._headers['location'][1]
            response = self.client.get(location)
            self.assertEqual(Saml2Account.objects.count(), 1)
            self.assertEqual(CachedEmailAddress.objects.count(), 1)
            self.assertEqual(BadgeUser.objects.count(), 1)

    def test_saml2_create_account_multiple_emails(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        with override_settings(SAML_KEY_FILE=self.ipd_key_path, SAML_CERT_FILE=self.ipd_cert_path):
            saml2config = self.config
            sp_config = config.SPConfig()
            sp_config.load(create_saml_config_for(saml2config))
            sp_metadata = create_metadata_string('', config=sp_config, sign=True)
            idp_config = self.get_idp_config(sp_metadata)
            # two addresses in the 'mail' attribute -> two cached emails, one user
            identity = {"eduPersonAffiliation": ["staff", "member"],
                        "surName": ["Jeter"], "givenName": ["Derek"],
                        "mail": ["foo@gmail.com", "foo2@gmail.com"],
                        "title": ["shortstop"]}
            authn_response = self.get_authn_response(idp_config, identity)
            base64_encoded_response_metadata = base64.b64encode(authn_response.encode('utf-8'))
            base_64_utf8_response_metadata = base64_encoded_response_metadata.decode('utf-8')
            response = self.client.post(
                reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
                {'SAMLResponse': base_64_utf8_response_metadata}
            )
            self.assertEqual(response.status_code, 302)
            location = response._headers['location'][1]
            response = self.client.get(location)
            self.assertEqual(Saml2Account.objects.count(), 1)
            self.assertEqual(CachedEmailAddress.objects.count(), 2)
            self.assertEqual(BadgeUser.objects.count(), 1)

    def test_saml2_create_account_multiple_email_assertions(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        with override_settings(SAML_KEY_FILE=self.ipd_key_path, SAML_CERT_FILE=self.ipd_cert_path):
            saml2config = self.config
            sp_config = config.SPConfig()
            sp_config.load(create_saml_config_for(saml2config))
            sp_metadata = create_metadata_string('', config=sp_config, sign=True)
            idp_config = self.get_idp_config(sp_metadata)
            # addresses spread over both 'mail' and 'email' attributes are all collected
            identity = {"eduPersonAffiliation": ["staff", "member"],
                        "surName": ["Jeter"], "givenName": ["Derek"],
                        "mail": ["foo@gmail.com", "foo2@gmail.com"],
                        "email": ["foo3@gmail.com"],
                        "title": ["shortstop"]}
            authn_response = self.get_authn_response(idp_config, identity)
            base64_encoded_response_metadata = base64.b64encode(authn_response.encode('utf-8'))
            base_64_utf8_response_metadata = base64_encoded_response_metadata.decode('utf-8')
            response = self.client.post(
                reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
                {'SAMLResponse': base_64_utf8_response_metadata}
            )
            self.assertEqual(response.status_code, 302)
            location = response._headers['location'][1]
            response = self.client.get(location)
            self.assertEqual(Saml2Account.objects.count(), 1)
            self.assertEqual(CachedEmailAddress.objects.count(), 3)
            self.assertEqual(BadgeUser.objects.count(), 1)

    def test_saml2_create_account_multiple_email_already_taken(self):
        self._skip_if_xmlsec_binary_missing()
        self.config.use_signed_authn_request = True
        self.config.save()
        email = 'exampleuser@example.com'
        t_user = self.setup_user(
            email=email,
            token_scope='rw:profile rw:issuer rw:backpack'
        )
        with override_settings(SAML_KEY_FILE=self.ipd_key_path, SAML_CERT_FILE=self.ipd_cert_path):
            saml2config = self.config
            sp_config = config.SPConfig()
            sp_config.load(create_saml_config_for(saml2config))
            sp_metadata = create_metadata_string('', config=sp_config, sign=True)
            idp_config = self.get_idp_config(sp_metadata)
            # one of the asserted addresses already belongs to t_user: no account must be created
            identity = {"eduPersonAffiliation": ["staff", "member"],
                        "surName": ["Jeter"], "givenName": ["Derek"],
                        "mail": ["foo@gmail.com", "foo2@gmail.com"],
                        "email": ["exampleuser@example.com"],
                        "title": ["shortstop"]}
            authn_response = self.get_authn_response(idp_config, identity)
            base64_encoded_response_metadata = base64.b64encode(authn_response.encode('utf-8'))
            base_64_utf8_response_metadata = base64_encoded_response_metadata.decode('utf-8')
            response = self.client.post(
                reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
                {'SAMLResponse': base_64_utf8_response_metadata}
            )
            self.assertEqual(response.status_code, 302)
            location = response._headers['location'][1]
            response = self.client.get(location)
            self.assertEqual(Saml2Account.objects.count(), 0)
            self.assertEqual(CachedEmailAddress.objects.count(), 1)
            self.assertEqual(BadgeUser.objects.count(), 1)

    def test_add_samlaccount_to_existing_user_with_varying_email(self):
        email = 'exampleuser@example.com'
        t_user = self.setup_user(
            email=email,
            token_scope='rw:profile rw:issuer rw:backpack'
        )
        preflight_response = self.client.get(
            reverse('v2_api_user_socialaccount_connect') + '?provider={}'.format(self.config.slug)
        )
        self.assertEqual(preflight_response.status_code, 200)
        location = urlparse(preflight_response.data['result']['url'])
        authcode = parse_qs(location.query)['authCode'][0]
        location = '?'.join([location.path, location.query])
        # the location now includes an auth code
        self.client.logout()
        response = self.client.get(location)
        self.assertEqual(response.status_code, 302)
        location = response._headers['location'][1]
        response = self.client.get(location)
        self.assertEqual(response.status_code, 302)
        # Can auto provision again
        rf = RequestFactory()
        fake_request = rf.post(
            reverse('assertion_consumer_service', kwargs={'idp_name': self.config.slug}),
            {'saml_assertion': 'very fake'}
        )
        # SAML asserts a different address than the one the user registered with
        email2 = 'exampleuser_alt@example.com'
        resp = auto_provision(fake_request, [email2], t_user.first_name, t_user.last_name, self.config)
        self.assertEqual(resp.status_code, 302)
        fake_request.session = dict()
        set_session_authcode(fake_request, authcode)
        set_session_badgr_app(fake_request, self.badgr_app)
        fake_request.session['idp_name'] = self.config.slug
        resp = self.client.get(resp.url)
        self.assertEqual(resp.status_code, 302)
        self.assertIn("authToken", resp.url)
        self.assertIn(self.badgr_app.ui_login_redirect, resp.url)
        Saml2Account.objects.get(user=t_user)  # There is a Saml account associated with the user.
        CachedEmailAddress.objects.get(email=email2, user=t_user, verified=True, primary=False)  # User has the email.
        email3 = 'exampleuser_moredifferent@example.com'
        resp = auto_provision(fake_request, [email2, email3], t_user.first_name, t_user.last_name, self.config)
        CachedEmailAddress.objects.get(email=email3, user=t_user, verified=True, primary=False)  # User has new email.

    def test_can_extract_custom_userdata(self):
        # A per-configuration attribute mapping overrides the default SAML attribute names
        self.config.custom_settings = json.dumps({
            'first_name': ['customMyClientFirstName']
        })
        self.config.save()
        reloaded_config = Saml2Configuration.objects.get(pk=self.config.pk)
        self.assertEqual(reloaded_config.custom_settings_data['email'], [], "default is set to an empty list")
        self.assertEqual(reloaded_config.custom_settings_data['first_name'], ['customMyClientFirstName'])
        fake_saml_assertion = {
            'emailaddress': ['moe@example.com'],
            'LastName': 'McMoe',
            'customMyClientFirstName': ['Moe']
        }
        self.assertEqual(
            userdata_from_saml_assertion(fake_saml_assertion, 'email', config=reloaded_config),
            fake_saml_assertion['emailaddress'][0]
        )
        self.assertEqual(
            userdata_from_saml_assertion(fake_saml_assertion, 'first_name', config=reloaded_config),
            fake_saml_assertion['customMyClientFirstName'][0]
        )
        self.assertEqual(
            userdata_from_saml_assertion(fake_saml_assertion, 'last_name', config=reloaded_config),
            fake_saml_assertion['LastName']
        )
class SamlServer(Server):
    """Thin wrapper around pysaml2's ``Server`` used by the SAML2 tests.

    Overrides ``create_authn_response`` so that an ``authn_statement`` passed
    via ``**kwargs`` survives ``gather_authn_response_args`` (which drops it).
    """

    # BUG FIX: this was previously ``def __int__(self, kwargs)`` — a typo'd
    # dunder (``__int__`` is the int() conversion hook), so it was never run
    # on construction and would have crashed anyway because ``kwargs`` was a
    # single positional argument being double-splatted.
    def __init__(self, **kwargs):
        super(SamlServer, self).__init__(**kwargs)

    def create_authn_response(self, identity, in_response_to, destination,
                              sp_entity_id, name_id_policy=None, userid=None,
                              name_id=None, authn=None, issuer=None,
                              sign_response=None, sign_assertion=None,
                              encrypt_cert_advice=None,
                              encrypt_cert_assertion=None,
                              encrypt_assertion=None,
                              encrypt_assertion_self_contained=True,
                              encrypted_advice_attributes=False, pefim=False,
                              sign_alg=None, digest_alg=None,
                              session_not_on_or_after=None,
                              **kwargs):
        """ Constructs an AuthenticationResponse
        :param identity: Information about a user
        :param in_response_to: The identifier of the authentication request
            this response is an answer to.
        :param destination: Where the response should be sent
        :param sp_entity_id: The entity identifier of the Service Provider
        :param name_id_policy: How the NameID should be constructed
        :param userid: The subject identifier
        :param name_id: The identifier of the subject. A saml.NameID instance.
        :param authn: Dictionary with information about the authentication
            context
        :param issuer: Issuer of the response
        :param sign_assertion: Whether the assertion should be signed or not.
        :param sign_response: Whether the response should be signed or not.
        :param encrypt_assertion: True if assertions should be encrypted.
        :param encrypt_assertion_self_contained: True if all encrypted
            assertions should have all namespaces self-contained.
        :param encrypted_advice_attributes: True if assertions in the advice
            element should be encrypted.
        :param encrypt_cert_advice: Certificate to be used for encryption of
            assertions in the advice element.
        :param encrypt_cert_assertion: Certificate to be used for encryption
            of assertions.
        :param pefim: True if a response according to the PEFIM profile
            should be created.
        :return: A response instance
        """
        try:
            args = self.gather_authn_response_args(
                sp_entity_id, name_id_policy=name_id_policy, userid=userid,
                name_id=name_id, sign_response=sign_response,
                sign_assertion=sign_assertion,
                encrypt_cert_advice=encrypt_cert_advice,
                encrypt_cert_assertion=encrypt_cert_assertion,
                encrypt_assertion=encrypt_assertion,
                encrypt_assertion_self_contained
                =encrypt_assertion_self_contained,
                encrypted_advice_attributes=encrypted_advice_attributes,
                pefim=pefim, **kwargs)
            # authn statement is not returned from gather_authn_response_args()
            # make sure to include it in args if it was passed in initially
            if 'authn_statement' in kwargs:
                args['authn_statement'] = kwargs['authn_statement']
        except IOError as exc:
            response = self.create_error_response(in_response_to,
                                                  destination,
                                                  sp_entity_id,
                                                  exc, name_id)
            return ("%s" % response).split("\n")

        try:
            _authn = authn
            # Regenerate the signing certificate under the lock when signing
            # is requested and the cert handler says one is needed.
            if (sign_assertion or sign_response) and \
                    self.sec.cert_handler.generate_cert():
                with self.lock:
                    self.sec.cert_handler.update_cert(True)
                    return self._authn_response(
                        in_response_to, destination, sp_entity_id, identity,
                        authn=_authn, issuer=issuer, pefim=pefim,
                        sign_alg=sign_alg, digest_alg=digest_alg,
                        session_not_on_or_after=session_not_on_or_after, **args)
            return self._authn_response(
                in_response_to, destination, sp_entity_id, identity,
                authn=_authn, issuer=issuer, pefim=pefim, sign_alg=sign_alg,
                digest_alg=digest_alg,
                session_not_on_or_after=session_not_on_or_after, **args)
        except MissingValue as exc:
            return self.create_error_response(in_response_to, destination,
                                              sp_entity_id, exc, name_id)
|
concentricsky/badgr-server
|
apps/badgrsocialauth/tests/test_saml2.py
|
Python
|
agpl-3.0
| 31,708
|
[
"MOE"
] |
023c2d700c70fdbcd27729b1200efd1d80b6372343e8acd67a306f87540976ef
|
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ensemble of SNGP models on CIFAR.
This script only performs evaluation, not training. We recommend training
ensembles by launching independent runs of `sngp.py` over different
seeds.
"""
import os
from absl import app
from absl import flags
from absl import logging
import edward2 as ed
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
import tensorflow_datasets as tfds
import uncertainty_baselines as ub
import ood_utils # local file import from baselines.cifar
import utils # local file import from baselines.cifar
# Command-line configuration for the ensemble evaluation run.
flags.DEFINE_string('checkpoint_dir', None,
                    'The directory where the model weights are stored.')
flags.mark_flag_as_required('checkpoint_dir')
flags.DEFINE_integer('ensemble_size', 10, 'The number of models to ensemble.')
flags.DEFINE_integer(
    'total_batch_size', 256,
    'The total train (and test) batch size, split across all devices.')

# SNGP ensemble flags
flags.DEFINE_float(
    'gp_mean_field_factor_ensemble', 0.0005,
    'The tunable multiplicative factor used in the mean-field approximation '
    'for the posterior mean of softmax Gaussian process. If -1 then use '
    'posterior mode instead of posterior mean.')

# Dropout flags
flags.DEFINE_bool('use_filterwise_dropout', True,
                  'Whether to use filterwise dropout for the hidden layers.')
flags.DEFINE_bool('use_mc_dropout', False,
                  'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate.')

# SNGP flags.
flags.DEFINE_bool('use_spec_norm', True,
                  'Whether to apply spectral normalization.')
flags.DEFINE_bool('use_gp_layer', True,
                  'Whether to use Gaussian process as the output layer.')

# Spectral normalization flags.
flags.DEFINE_integer(
    'spec_norm_iteration', 1,
    'Number of power iterations to perform for estimating '
    'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
                   'Upper bound to spectral norm of weight matrices.')

# Gaussian process flags.
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
    'gp_scale', 1.,
    'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
    'gp_input_dim', -1,
    'The dimension to reduce the neural network input for the GP layer '
    '(via random Gaussian projection which preserves distance by the '
    ' Johnson-Lindenstrauss lemma). If -1, no dimension reduction.')
flags.DEFINE_integer(
    'gp_hidden_dim', 1024,
    'The hidden dimension of the GP layer, which corresponds to the number of '
    'random features used for the approximation.')
flags.DEFINE_bool(
    'gp_input_normalization', False,
    'Whether to normalize the input using LayerNorm for GP layer.'
    'This is similar to automatic relevance determination (ARD) in the classic '
    'GP learning.')
flags.DEFINE_string(
    'gp_random_feature_type', 'orf',
    'The type of random feature to use. One of "rff" (random fourier feature), '
    '"orf" (orthogonal random feature).')
flags.DEFINE_float('gp_cov_ridge_penalty', 1.,
                   'Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
    'gp_cov_discount_factor', -1.,
    'The discount factor to compute the moving average of precision matrix'
    'across epochs. If -1 then compute the exact precision matrix within the '
    'latest epoch.')

# OOD flags.
flags.DEFINE_bool('eval_on_ood', False,
                  'Whether to run OOD evaluation on specified OOD datasets.')
flags.DEFINE_list('ood_dataset', 'cifar100,svhn_cropped',
                  'list of OOD datasets to evaluate on.')
flags.DEFINE_bool('dempster_shafer_ood', False,
                  'Wheter to use DempsterShafer Uncertainty score.')

# Flags like use_gpu/num_cores/seed/output_dir/dataset/data_dir are defined
# elsewhere (presumably by shared baseline utilities) — see main() usage.
FLAGS = flags.FLAGS
def main(argv):
  """Evaluates an ensemble of pre-trained SNGP checkpoints.

  Loads the clean test split (plus optional OOD and corrupted variants),
  restores each ensemble member from `FLAGS.checkpoint_dir`, caches its
  per-dataset logits to .npy files in `FLAGS.output_dir`, then aggregates
  the cached logits into ensemble metrics (NLL, Gibbs CE, accuracy, ECE,
  OOD scores, corruption metrics).
  """
  del argv  # unused arg
  if not FLAGS.use_gpu:
    raise ValueError('Only GPU is currently supported.')
  if FLAGS.num_cores > 1:
    raise ValueError('Only a single accelerator is currently supported.')

  tf.random.set_seed(FLAGS.seed)
  tf.io.gfile.makedirs(FLAGS.output_dir)

  ds_info = tfds.builder(FLAGS.dataset).info
  batch_size = FLAGS.total_batch_size
  steps_per_eval = ds_info.splits['test'].num_examples // batch_size
  num_classes = ds_info.features['label'].num_classes

  data_dir = FLAGS.data_dir
  dataset_builder = ub.datasets.get(
      FLAGS.dataset,
      data_dir=data_dir,
      download_data=FLAGS.download_data,
      split=tfds.Split.TEST,
      drop_remainder=FLAGS.drop_remainder_for_eval)
  dataset = dataset_builder.load(batch_size=batch_size)
  test_datasets = {'clean': dataset}
  if FLAGS.eval_on_ood:
    ood_dataset_names = FLAGS.ood_dataset
    ood_datasets, steps_per_ood = ood_utils.load_ood_datasets(
        ood_dataset_names,
        dataset_builder,
        1. - FLAGS.train_proportion,
        batch_size,
        drop_remainder=FLAGS.drop_remainder_for_eval)
    test_datasets.update(ood_datasets)
  extra_kwargs = {}
  if FLAGS.dataset == 'cifar100':
    data_dir = FLAGS.cifar100_c_path
  # Add one dataset per (corruption type, severity 1..5) combination.
  corruption_types, _ = utils.load_corrupted_test_info(FLAGS.dataset)
  for corruption_type in corruption_types:
    for severity in range(1, 6):
      dataset = ub.datasets.get(
          f'{FLAGS.dataset}_corrupted',
          corruption_type=corruption_type,
          data_dir=data_dir,
          severity=severity,
          split=tfds.Split.TEST,
          drop_remainder=FLAGS.drop_remainder_for_eval,
          **extra_kwargs).load(batch_size=batch_size)
      test_datasets[f'{corruption_type}_{severity}'] = dataset

  model = ub.models.wide_resnet_sngp(
      input_shape=ds_info.features['image'].shape,
      batch_size=FLAGS.total_batch_size // FLAGS.num_cores,
      depth=28,
      width_multiplier=10,
      num_classes=num_classes,
      l2=0.,
      use_mc_dropout=FLAGS.use_mc_dropout,
      use_filterwise_dropout=FLAGS.use_filterwise_dropout,
      dropout_rate=FLAGS.dropout_rate,
      use_gp_layer=FLAGS.use_gp_layer,
      gp_input_dim=FLAGS.gp_input_dim,
      gp_hidden_dim=FLAGS.gp_hidden_dim,
      gp_scale=FLAGS.gp_scale,
      gp_bias=FLAGS.gp_bias,
      gp_input_normalization=FLAGS.gp_input_normalization,
      gp_random_feature_type=FLAGS.gp_random_feature_type,
      gp_cov_discount_factor=FLAGS.gp_cov_discount_factor,
      gp_cov_ridge_penalty=FLAGS.gp_cov_ridge_penalty,
      use_spec_norm=FLAGS.use_spec_norm,
      spec_norm_iteration=FLAGS.spec_norm_iteration,
      spec_norm_bound=FLAGS.spec_norm_bound)
  logging.info('Model input shape: %s', model.input_shape)
  logging.info('Model output shape: %s', model.output_shape)
  logging.info('Model number of weights: %s', model.count_params())

  # Search for checkpoints from their index file; then remove the index suffix.
  ensemble_filenames = tf.io.gfile.glob(os.path.join(FLAGS.checkpoint_dir,
                                                     '**/*.index'))
  # Only apply ensemble on the models with the same model architecture
  ensemble_filenames0 = [
      filename for filename in ensemble_filenames
      if f'use_gp_layer:{FLAGS.use_gp_layer}' in filename and
      f'use_spec_norm:{FLAGS.use_spec_norm}' in filename
  ]
  # Sample ensemble members (with replacement, so members may repeat).
  np.random.seed(FLAGS.seed)
  ensemble_filenames = np.random.choice(
      ensemble_filenames0, FLAGS.ensemble_size, replace=True)
  # filename[:-6] drops the '.index' suffix matched by the glob above.
  ensemble_filenames = [filename[:-6] for filename in ensemble_filenames]
  ensemble_size = len(ensemble_filenames)
  logging.info('Ensemble size: %s', ensemble_size)
  logging.info('Ensemble filenames: %s', ensemble_filenames)
  logging.info('Ensemble number of weights: %s',
               ensemble_size * model.count_params())
  logging.info('Ensemble filenames: %s', str(ensemble_filenames))
  checkpoint = tf.train.Checkpoint(model=model)

  # Write model predictions to files (skipped if the file already exists,
  # so interrupted runs resume where they left off).
  num_datasets = len(test_datasets)
  for m, ensemble_filename in enumerate(ensemble_filenames):
    checkpoint.restore(ensemble_filename)
    for n, (name, test_dataset) in enumerate(test_datasets.items()):
      filename = '{dataset}_{member}.npy'.format(
          dataset=name.replace('/', '_'), member=m)  # ood dataset name has '/'
      filename = os.path.join(FLAGS.output_dir, filename)
      if not tf.io.gfile.exists(filename):
        logits = []
        test_iterator = iter(test_dataset)
        steps = steps_per_eval if 'ood/' not in name else steps_per_ood[name]
        for _ in range(steps):
          features = next(test_iterator)['features']  # pytype: disable=unsupported-operands
          logits_member = model(features, training=False)
          if isinstance(logits_member, (list, tuple)):
            # If model returns a tuple of (logits, covmat), extract both
            logits_member, covmat_member = logits_member
            # Adjust logits by the GP posterior variance (mean-field approx).
            logits_member = ed.layers.utils.mean_field_logits(
                logits_member, covmat_member,
                FLAGS.gp_mean_field_factor_ensemble)
          logits.append(logits_member)
        logits = tf.concat(logits, axis=0)
        with tf.io.gfile.GFile(filename, 'w') as f:
          np.save(f, logits.numpy())
      percent = (m * num_datasets + (n + 1)) / (ensemble_size * num_datasets)
      message = ('{:.1%} completion for prediction: ensemble member {:d}/{:d}. '
                 'Dataset {:d}/{:d}'.format(percent,
                                            m + 1,
                                            ensemble_size,
                                            n + 1,
                                            num_datasets))
      logging.info(message)

  metrics = {
      'test/negative_log_likelihood': tf.keras.metrics.Mean(),
      'test/gibbs_cross_entropy': tf.keras.metrics.Mean(),
      'test/accuracy': tf.keras.metrics.SparseCategoricalAccuracy(),
      'test/ece': rm.metrics.ExpectedCalibrationError(
          num_bins=FLAGS.num_bins),
  }
  if FLAGS.eval_on_ood:
    ood_metrics = ood_utils.create_ood_metrics(ood_dataset_names)
    metrics.update(ood_metrics)
  # NOTE(review): corruption metrics are created for every dataset name,
  # including 'clean' and OOD names, but only the `else` branch below
  # updates them.
  corrupt_metrics = {}
  for name in test_datasets:
    corrupt_metrics['test/nll_{}'.format(name)] = tf.keras.metrics.Mean()
    corrupt_metrics['test/accuracy_{}'.format(name)] = (
        tf.keras.metrics.SparseCategoricalAccuracy())
    corrupt_metrics['test/ece_{}'.format(name)] = (
        rm.metrics.ExpectedCalibrationError(num_bins=FLAGS.num_bins))

  # Evaluate model predictions from the cached per-member logits files.
  for n, (name, test_dataset) in enumerate(test_datasets.items()):
    logits_dataset = []
    for m in range(ensemble_size):
      filename = '{dataset}_{member}.npy'.format(
          dataset=name.replace('/', '_'), member=m)  # ood dataset name has '/'
      filename = os.path.join(FLAGS.output_dir, filename)
      with tf.io.gfile.GFile(filename, 'rb') as f:
        logits_dataset.append(np.load(f))

    # Shape: (ensemble_size, num_examples, num_classes).
    logits_dataset = tf.convert_to_tensor(logits_dataset)
    test_iterator = iter(test_dataset)
    steps = steps_per_eval if 'ood/' not in name else steps_per_ood[name]
    for step in range(steps):
      inputs = next(test_iterator)
      labels = inputs['labels']  # pytype: disable=unsupported-operands
      logits = logits_dataset[:, (step*batch_size):((step+1)*batch_size)]
      labels = tf.cast(labels, tf.int32)
      negative_log_likelihood_metric = rm.metrics.EnsembleCrossEntropy()
      negative_log_likelihood_metric.add_batch(logits, labels=labels)
      negative_log_likelihood = list(
          negative_log_likelihood_metric.result().values())[0]
      # Ensemble prediction: average member probabilities.
      per_probs = tf.nn.softmax(logits)
      probs = tf.reduce_mean(per_probs, axis=0)
      logits_mean = tf.reduce_mean(logits, axis=0)
      if name == 'clean':
        gibbs_ce_metric = rm.metrics.GibbsCrossEntropy()
        gibbs_ce_metric.add_batch(logits, labels=labels)
        gibbs_ce = list(gibbs_ce_metric.result().values())[0]
        metrics['test/negative_log_likelihood'].update_state(
            negative_log_likelihood)
        metrics['test/gibbs_cross_entropy'].update_state(gibbs_ce)
        metrics['test/accuracy'].update_state(labels, probs)
        metrics['test/ece'].add_batch(probs, label=labels)
      elif name.startswith('ood/'):
        # 1 marks an OOD example, 0 an in-distribution one.
        ood_labels = 1 - inputs['is_in_distribution']  # pytype: disable=unsupported-operands
        if FLAGS.dempster_shafer_ood:
          ood_scores = ood_utils.DempsterShaferUncertainty(logits_mean)
        else:
          # Default OOD score: 1 - max predictive probability.
          ood_scores = 1 - tf.reduce_max(probs, axis=-1)
        for metric_name, metric in metrics.items():
          if name in metric_name:
            metric.update_state(ood_labels, ood_scores)
      else:
        corrupt_metrics['test/nll_{}'.format(name)].update_state(
            negative_log_likelihood)
        corrupt_metrics['test/accuracy_{}'.format(name)].update_state(
            labels, probs)
        corrupt_metrics['test/ece_{}'.format(name)].add_batch(
            probs, label=labels)
    message = ('{:.1%} completion for evaluation: dataset {:d}/{:d}'.format(
        (n + 1) / num_datasets, n + 1, num_datasets))
    logging.info(message)

  corrupt_results = utils.aggregate_corrupt_metrics(corrupt_metrics,
                                                    corruption_types)
  total_results = {name: metric.result() for name, metric in metrics.items()}
  total_results.update(corrupt_results)
  # Metrics from Robustness Metrics (like ECE) will return a dict with a
  # single key/value, instead of a scalar.
  total_results = {
      k: (list(v.values())[0] if isinstance(v, dict) else v)
      for k, v in total_results.items()
  }
  logging.info('Metrics: %s', total_results)
# Script entry point: absl parses flags, then calls main().
if __name__ == '__main__':
  app.run(main)
|
google/uncertainty-baselines
|
baselines/cifar/sngp_ensemble.py
|
Python
|
apache-2.0
| 14,238
|
[
"Gaussian"
] |
3938496d5d92b8fa53271c234b2f68137be31b56760259652f9b4ef7cd1f1f00
|
# $HeadURL$
""" Queries BDII for unknown CE.
Queries BDII for CE information and puts it to CS.
"""
__RCSID__ = "$Id$"
from DIRAC import S_OK, S_ERROR, gConfig
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities import List
from DIRAC.Core.Utilities.Grid import ldapSite, ldapCluster, ldapCE, ldapCEState
from DIRAC.FrameworkSystem.Client.NotificationClient import NotificationClient
from DIRAC.ConfigurationSystem.Client.CSAPI import CSAPI
from DIRAC.Core.Security.ProxyInfo import getProxyInfo, formatProxyInfoAsString
from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath
from DIRAC.ConfigurationSystem.Client.Helpers.CSGlobals import getVO
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources
class CE2CSAgent( AgentModule ):
  """Queries BDII for CE information and synchronises it into the DIRAC CS.

  Each cycle the agent (a) looks for CEs published in BDII that are not yet
  known to the Configuration Service and mails a summary to the admins, and
  (b) compares site/CE/queue options stored in the CS against the values
  published in BDII and commits any differences.
  """

  # Notification mail endpoints; mailing is skipped when either is empty.
  addressTo = ''
  addressFrom = ''
  # VO name(s) managed by this agent; populated in initialize().
  voName = ''
  subject = "CE2CSAgent"
  # Fallback BDII hosts tried when the primary BDII query fails.
  alternativeBDIIs = []

  def initialize( self ):
    """Reads agent options, configures the shifter proxy and the CSAPI client."""
    # TODO: Have no default and if no mail is found then use the diracAdmin group
    # and resolve all associated mail addresses.
    self.addressTo = self.am_getOption( 'MailTo', self.addressTo )
    self.addressFrom = self.am_getOption( 'MailFrom', self.addressFrom )
    # Create a list of alternative bdii urls
    self.alternativeBDIIs = self.am_getOption( 'AlternativeBDIIs', [] )
    # Check if the bdii url is appended by a port number, if not append the default 2170
    for index, url in enumerate( self.alternativeBDIIs ):
      if not url.split( ':' )[-1].isdigit():
        self.alternativeBDIIs[index] += ':2170'
    if self.addressTo and self.addressFrom:
      self.log.info( "MailTo", self.addressTo )
      self.log.info( "MailFrom", self.addressFrom )
    if self.alternativeBDIIs :
      self.log.info( "AlternativeBDII URLs:", self.alternativeBDIIs )
    self.subject = "CE2CSAgent"
    # This sets the Default Proxy to used as that defined under
    # /Operations/Shifter/TestManager
    # the shifterProxy option in the Configuration can be used to change this default.
    self.am_setOption( 'shifterProxy', 'TestManager' )
    self.voName = self.am_getOption( 'VirtualOrganization', [] )
    if not self.voName:
      # Fall back to the VO of the installation itself.
      vo = getVO()
      if vo:
        self.voName = [ vo ]
    if self.voName:
      self.log.info( "Agent will manage VO(s) %s" % self.voName )
    else:
      self.log.fatal( "VirtualOrganization option not defined for agent" )
      return S_ERROR()
    self.csAPI = CSAPI()
    return self.csAPI.initialize()

  def execute( self ):
    """One agent cycle: refresh CS data, look for new CEs, refresh CE info."""
    self.log.info( "Start Execution" )
    result = getProxyInfo()
    if not result['OK']:
      return result
    infoDict = result[ 'Value' ]
    self.log.info( formatProxyInfoAsString( infoDict ) )
    # Get a "fresh" copy of the CS data
    result = self.csAPI.downloadCSData()
    if not result['OK']:
      # Best-effort: continue with the cached CS copy on failure.
      self.log.warn( "Could not download a fresh copy of the CS data", result[ 'Message' ] )
    self.__lookForCE()
    self.__infoFromCE()
    self.log.info( "End Execution" )
    return S_OK()

  def __checkAlternativeBDIISite( self, fun, *args ):
    """Retries a failed BDII query ``fun(*args)`` against each alternative BDII host.

    Returns the first successful result, otherwise the last failed one.
    NOTE(review): only called after a primary-query failure; if
    ``alternativeBDIIs`` is empty, ``result`` is unbound at the final return.
    """
    if self.alternativeBDIIs:
      self.log.warn( "Trying to use alternative BDII sites" )
      for site in self.alternativeBDIIs :
        self.log.info( "Trying to contact alternative BDII", site )
        if len( args ) == 1 :
          result = fun( args[0], host = site )
        elif len( args ) == 2 :
          result = fun( args[0], vo = args[1], host = site )
        if not result['OK'] :
          self.log.error ( "Problem contacting alternative BDII", result['Message'] )
        elif result['OK'] :
          return result
      self.log.warn( "Also checking alternative BDII sites failed" )
    return result

  def __lookForCE( self ):
    """Finds BDII-published CEs unknown to the CS and mails a candidate-site summary."""
    # Banned CEs are treated as already known so they are never re-reported.
    knownCEs = self.am_getOption( 'BannedCEs', [] )
    resources = Resources( self.voName )
    result = resources.getEligibleResources( 'Computing', {'CEType':['LCG','CREAM'] } )
    if not result['OK']:
      return
    grids = result['Value']
    # Collect every CE already registered in the CS, per grid/site.
    for grid in grids:
      result = gConfig.getSections( '/Resources/Sites/%s' % grid )
      if not result['OK']:
        return
      sites = result['Value']
      for site in sites:
        opt = gConfig.getOptionsDict( '/Resources/Sites/%s/%s' % ( grid, site ) )['Value']
        ces = List.fromChar( opt.get( 'CE', '' ) )
        knownCEs += ces

    response = ''
    for vo in self.voName:
      self.log.info( "Check for available CEs for VO", vo )
      response = ldapCEState( '', vo )
      if not response['OK']:
        self.log.error( "Error during BDII request", response['Message'] )
        response = self.__checkAlternativeBDIISite( ldapCEState, '', vo )
        return response
      newCEs = {}
      # Collect CE host names seen in BDII but absent from the CS.
      for queue in response['Value']:
        try:
          queueName = queue['GlueCEUniqueID']
        except:
          continue
        ceName = queueName.split( ":" )[0]
        if not ceName in knownCEs:
          newCEs[ceName] = None
          self.log.debug( "New CE", ceName )
      body = ""
      possibleNewSites = []
      # Python 2 idiom (iterkeys); this module predates Python 3.
      for ce in newCEs.iterkeys():
        # Resolve the GOCDB site name from the cluster's GlueForeignKey.
        response = ldapCluster( ce )
        if not response['OK']:
          self.log.warn( "Error during BDII request", response['Message'] )
          response = self.__checkAlternativeBDIISite( ldapCluster, ce )
          continue
        clusters = response['Value']
        if len( clusters ) != 1:
          self.log.warn( "Error in cluster length", " CE %s Length %d" % ( ce, len( clusters ) ) )
        if len( clusters ) == 0:
          continue
        cluster = clusters[0]
        fkey = cluster.get( 'GlueForeignKey', [] )
        if type( fkey ) == type( '' ):
          fkey = [fkey]
        nameBDII = None
        for entry in fkey:
          if entry.count( 'GlueSiteUniqueID' ):
            nameBDII = entry.split( '=' )[1]
            break
        if not nameBDII:
          continue
        ceString = "CE: %s, GOCDB Name: %s" % ( ce, nameBDII )
        self.log.info( ceString )
        # Query the CE's operating system information.
        response = ldapCE( ce )
        if not response['OK']:
          self.log.warn( "Error during BDII request", response['Message'] )
          response = self.__checkAlternativeBDIISite( ldapCE, ce )
          continue
        ceInfos = response['Value']
        if len( ceInfos ):
          ceInfo = ceInfos[0]
          systemName = ceInfo.get( 'GlueHostOperatingSystemName', 'Unknown' )
          systemVersion = ceInfo.get( 'GlueHostOperatingSystemVersion', 'Unknown' )
          systemRelease = ceInfo.get( 'GlueHostOperatingSystemRelease', 'Unknown' )
        else:
          systemName = "Unknown"
          systemVersion = "Unknown"
          systemRelease = "Unknown"
        osString = "SystemName: %s, SystemVersion: %s, SystemRelease: %s" % ( systemName, systemVersion, systemRelease )
        self.log.info( osString )
        # Query the CE's queues; report the CE only if at least one queue
        # is in 'Production' state.
        response = ldapCEState( ce, vo )
        if not response['OK']:
          self.log.warn( "Error during BDII request", response['Message'] )
          response = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
          continue
        newCEString = "\n\n%s\n%s" % ( ceString, osString )
        usefull = False
        ceStates = response['Value']
        for ceState in ceStates:
          queueName = ceState.get( 'GlueCEUniqueID', 'UnknownName' )
          queueStatus = ceState.get( 'GlueCEStateStatus', 'UnknownStatus' )
          queueString = "%s %s" % ( queueName, queueStatus )
          self.log.info( queueString )
          newCEString += "\n%s" % queueString
          if queueStatus.count( 'Production' ):
            usefull = True
        if usefull:
          body += newCEString
          possibleNewSites.append( 'dirac-admin-add-site DIRACSiteName %s %s' % ( nameBDII, ce ) )
      if body:
        body = "We are glad to inform You about new CE(s) possibly suitable for %s:\n" % vo + body
        body += "\n\nTo suppress information about CE add its name to BannedCEs list."
        for possibleNewSite in possibleNewSites:
          body = "%s\n%s" % ( body, possibleNewSite )
        self.log.info( body )
        if self.addressTo and self.addressFrom:
          notification = NotificationClient()
          result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )

    return S_OK()

  def __infoFromCE( self ):
    """Compares site/CE/queue options in the CS with BDII values and commits updates."""
    sitesSection = cfgPath( 'Resources', 'Sites' )
    result = gConfig.getSections( sitesSection )
    if not result['OK']:
      return
    grids = result['Value']
    changed = False
    body = ""

    for grid in grids:
      gridSection = cfgPath( sitesSection, grid )
      result = gConfig.getSections( gridSection )
      if not result['OK']:
        return
      sites = result['Value']
      for site in sites:
        siteSection = cfgPath( gridSection, site )
        opt = gConfig.getOptionsDict( siteSection )['Value']
        name = opt.get( 'Name', '' )
        if name:
          # Synchronise the site's Coordinates and Mail with BDII.
          coor = opt.get( 'Coordinates', 'Unknown' )
          mail = opt.get( 'Mail', 'Unknown' )
          result = ldapSite( name )
          if not result['OK']:
            self.log.warn( "BDII site %s: %s" % ( name, result['Message'] ) )
            result = self.__checkAlternativeBDIISite( ldapSite, name )
          if result['OK']:
            bdiiSites = result['Value']
            if len( bdiiSites ) == 0:
              self.log.warn( name, "Error in BDII: leng = 0" )
            else:
              if not len( bdiiSites ) == 1:
                self.log.warn( name, "Warning in BDII: leng = %d" % len( bdiiSites ) )
              bdiiSite = bdiiSites[0]
              try:
                longitude = bdiiSite['GlueSiteLongitude']
                latitude = bdiiSite['GlueSiteLatitude']
                newcoor = "%s:%s" % ( longitude, latitude )
              except:
                self.log.warn( "Error in BDII coordinates" )
                newcoor = "Unknown"
              try:
                newmail = bdiiSite['GlueSiteSysAdminContact'].split( ":" )[-1].strip()
              except:
                self.log.warn( "Error in BDII mail" )
                newmail = "Unknown"
              self.log.debug( "%s %s %s" % ( name, newcoor, newmail ) )
              # setOption creates a new CS option; modifyValue edits an
              # existing one — the same pattern is used for every field below.
              if newcoor != coor:
                self.log.info( "%s" % ( name ), "%s -> %s" % ( coor, newcoor ) )
                if coor == 'Unknown':
                  self.csAPI.setOption( cfgPath( siteSection, 'Coordinates' ), newcoor )
                else:
                  self.csAPI.modifyValue( cfgPath( siteSection, 'Coordinates' ), newcoor )
                changed = True
              if newmail != mail:
                self.log.info( "%s" % ( name ), "%s -> %s" % ( mail, newmail ) )
                if mail == 'Unknown':
                  self.csAPI.setOption( cfgPath( siteSection, 'Mail' ), newmail )
                else:
                  self.csAPI.modifyValue( cfgPath( siteSection, 'Mail' ), newmail )
                changed = True

        ceList = List.fromChar( opt.get( 'CE', '' ) )
        if not ceList:
          self.log.warn( site, 'Empty site list' )
          continue

        # result = gConfig.getSections( cfgPath( siteSection,'CEs' )
        # if not result['OK']:
        #   self.log.debug( "Section CEs:", result['Message'] )

        for ce in ceList:
          # Current CS values for this CE ('Unknown' where unset).
          ceSection = cfgPath( siteSection, 'CEs', ce )
          result = gConfig.getOptionsDict( ceSection )
          if not result['OK']:
            self.log.debug( "Section CE", result['Message'] )
            wnTmpDir = 'Unknown'
            arch = 'Unknown'
            os = 'Unknown'
            si00 = 'Unknown'
            pilot = 'Unknown'
            ceType = 'Unknown'
          else:
            ceopt = result['Value']
            wnTmpDir = ceopt.get( 'wnTmpDir', 'Unknown' )
            arch = ceopt.get( 'architecture', 'Unknown' )
            os = ceopt.get( 'OS', 'Unknown' )
            si00 = ceopt.get( 'SI00', 'Unknown' )
            pilot = ceopt.get( 'Pilot', 'Unknown' )
            ceType = ceopt.get( 'CEType', 'Unknown' )

          # BDII values for the same CE.
          result = ldapCE( ce )
          if not result['OK']:
            self.log.warn( 'Error in BDII for %s' % ce, result['Message'] )
            result = self.__checkAlternativeBDIISite( ldapCE, ce )
            continue
          try:
            bdiiCE = result['Value'][0]
          except:
            self.log.warn( 'Error in BDII for %s' % ce, result )
            bdiiCE = None
          if bdiiCE:
            try:
              newWNTmpDir = bdiiCE['GlueSubClusterWNTmpDir']
            except:
              newWNTmpDir = 'Unknown'
            if wnTmpDir != newWNTmpDir and newWNTmpDir != 'Unknown':
              section = cfgPath( ceSection, 'wnTmpDir' )
              self.log.info( section, " -> ".join( ( wnTmpDir, newWNTmpDir ) ) )
              if wnTmpDir == 'Unknown':
                self.csAPI.setOption( section, newWNTmpDir )
              else:
                self.csAPI.modifyValue( section, newWNTmpDir )
              changed = True

            try:
              newArch = bdiiCE['GlueHostArchitecturePlatformType']
            except:
              newArch = 'Unknown'
            if arch != newArch and newArch != 'Unknown':
              section = cfgPath( ceSection, 'architecture' )
              self.log.info( section, " -> ".join( ( arch, newArch ) ) )
              if arch == 'Unknown':
                self.csAPI.setOption( section, newArch )
              else:
                self.csAPI.modifyValue( section, newArch )
              changed = True

            try:
              newOS = '_'.join( ( bdiiCE['GlueHostOperatingSystemName'],
                                  bdiiCE['GlueHostOperatingSystemVersion'],
                                  bdiiCE['GlueHostOperatingSystemRelease'] ) )
            except:
              newOS = 'Unknown'
            if os != newOS and newOS != 'Unknown':
              section = cfgPath( ceSection, 'OS' )
              self.log.info( section, " -> ".join( ( os, newOS ) ) )
              if os == 'Unknown':
                self.csAPI.setOption( section, newOS )
              else:
                self.csAPI.modifyValue( section, newOS )
              changed = True
              # OS changes are also reported in the notification mail.
              body = body + "OS was changed %s -> %s for %s at %s\n" % ( os, newOS, ce, site )

            try:
              newSI00 = bdiiCE['GlueHostBenchmarkSI00']
            except:
              newSI00 = 'Unknown'
            if si00 != newSI00 and newSI00 != 'Unknown':
              section = cfgPath( ceSection, 'SI00' )
              self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
              if si00 == 'Unknown':
                self.csAPI.setOption( section, newSI00 )
              else:
                self.csAPI.modifyValue( section, newSI00 )
              changed = True

            try:
              # LHCb-specific: pilot capability advertised via the runtime env.
              rte = bdiiCE['GlueHostApplicationSoftwareRunTimeEnvironment']
              for vo in self.voName:
                if vo.lower() == 'lhcb':
                  if 'VO-lhcb-pilot' in rte:
                    newPilot = 'True'
                  else:
                    newPilot = 'False'
                else:
                  newPilot = 'Unknown'
            except:
              newPilot = 'Unknown'
            if pilot != newPilot and newPilot != 'Unknown':
              section = cfgPath( ceSection, 'Pilot' )
              self.log.info( section, " -> ".join( ( pilot, newPilot ) ) )
              if pilot == 'Unknown':
                self.csAPI.setOption( section, newPilot )
              else:
                self.csAPI.modifyValue( section, newPilot )
              changed = True

          newVO = ''
          for vo in self.voName:
            result = ldapCEState( ce, vo )  #getBDIICEVOView
            if not result['OK']:
              self.log.warn( 'Error in BDII for queue %s' % ce, result['Message'] )
              result = self.__checkAlternativeBDIISite( ldapCEState, ce, vo )
              continue
            try:
              queues = result['Value']
            except:
              # NOTE(review): 'Massage' is a typo in the original result key —
              # kept as-is because this is a behaviour-preserving doc pass.
              self.log.warn( 'Error in BDII for queue %s' % ce, result['Massage'] )
              continue

            # Derive a single CEType from all queue implementations.
            newCEType = 'Unknown'
            for queue in queues:
              try:
                queueType = queue['GlueCEImplementationName']
              except:
                queueType = 'Unknown'
              if newCEType == 'Unknown':
                newCEType = queueType
              else:
                if queueType != newCEType:
                  self.log.warn( 'Error in BDII for CE %s ' % ce, 'different CE types %s %s' % ( newCEType, queueType ) )
            if newCEType=='ARC-CE':
              newCEType = 'ARC'

            if ceType != newCEType and newCEType != 'Unknown':
              section = cfgPath( ceSection, 'CEType' )
              self.log.info( section, " -> ".join( ( ceType, newCEType ) ) )
              if ceType == 'Unknown':
                self.csAPI.setOption( section, newCEType )
              else:
                self.csAPI.modifyValue( section, newCEType )
              changed = True

            # Per-queue options: maxCPUTime, SI00 and the VO access list.
            for queue in queues:
              try:
                queueName = queue['GlueCEUniqueID'].split( '/' )[-1]
              except:
                self.log.warn( 'Error in queueName ', queue )
                continue

              try:
                newMaxCPUTime = queue['GlueCEPolicyMaxCPUTime']
              except:
                newMaxCPUTime = None

              newSI00 = None
              try:
                caps = queue['GlueCECapability']
                if type( caps ) == type( '' ):
                  caps = [caps]
                for cap in caps:
                  if cap.count( 'CPUScalingReferenceSI00' ):
                    newSI00 = cap.split( '=' )[-1]
              except:
                newSI00 = None

              queueSection = cfgPath( ceSection, 'Queues', queueName )
              result = gConfig.getOptionsDict( queueSection )
              if not result['OK']:
                self.log.warn( "Section Queues", result['Message'] )
                maxCPUTime = 'Unknown'
                si00 = 'Unknown'
                allowedVOs = ['']
              else:
                queueOpt = result['Value']
                maxCPUTime = queueOpt.get( 'maxCPUTime', 'Unknown' )
                si00 = queueOpt.get( 'SI00', 'Unknown' )
                if newVO == '':  # Remember previous iteration, if none - read from conf
                  allowedVOs = queueOpt.get( 'VO', '' ).split( "," )
                else:  # Else use newVO, as it can contain changes, which aren't in conf yet
                  allowedVOs = newVO.split( "," )
              if newMaxCPUTime and ( maxCPUTime != newMaxCPUTime ):
                section = cfgPath( queueSection, 'maxCPUTime' )
                self.log.info( section, " -> ".join( ( maxCPUTime, newMaxCPUTime ) ) )
                if maxCPUTime == 'Unknown':
                  self.csAPI.setOption( section, newMaxCPUTime )
                else:
                  self.csAPI.modifyValue( section, newMaxCPUTime )
                changed = True

              if newSI00 and ( si00 != newSI00 ):
                section = cfgPath( queueSection, 'SI00' )
                self.log.info( section, " -> ".join( ( si00, newSI00 ) ) )
                if si00 == 'Unknown':
                  self.csAPI.setOption( section, newSI00 )
                else:
                  self.csAPI.modifyValue( section, newSI00 )
                changed = True

              # Append the current VO to the queue's VO list if missing.
              modifyVO = True  # Flag saying if we need VO option to change
              newVO = ''
              if allowedVOs != ['']:
                for allowedVO in allowedVOs:
                  allowedVO = allowedVO.strip()  # Get rid of spaces
                  newVO += allowedVO
                  if allowedVO == vo:  # Current VO has been already in list
                    newVO = ''
                    modifyVO = False  # Don't change anything
                    break  # Skip next 'if', proceed to next VO
                  newVO += ', '

              if modifyVO:
                section = cfgPath( queueSection, 'VO' )
                newVO += vo
                self.log.info( section, " -> ".join( ( '%s' % allowedVOs, newVO ) ) )
                if allowedVOs == ['']:
                  self.csAPI.setOption( section, newVO )
                else:
                  self.csAPI.modifyValue( section, newVO )
                changed = True

    if changed:
      self.log.info( body )
      if body and self.addressTo and self.addressFrom:
        notification = NotificationClient()
        result = notification.sendMail( self.addressTo, self.subject, body, self.addressFrom, localAttempt = False )
      return self.csAPI.commit()
    else:
      self.log.info( "No changes found" )
      return S_OK()
|
sposs/DIRAC
|
ConfigurationSystem/Agent/CE2CSAgent.py
|
Python
|
gpl-3.0
| 21,176
|
[
"DIRAC"
] |
7a70372462b087e24cda662162a4c50b5df8777bd83d61b980a77bf0a255f25e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# freeseer - vga/presentation capture software
#
# Copyright (C) 2011, 2013 Free and Open Source Software Learning Centre
# http://fosslc.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# For support, questions, suggestions or any other inquiries, visit:
# http://wiki.github.com/Freeseer/freeseer/
import unittest
from freeseer.framework.config.persist import JSONConfigStorage
from freeseer.tests.framework.config.persist import ConfigStorageTest
initial_config = '''\
{
"this_section": {
"option1": "othello",
"option2": "0"
}
}\
'''
after_config = '''\
{
"this_section": {
"option1": "something_new",
"option2": "10"
}
}\
'''
class TestJSONConfigStorage(ConfigStorageTest, unittest.TestCase):
    """Run the shared ConfigStorageTest suite against JSONConfigStorage.

    The mixin supplies every test method; these class attributes tell it
    which backend class to exercise and what file content to expect.
    """

    # Storage backend under test.
    CONFIG_STORAGE_CLASS = JSONConfigStorage
    # On-disk content the backend must be able to load.
    INITIAL_LOAD_CONFIG = initial_config
    # On-disk content the backend must produce after a store.
    AFTER_STORE_CONFIG = after_config
|
Freeseer/freeseer
|
src/freeseer/tests/framework/config/persist/test_jsonstorage.py
|
Python
|
gpl-3.0
| 1,605
|
[
"VisIt"
] |
cd9e2d388489a9d3617b797f64a94be2e051d6ca972286a2136731399d4f6545
|
#
#@BEGIN LICENSE
#
# PSI4: an ab initio quantum chemistry software package
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#@END LICENSE
#
"""
| Database (Truhlar) of several classes of noncovalent interactions.
| Geometries from Truhlar and coworkers at site http://comp.chem.umn.edu/database_noncov/noncovalent.htm
| Reference energies from Truhlar and coworkers at site http://comp.chem.umn.edu/database_noncov/noncovalent.htm
| First comprehensive citation JPCA 109 5656 (2005).
- **cp** ``'off'`` || ``'on'``
- **rlxd** ``'off'`` || ``'on'``
- **benchmark**
- ``'<benchmark_name>'`` <Reference>.
- |dl| ``'<default_benchmark_name>'`` |dr| <Reference>.
- **subset**
- ``'small'`` 3: HF-HF, He-Ne, HCCH-HCCH
- ``'large'`` 1: BzBz_PD
- ``'HB6'`` hydrogen-bonded
- ``'CT7'`` charge-transfer
- ``'DI6'`` dipole-interacting
- ``'WI7'`` weakly interacting
- ``'PPS5'`` pi-pi stacking
"""
import qcdb
# <<< NCB31 Database Module >>>
dbse = 'NCB31'

# <<< Database Members >>>
# Convenience subsets: a tiny sanity-check set and the single largest system.
HRXN_SM = ['HB6-2', 'WI7-1', 'PPS5-1']
HRXN_LG = ['PPS5-5']
# The five Truhlar subsets (see module docstring): hydrogen-bonded,
# charge-transfer, dipole-interacting, weakly interacting, pi-pi stacking.
HB6 = ['HB6-1', 'HB6-2', 'HB6-3', 'HB6-4', 'HB6-5', 'HB6-6']
CT7 = ['CT7-1', 'CT7-2', 'CT7-3', 'CT7-4', 'CT7-5', 'CT7-6', 'CT7-7']
DI6 = ['DI6-1', 'DI6-2', 'DI6-3', 'DI6-4', 'DI6-5', 'DI6-6']
WI7 = ['WI7-1', 'WI7-2', 'WI7-3', 'WI7-4', 'WI7-5', 'WI7-6', 'WI7-7']
PPS5 = ['PPS5-1', 'PPS5-2', 'PPS5-3', 'PPS5-4', 'PPS5-5']
# Full 31-member reaction list.  Plain concatenation replaces the former
# sum([...], []) idiom, which repeatedly copies intermediate lists
# (quadratic) and is flagged by linters.
HRXN = HB6 + CT7 + DI6 + WI7 + PPS5
# <<< Chemical Systems Involved >>>
RXNM = {}        # reaction matrix of reagent contributions per reaction
RXNM_CPRLX = {}  # reaction matrix of reagent contributions per reaction for counterpoise- and deformation-corrected
ACTV = {}        # order of active reagents per reaction
ACTV_CP = {}     # order of active reagents per counterpoise-corrected reaction
ACTV_SA = {}     # order of active reagents for non-supramolecular calculations
ACTV_RLX = {}    # order of active reagents for deformation-corrected reaction
ACTV_CPRLX = {}  # order of active reagents for counterpoise- and deformation-corrected reaction

# Constituent monomers of each dimer, keyed by reaction name; used below to
# point each reaction at its relaxed-monomer (mono-RLX) reagents.
hold = {}
hold['CT7-1'] = ['C2H4', 'F2']
hold['CT7-2'] = ['NH3', 'F2']
hold['CT7-3'] = ['HCCH', 'ClF']
hold['CT7-4'] = ['HCN', 'ClF']
hold['CT7-5'] = ['NH3', 'Cl2']
hold['CT7-6'] = ['H2O', 'ClF']
hold['CT7-7'] = ['NH3', 'ClF']
hold['DI6-1'] = ['H2S', 'H2S']
hold['DI6-2'] = ['HCl', 'HCl']
hold['DI6-3'] = ['HCl', 'H2S']
hold['DI6-4'] = ['CH3Cl', 'HCl']
hold['DI6-5'] = ['HCN', 'CH3SH']
hold['DI6-6'] = ['CH3SH', 'HCl']
hold['HB6-1'] = ['NH3', 'NH3']
hold['HB6-2'] = ['HF', 'HF']
hold['HB6-3'] = ['H2O', 'H2O']
hold['HB6-4'] = ['NH3', 'H2O']
hold['HB6-5'] = ['HCONH2', 'HCONH2']
hold['HB6-6'] = ['HCOOH', 'HCOOH']
hold['PPS5-1'] = ['HCCH', 'HCCH']
hold['PPS5-2'] = ['C2H4', 'C2H4']
hold['PPS5-3'] = ['Bz', 'Bz']
hold['PPS5-4'] = ['Bz', 'Bz']
hold['PPS5-5'] = ['Bz', 'Bz']

# Populate reagent bookkeeping for every reaction.  The RXNM values are the
# stoichiometric coefficients applied to each reagent's energy; each ACTV_*
# list selects the reagents that participate in one correction scheme.
# NOTE(review): for homonuclear dimers (e.g. HB6-2 HF-HF, WI7-3 Ne-Ne) the
# two mono-RLX keys are identical, so the dict literal collapses them into a
# single entry with coefficient -1 rather than -2 -- presumably the driver
# compensates by listing the reagent twice in ACTV_RLX/ACTV_CPRLX; confirm.
for rxn in HRXN:

    RXNM['%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
                                   '%s-%s-monoA-CP' % (dbse, rxn) : -1,
                                   '%s-%s-monoB-CP' % (dbse, rxn) : -1,
                                   '%s-%s-monoA-unCP' % (dbse, rxn) : -1,
                                   '%s-%s-monoB-unCP' % (dbse, rxn) : -1,
                                   '%s-%s-mono-RLX' % (dbse, hold[rxn][0]) : -1,
                                   '%s-%s-mono-RLX' % (dbse, hold[rxn][1]) : -1 }

    # Counterpoise + deformation: unCP monomers re-enter with +1 so that the
    # CP correction and the relaxed-monomer reference combine consistently.
    RXNM_CPRLX['%s-%s' % (dbse, rxn)] = {'%s-%s-dimer' % (dbse, rxn) : +1,
                                         '%s-%s-monoA-CP' % (dbse, rxn) : -1,
                                         '%s-%s-monoB-CP' % (dbse, rxn) : -1,
                                         '%s-%s-monoA-unCP' % (dbse, rxn) : +1,
                                         '%s-%s-monoB-unCP' % (dbse, rxn) : +1,
                                         '%s-%s-mono-RLX' % (dbse, hold[rxn][0]) : -1,
                                         '%s-%s-mono-RLX' % (dbse, hold[rxn][1]) : -1 }

    # Supramolecular-free scheme: only the dimer computation is needed.
    ACTV_SA['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn) ]

    # Plain (uncorrected) scheme: dimer minus monomers in the dimer basis-free
    # (unCP) geometries.
    ACTV['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                   '%s-%s-monoA-unCP' % (dbse, rxn),
                                   '%s-%s-monoB-unCP' % (dbse, rxn) ]

    # Counterpoise-corrected scheme.
    ACTV_CP['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                      '%s-%s-monoA-CP' % (dbse, rxn),
                                      '%s-%s-monoB-CP' % (dbse, rxn) ]

    # Deformation-corrected scheme: monomers at their relaxed geometries.
    ACTV_RLX['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                       '%s-%s-mono-RLX' % (dbse, hold[rxn][0]),
                                       '%s-%s-mono-RLX' % (dbse, hold[rxn][1]) ]

    # Counterpoise- and deformation-corrected scheme.
    ACTV_CPRLX['%s-%s' % (dbse, rxn)] = ['%s-%s-dimer' % (dbse, rxn),
                                         '%s-%s-monoA-CP' % (dbse, rxn),
                                         '%s-%s-monoB-CP' % (dbse, rxn),
                                         '%s-%s-monoA-unCP' % (dbse, rxn),
                                         '%s-%s-monoB-unCP' % (dbse, rxn),
                                         '%s-%s-mono-RLX' % (dbse, hold[rxn][0]),
                                         '%s-%s-mono-RLX' % (dbse, hold[rxn][1]) ]
# <<< Reference Values [kcal/mol] >>>
# Benchmark binding energies (negative = bound), per the Truhlar reference
# data cited in the module docstring.
BIND = {}
nan = float('NaN')  # sentinel available for reactions lacking a reference value
BIND['%s-%s' % (dbse, 'CT7-1' )] = -1.06
BIND['%s-%s' % (dbse, 'CT7-2' )] = -1.81
BIND['%s-%s' % (dbse, 'CT7-3' )] = -3.81
BIND['%s-%s' % (dbse, 'CT7-4' )] = -4.86
BIND['%s-%s' % (dbse, 'CT7-5' )] = -4.88
BIND['%s-%s' % (dbse, 'CT7-6' )] = -5.36
BIND['%s-%s' % (dbse, 'CT7-7' )] = -10.62
BIND['%s-%s' % (dbse, 'DI6-1' )] = -1.66
BIND['%s-%s' % (dbse, 'DI6-2' )] = -2.01
BIND['%s-%s' % (dbse, 'DI6-3' )] = -3.35
BIND['%s-%s' % (dbse, 'DI6-4' )] = -3.55
BIND['%s-%s' % (dbse, 'DI6-5' )] = -3.59
BIND['%s-%s' % (dbse, 'DI6-6' )] = -4.16
BIND['%s-%s' % (dbse, 'HB6-1' )] = -3.15
BIND['%s-%s' % (dbse, 'HB6-2' )] = -4.57
BIND['%s-%s' % (dbse, 'HB6-3' )] = -4.97
BIND['%s-%s' % (dbse, 'HB6-4' )] = -6.41
BIND['%s-%s' % (dbse, 'HB6-5' )] = -14.94
BIND['%s-%s' % (dbse, 'HB6-6' )] = -16.15
BIND['%s-%s' % (dbse, 'PPS5-1' )] = -1.34
BIND['%s-%s' % (dbse, 'PPS5-2' )] = -1.42
BIND['%s-%s' % (dbse, 'PPS5-3' )] = -1.81
BIND['%s-%s' % (dbse, 'PPS5-4' )] = -2.74
BIND['%s-%s' % (dbse, 'PPS5-5' )] = -2.78
BIND['%s-%s' % (dbse, 'WI7-1' )] = -0.04
BIND['%s-%s' % (dbse, 'WI7-2' )] = -0.06
BIND['%s-%s' % (dbse, 'WI7-3' )] = -0.08
BIND['%s-%s' % (dbse, 'WI7-4' )] = -0.13
BIND['%s-%s' % (dbse, 'WI7-5' )] = -0.22
BIND['%s-%s' % (dbse, 'WI7-6' )] = -0.47
BIND['%s-%s' % (dbse, 'WI7-7' )] = -0.51
# <<< Comment Lines >>>
# Human-readable labels for every reagent.  Each reaction has one base
# description; its dimer/monomer entries are mechanically derived from it
# ("Dimer from ...", "Monomer A/B from ...", with CP and unCP monomers
# sharing a label).  Generating them here replaces ~190 hand-written
# assignments that each repeated the description verbatim.
TAGL = {}

# (reaction, description) pairs.  The trailing space on every description is
# preserved from the original hand-written strings.
_NCB31_RXN_DESC = [
    ('CT7-1',  'Ethene-Fluorine Molecule Complex (C2H4-F2) '),
    ('CT7-2',  'Ammonia-Fluorine Molecule Complex (NH3-F2) '),
    ('CT7-3',  'Ethine-Chlorine Monofluoride Complex (HCCH-ClF) '),
    ('CT7-4',  'Hydrogen Cyanide-Chlorine Monofluoride Complex (HCN-ClF) '),
    ('CT7-5',  'Ammonia-Chlorine Molecule (NH3-Cl2) '),
    ('CT7-6',  'Water-Chlorine Monofluoride Complex (H2O-ClF) '),
    ('CT7-7',  'Ammonia-Chlorine Monofluoride Complex (NH3-ClF) '),
    ('DI6-1',  'Hydrogen Sulfide Dimer (H2S-H2S) '),
    ('DI6-2',  'Hydrogen Chloride Dimer (HCl-HCl) '),
    ('DI6-3',  'Hydrogen Chloride-Hydrogen Sulfide Complex (HCl-H2S) '),
    ('DI6-4',  'Methyl Chloride-Hydrogen Chloride (CH3Cl-HCl) '),
    ('DI6-5',  'Hydrogen Cyanide-Methanethiol (HCN-CH3SH) '),
    ('DI6-6',  'Methanethiol-Hydrogen Chloride Complex (CH3SH-HCl) '),
    ('HB6-1',  'Ammonia Dimer (NH3-NH3) '),
    ('HB6-2',  'Hydrogen Fluoride Dimer (HF-HF) '),
    ('HB6-3',  'Water Dimer (H2O-H2O) '),
    ('HB6-4',  'Ammonia-Water Complex (NH3-H2O) '),
    ('HB6-5',  'Formamide Dimer (HCONH2-HCONH2) '),
    ('HB6-6',  'Formic Acid Dimer (HCOOH-HCOOH) '),
    ('PPS5-1', 'Ethine Dimer (HCCH-HCCH) '),
    ('PPS5-2', 'Ethene Dimer (C2H4-C2H4) '),
    ('PPS5-3', 'Sandwich Benzene Dimer (BzBz_S) '),
    ('PPS5-4', 'T-Shaped Benzene Dimer (BzBz_T) '),
    ('PPS5-5', 'Parallel-Displaced Benzene Dimer (BzBz_PD) '),
    ('WI7-1',  'Helium-Neon Complex (He-Ne) '),
    ('WI7-2',  'Helium-Argon Complex (He-Ar) '),
    ('WI7-3',  'Neon Dimer (Ne-Ne) '),
    ('WI7-4',  'Neon-Argon Complex (Ne-Ar) '),
    ('WI7-5',  'Methane-Neon Complex (CH4-Ne) '),
    ('WI7-6',  'Benzene-Neon Complex (Bz-Ne) '),
    ('WI7-7',  'Methane Dimer (CH4-CH4) '),
]
for _rxn, _desc in _NCB31_RXN_DESC:
    TAGL['%s-%s' % (dbse, _rxn)] = _desc
    TAGL['%s-%s-dimer' % (dbse, _rxn)] = 'Dimer from ' + _desc
    TAGL['%s-%s-monoA-CP' % (dbse, _rxn)] = 'Monomer A from ' + _desc
    TAGL['%s-%s-monoB-CP' % (dbse, _rxn)] = 'Monomer B from ' + _desc
    TAGL['%s-%s-monoA-unCP' % (dbse, _rxn)] = 'Monomer A from ' + _desc
    TAGL['%s-%s-monoB-unCP' % (dbse, _rxn)] = 'Monomer B from ' + _desc

# Relaxed-monomer labels, keyed by molecule name (matches the hold[] values).
_NCB31_MONO_DESC = [
    ('HCCH',   'Ethine'),
    ('C2H4',   'Ethene'),
    ('Bz',     'Benzene'),
    ('CH3Cl',  'Methyl Chloride'),
    ('CH3SH',  'Methanethiol'),
    ('CH4',    'Methane'),
    ('F2',     'Fluorine Molecule'),
    ('H2O',    'Water'),
    ('H2S',    'Hydrogen Sulfide'),
    ('HCl',    'Hydrogen Chloride'),
    ('HCN',    'Hydrogen Cyanide'),
    ('HCONH2', 'Formamide'),
    ('HCOOH',  'Formic Acid'),
    ('He',     'Helium'),
    ('Ne',     'Neon'),
    ('Ar',     'Argon'),
    ('HF',     'Hydrogen Fluoride'),
    ('NH3',    'Ammonia'),
]
for _mol, _name in _NCB31_MONO_DESC:
    TAGL['%s-%s-mono-RLX' % (dbse, _mol)] = _name + ' Relaxed Monomer '
# <<< Geometry Specification Strings >>>
GEOS = {}
GEOS['%s-%s-%s' % (dbse, 'CT7-1', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 -2.19285000 -0.66839500
C -0.00000000 -2.19286000 0.66839500
H -0.92518700 -2.19231600 -1.23398200
H 0.92518700 -2.19232500 -1.23398300
H -0.92518700 -2.19232000 1.23398200
H 0.92518700 -2.19231100 1.23398200
--
0 1
F 0.00000000 0.78568800 0.00000000
F 0.00000000 2.20564800 0.00000100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-2', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.14998500
H 0.00000000 0.93965200 -2.53440100
H 0.81376200 -0.46982600 -2.53440100
H -0.81376200 -0.46982600 -2.53440100
--
0 1
F 0.00000000 0.00000000 0.54577100
F 0.00000000 0.00000000 1.97124000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-3', 'dimer')] = qcdb.Molecule("""
0 1
H 0.00000000 1.67189100 -2.21255500
C 0.00000000 0.60529300 -2.19955900
C 0.00000000 -0.60529300 -2.19955900
H 0.00000000 -1.67189100 -2.21255500
--
0 1
Cl 0.00000000 -0.00000000 0.61188000
F 0.00000000 -0.00000000 2.26865100
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-4', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -1.83951900
C 0.00000000 0.00000000 -2.99573100
H 0.00000000 0.00000000 -4.06502600
--
0 1
F -0.00000000 0.00000000 2.42592000
Cl -0.00000000 0.00000000 0.76957400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-5', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.83845100
H 0.00000000 0.94268700 -3.21538300
H 0.81639100 -0.47134300 -3.21538300
H -0.81639100 -0.47134300 -3.21538300
--
0 1
Cl 0.00000000 0.00000000 -0.15004400
Cl 0.00000000 0.00000000 1.88623900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-6', 'dimer')] = qcdb.Molecule("""
0 1
O 2.23981900 0.00002700 -0.08823100
H 2.60088700 0.76196300 0.37705500
H 2.60108700 -0.76172700 0.37719400
--
0 1
Cl -0.31586800 -0.00006600 -0.01691400
F -1.97230800 0.00007400 0.02657000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CT7-7', 'dimer')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 -2.05789900
H 0.00000000 0.94960500 -2.41448800
H 0.82238200 -0.47480300 -2.41448800
H -0.82238200 -0.47480300 -2.41448800
--
0 1
Cl 0.00000000 0.00000000 0.24385500
F 0.00000000 0.00000000 1.94480300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-1', 'dimer')] = qcdb.Molecule("""
0 1
S -2.03099600 0.10323300 -0.00078200
H -1.93402000 -0.81846200 0.96967600
H -1.94045000 -0.83661600 -0.95429900
--
0 1
S 2.07983800 -0.08511200 0.00018100
H 2.33915400 1.23101900 -0.00221400
H 0.75384800 0.13412100 -0.00353700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-2', 'dimer')] = qcdb.Molecule("""
0 1
Cl 1.86082400 -0.06541100 -0.00006800
H 1.75394100 1.21098100 0.00034100
--
0 1
Cl -1.92526600 0.00557100 -0.00009700
H -0.65842700 -0.19370300 0.00247600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-3', 'dimer')] = qcdb.Molecule("""
0 1
Cl -1.91163600 -0.00001100 0.00349800
H -0.62731700 -0.00005800 -0.10405100
--
0 1
S 1.84252900 0.00001300 -0.10154300
H 1.82277900 -0.96181000 0.83465000
H 1.82187700 0.96186000 0.83462200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-4', 'dimer')] = qcdb.Molecule("""
0 1
C -1.49512800 1.12579900 -0.00000200
Cl -1.40247600 -0.66254400 0.00013900
H -0.48106900 1.51836100 -0.00121600
H -2.02718100 1.43516300 0.89531200
H -2.02924000 1.43492300 -0.89417200
--
0 1
Cl 2.13960800 0.03729800 -0.00013800
H 0.97700200 -0.51405400 0.00007200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-5', 'dimer')] = qcdb.Molecule("""
0 1
C 1.99644300 0.05718500 -0.00648300
N 2.98021800 0.65834500 0.10945000
H 1.07234100 -0.48518900 -0.10641600
--
0 1
S -1.51439900 -0.79999400 -0.11697900
C -1.57014400 1.01297400 0.01160700
H -1.55457900 -1.05260000 1.20049200
H -1.54556000 1.39238100 -1.01019600
H -0.70866100 1.40255300 0.55309700
H -2.49314500 1.33992300 0.48665400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'DI6-6', 'dimer')] = qcdb.Molecule("""
0 1
C -1.44764800 1.15564900 0.01851300
S -1.41459500 -0.65984600 -0.08354400
H -1.46628400 1.51681600 -1.00988000
H -0.55297100 1.53526500 0.51001200
H -2.34423900 1.49773300 0.53186300
H -1.37736100 -0.89092100 1.23821400
--
0 1
Cl 2.12576600 0.02408100 0.00315600
H 0.92223800 -0.44463500 -0.09824700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-1', 'dimer')] = qcdb.Molecule("""
0 1
N 1.57522500 0.00008500 -0.04260700
H 2.13110800 0.81394900 -0.28661400
H 1.49645000 -0.00293600 0.97025700
H 2.13172100 -0.81189200 -0.29145300
--
0 1
N -1.68824500 0.00008300 0.10484800
H -2.12640300 -0.81268000 -0.31731000
H -2.12744200 0.81184200 -0.31815800
H -0.71429700 0.00054300 -0.19240700
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-2', 'dimer')] = qcdb.Molecule("""
0 1
F 1.32373600 -0.09022600 -0.00000700
H 1.74043700 0.73339000 0.00001300
--
0 1
F -1.45719500 0.01925700 -0.00001100
H -0.53931000 -0.09466400 0.00014500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-3', 'dimer')] = qcdb.Molecule("""
0 1
O 1.53175000 0.00592200 -0.12088000
H 0.57596800 -0.00524900 0.02496600
H 1.90624900 -0.03756100 0.76321800
--
0 1
O -1.39622600 -0.00499000 0.10676600
H -1.78937200 -0.74228300 -0.37100900
H -1.77703700 0.77763800 -0.30426400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-4', 'dimer')] = qcdb.Molecule("""
0 1
N -1.39559100 -0.02156400 0.00003700
H -1.62981100 0.96109600 -0.10622400
H -1.86276700 -0.51254400 -0.75597400
H -1.83354700 -0.33077000 0.86230700
--
0 1
O 1.56850100 0.10589200 0.00000500
H 0.60673600 -0.03396200 -0.00062800
H 1.94051900 -0.78000500 0.00022200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-5', 'dimer')] = qcdb.Molecule("""
0 1
O -1.14108700 1.44521200 0.00000000
C -0.06175400 2.03094700 0.00000000
H -0.01368700 3.13016900 0.00000000
N 1.14108700 1.43587700 0.00000000
H 1.21768600 0.41652700 0.00000000
H 1.97144600 2.00209500 0.00000000
--
0 1
O 1.14108700 -1.44521200 0.00000000
C 0.06175400 -2.03094700 0.00000000
H 0.01368700 -3.13016900 0.00000000
N -1.14108700 -1.43587700 0.00000000
H -1.21768600 -0.41652700 0.00000000
H -1.97144600 -2.00209500 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HB6-6', 'dimer')] = qcdb.Molecule("""
0 1
C -0.12023400 1.91407000 0.00000000
H -0.16729500 3.00701800 0.00000000
O -1.12185700 1.22098200 0.00000000
O 1.12185700 1.48048900 0.00000000
H 1.12758200 0.48902400 0.00000000
--
0 1
O 1.12185700 -1.22098200 0.00000000
C 0.12023400 -1.91407000 0.00000000
O -1.12185700 -1.48048900 0.00000000
H -1.12758200 -0.48902400 0.00000000
H 0.16729500 -3.00701800 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-1', 'dimer')] = qcdb.Molecule("""
0 1
C -0.41254600 1.67817500 0.00000000
C 0.41254600 2.56162700 0.00000000
H -1.13202600 0.89080900 0.00000000
H 1.13465100 3.34577000 0.00000000
--
0 1
C 0.41254600 -1.67817500 0.00000000
C -0.41254600 -2.56162700 0.00000000
H 1.13202600 -0.89080900 0.00000000
H -1.13465100 -3.34577000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-2', 'dimer')] = qcdb.Molecule("""
0 1
C 1.85776800 0.47280300 0.47242500
C 1.85776800 -0.47280300 -0.47242500
H 0.93377200 0.87468800 0.87406300
H 2.78381800 0.87170900 0.87155600
H 2.78381800 -0.87170900 -0.87155600
H 0.93377200 -0.87468800 -0.87406300
--
0 1
C -1.85776800 0.47280300 -0.47242500
C -1.85776800 -0.47280300 0.47242500
H -2.78381800 0.87170900 -0.87155600
H -0.93377200 0.87468800 -0.87406300
H -0.93377200 -0.87468800 0.87406300
H -2.78381800 -0.87170900 0.87155600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-3', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 1.95000000 1.39150000
H 0.00000000 1.95000000 2.47150000
C 1.20507435 1.95000000 0.69575000
H 2.14038179 1.95000000 1.23575000
C 1.20507435 1.95000000 -0.69575000
H 2.14038179 1.95000000 -1.23575000
C -0.00000000 1.95000000 -1.39150000
H -0.00000000 1.95000000 -2.47150000
C -1.20507435 1.95000000 -0.69575000
H -2.14038179 1.95000000 -1.23575000
C -1.20507435 1.95000000 0.69575000
H -2.14038179 1.95000000 1.23575000
--
0 1
C -1.20507435 -1.95000000 -0.69575000
H -2.14038179 -1.95000000 -1.23575000
C -0.00000000 -1.95000000 -1.39150000
H -0.00000000 -1.95000000 -2.47150000
C 1.20507435 -1.95000000 -0.69575000
H 2.14038179 -1.95000000 -1.23575000
C 1.20507435 -1.95000000 0.69575000
H 2.14038179 -1.95000000 1.23575000
C -0.00000000 -1.95000000 1.39150000
H -0.00000000 -1.95000000 2.47150000
C -1.20507435 -1.95000000 0.69575000
H -2.14038179 -1.95000000 1.23575000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-4', 'dimer')] = qcdb.Molecule("""
0 1
C 1.39150000 -0.00000000 2.49575000
H 2.47150000 -0.00000000 2.49575000
C 0.69575000 1.20507435 2.49575000
H 1.23575000 2.14038179 2.49575000
C 0.69575000 -1.20507435 2.49575000
H 1.23575000 -2.14038179 2.49575000
C -0.69575000 1.20507435 2.49575000
H -1.23575000 2.14038179 2.49575000
C -0.69575000 -1.20507435 2.49575000
H -1.23575000 -2.14038179 2.49575000
C -1.39150000 -0.00000000 2.49575000
H -2.47150000 -0.00000000 2.49575000
--
0 1
C 0.00000000 0.00000000 -1.10425000
C -0.00000000 -1.20507435 -1.80000000
H -0.00000000 -2.14038179 -1.26000000
H 0.00000000 0.00000000 -0.02425000
C -0.00000000 -1.20507435 -3.19150000
H -0.00000000 -2.14038179 -3.73150000
C -0.00000000 0.00000000 -3.88725000
H -0.00000000 0.00000000 -4.96725000
C -0.00000000 1.20507435 -3.19150000
H 0.00000000 2.14038179 -3.73150000
C 0.00000000 1.20507435 -1.80000000
H 0.00000000 2.14038179 -1.26000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'PPS5-5', 'dimer')] = qcdb.Molecule("""
0 1
C -0.80000000 1.80000000 1.39150000
H -0.80000000 1.80000000 2.47150000
C 0.40507435 1.80000000 0.69575000
H 1.34038179 1.80000000 1.23575000
C -2.00507435 1.80000000 0.69575000
H -2.94038179 1.80000000 1.23575000
C 0.40507435 1.80000000 -0.69575000
H 1.34038179 1.80000000 -1.23575000
C -2.00507435 1.80000000 -0.69575000
H -2.94038179 1.80000000 -1.23575000
C -0.80000000 1.80000000 -1.39150000
H -0.80000000 1.80000000 -2.47150000
--
0 1
C 0.80000000 -1.80000000 -1.39150000
C 2.00507435 -1.80000000 -0.69575000
H 2.94038179 -1.80000000 -1.23575000
H 0.80000000 -1.80000000 -2.47150000
C 2.00507435 -1.80000000 0.69575000
H 2.94038179 -1.80000000 1.23575000
C 0.80000000 -1.80000000 1.39150000
H 0.80000000 -1.80000000 2.47150000
C -0.40507435 -1.80000000 0.69575000
H -1.34038179 -1.80000000 1.23575000
C -0.40507435 -1.80000000 -0.69575000
H -1.34038179 -1.80000000 -1.23575000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-1', 'dimer')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
--
0 1
Ne 3.03100000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-2', 'dimer')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
--
0 1
Ar 3.48000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-3', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
--
0 1
Ne 3.09100000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-4', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
--
0 1
Ar 3.48900000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-5', 'dimer')] = qcdb.Molecule("""
0 1
Ne 0.00070500 -0.03504900 -1.74260200
--
0 1
C -0.00070500 0.03504800 1.74257700
H -0.00115700 0.05752400 2.83186300
H -0.02121400 1.05430800 1.35836800
H -0.87960700 -0.50371400 1.39016200
H 0.89915700 -0.46792400 1.39016200
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-6', 'dimer')] = qcdb.Molecule("""
0 1
C 0.00000000 1.39566300 -0.61935100
C -1.20868000 0.69783100 -0.61935100
C -1.20868000 -0.69783100 -0.61935100
C -0.00000000 -1.39566300 -0.61935100
C 1.20868000 -0.69783100 -0.61935100
C 1.20868000 0.69783100 -0.61935100
H 0.00000000 2.48003700 -0.61754900
H -2.14777500 1.24001800 -0.61754900
H -2.14777500 -1.24001800 -0.61754900
H -0.00000000 -2.48003700 -0.61754900
H 2.14777500 -1.24001800 -0.61754900
H 2.14777500 1.24001800 -0.61754900
--
0 1
Ne 0.00000000 0.00000000 2.60019400
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'WI7-7', 'dimer')] = qcdb.Molecule("""
0 1
C -0.00000000 0.00000000 1.80727900
H -0.00000000 1.02664300 1.44240000
H -0.88909900 -0.51332200 1.44240000
H -0.00000000 0.00000000 2.89684300
H 0.88909900 -0.51332200 1.44240000
--
0 1
C -0.00000000 -0.00000000 -1.80727900
H 0.88909900 0.51332200 -1.44240000
H -0.00000000 -0.00000000 -2.89684300
H -0.88909900 0.51332200 -1.44240000
H -0.00000000 -1.02664300 -1.44240000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCCH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000400 -0.60420400 0.00000000
C 0.00000400 0.60419800 0.00000000
H 0.00679500 -1.67012800 0.00000000
H -0.00683900 1.67016300 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'C2H4', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.66807800
C 0.00000000 0.00000000 -0.66807800
H 0.00000000 0.92453300 1.23491900
H 0.00000000 -0.92453300 1.23491900
H 0.00000000 0.92453300 -1.23491900
H 0.00000000 -0.92453300 -1.23491900
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Bz', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 1.39567100 -0.61715800
C -1.20868600 0.69783500 -0.61715800
C -1.20868600 -0.69783500 -0.61715800
C 0.00000000 -1.39567100 -0.61715800
C 1.20868600 -0.69783500 -0.61715800
C 1.20868600 0.69783500 -0.61715800
H 0.00000000 2.47987600 -0.61699800
H -2.14763600 1.23993800 -0.61699800
H -2.14763600 -1.23993800 -0.61699800
H 0.00000000 -2.47987600 -0.61699800
H 2.14763600 -1.23993800 -0.61699800
H 2.14763600 1.23993800 -0.61699800
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH3Cl', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -1.12626800
Cl 0.00000000 0.00000000 0.65820600
H 0.00000000 1.03097000 -1.47059600
H 0.89284600 -0.51548500 -1.47059600
H -0.89284600 -0.51548500 -1.47059600
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH3SH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.04788200 1.15150600 0.00000000
S -0.04788200 -0.66495900 0.00000000
H 1.28433700 -0.82104700 0.00000000
H -1.09471300 1.45662100 0.00000000
H 0.43188500 1.54736900 0.89371000
H 0.43188500 1.54736900 -0.89371000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'CH4', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 0.00000000
H 0.00000000 -1.08947061 0.00000000
H -1.02716274 0.36315688 0.00000000
H 0.34238759 0.36315688 0.96841832
H 0.34238759 0.36315688 -0.96841832
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'F2', 'mono-RLX')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 1.41423000
F 0.00000000 0.00000000 0.00000000
units angstrom
""")
# Relaxed H2O monomer geometry.
# NOTE(review): the original entry contained a stray bare "O" line (no
# coordinates) after the second hydrogen; that is not a valid atom
# specification for a qcdb geometry string and would corrupt the molecule,
# so it has been removed, leaving the expected three-atom water monomer.
GEOS['%s-%s-%s' % (dbse, 'H2O', 'mono-RLX')] = qcdb.Molecule("""
0 1
O 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 0.96183119
H 0.00000000 0.93357861 -0.23140921
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'H2S', 'mono-RLX')] = qcdb.Molecule("""
0 1
S 0.00000000 0.00000000 0.10389400
H 0.00000000 0.96116200 -0.83115300
H 0.00000000 -0.96116200 -0.83115300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCl', 'mono-RLX')] = qcdb.Molecule("""
0 1
Cl 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 1.27907275
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCN', 'mono-RLX')] = qcdb.Molecule("""
0 1
C 0.00000000 0.00000000 -0.50103200
N 0.00000000 0.00000000 0.65706900
H 0.00000000 0.00000000 -1.57005300
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCONH2', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.16068500 0.38839900 -0.00053800
O -1.19570500 -0.24639200 0.00018900
N 1.08330000 -0.15841900 -0.00029100
H -0.13991800 1.49035000 0.00139300
H 1.18225800 -1.16041500 0.00111600
H 1.90431600 0.41973500 0.00124500
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HCOOH', 'mono-RLX')] = qcdb.Molecule("""
0 1
C -0.13470200 0.40125100 -0.00024900
O -1.13426200 -0.26458200 0.00006900
O 1.11868000 -0.09107500 0.00005600
H -0.10761700 1.49546500 0.00051300
H 1.04048400 -1.05771400 -0.00002000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'He', 'mono-RLX')] = qcdb.Molecule("""
0 1
He 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Ne', 'mono-RLX')] = qcdb.Molecule("""
0 1
Ne 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'Ar', 'mono-RLX')] = qcdb.Molecule("""
0 1
Ar 0.00000000 0.00000000 0.00000000
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'HF', 'mono-RLX')] = qcdb.Molecule("""
0 1
F 0.00000000 0.00000000 0.00000000
H 0.00000000 0.00000000 0.92073754
units angstrom
""")
GEOS['%s-%s-%s' % (dbse, 'NH3', 'mono-RLX')] = qcdb.Molecule("""
0 1
N 0.00000000 0.00000000 0.11501300
H 0.00000000 0.93975200 -0.26836400
H 0.81385000 -0.46987600 -0.26836400
H -0.81385000 -0.46987600 -0.26836400
units angstrom
""")
# <<< Derived Geometry Strings >>>
# Derive the four monomer geometries for every reaction from its stored dimer:
# fragment 1 is monomer A, fragment 2 is monomer B.  The '-CP' variants pass
# the partner fragment as a second argument — presumably retained as ghost
# atoms for counterpoise correction (confirm against
# qcdb.Molecule.extract_fragments), while '-unCP' extracts the fragment alone.
for rxn in HRXN:
    GEOS['%s-%s-monoA-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1)
    GEOS['%s-%s-monoB-unCP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2)
    GEOS['%s-%s-monoA-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(1, 2)
    GEOS['%s-%s-monoB-CP' % (dbse, rxn)] = GEOS['%s-%s-dimer' % (dbse, rxn)].extract_fragments(2, 1)
|
spring01/libPSI
|
lib/databases/NCB31.py
|
Python
|
gpl-2.0
| 53,417
|
[
"Psi4"
] |
a06c8d4f46aa29d441f5641ef36f8a82a17c8e4b82cf77e1f4b103d051bfde34
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Development script to test the algorithms of all the model coordination environments
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import LocalGeometryFinder
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import AbstractGeometry
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import AllCoordinationGeometries
from math import factorial
import itertools
from random import shuffle
if __name__ == "__main__":
    allcg = AllCoordinationGeometries()
    # Ask once how the permutations should be chosen for every coordination:
    # "s"  -> standard (all perms for cn <= 6, 500 random perms for cn > 6)
    # "o"  -> decide interactively for each coordination ("on demand")
    # int  -> that fixed number of random permutations everywhere
    test = input('Standard ("s", all permutations for cn <= 6, 500 random permutations for cn > 6) or on demand')
    if test == "s":
        perms_def = "standard"
    elif test == "o":
        perms_def = "on_demand"
    else:
        try:
            nperms = int(test)
            perms_def = "ndefined"
        except Exception:
            perms_def = "on_demand"
    for coordination in range(1, 13):
        print("IN COORDINATION {:d}".format(coordination))
        symbol_name_mapping = allcg.get_symbol_name_mapping(coordination=coordination)
        if perms_def == "standard":
            if coordination > 6:
                test = "500"
            else:
                test = "all"
        elif perms_def == "ndefined":
            test = nperms
        else:
            test = input(
                'Enter if you want to test all possible permutations ("all" or "a") or a given number of random permutations (i.e. "25")'
            )
        # BUG FIX: random.shuffle mutates its argument in place, which raises
        # TypeError on an immutable range object under Python 3 — materialize
        # the indices as a list before shuffling.
        myindices = list(range(coordination))
        if test == "all" or test == "a":
            perms_type = "all"
            perms_iterator = itertools.permutations(myindices)
            nperms = factorial(coordination)
        else:
            perms_type = "explicit"
            try:
                nperms = int(test)
            except Exception:
                raise ValueError("Could not turn {} into integer ...".format(test))
            # Pre-build an explicit list of random permutations; the same list
            # is deliberately reused for every geometry of this coordination.
            perms_iterator = []
            for ii in range(nperms):
                shuffle(myindices)
                perms_iterator.append(list(myindices))
        for cg_symbol, cg_name in symbol_name_mapping.items():
            cg = allcg[cg_symbol]
            if cg.deactivate:
                continue
            print("Testing {} ({})".format(cg_symbol, cg_name))
            cg = allcg[cg_symbol]
            if cg.points is None:
                continue
            lgf = LocalGeometryFinder()
            lgf.setup_parameters(structure_refinement=lgf.STRUCTURE_REFINEMENT_NONE)
            # Reinitialize the itertools permutations (a generator is consumed
            # by the previous geometry's loop, so it must be recreated).
            if perms_type == "all":
                perms_iterator = itertools.permutations(myindices)
            # Loop on the permutations
            iperm = 1
            for indices_perm in perms_iterator:
                # Build a randomly perturbed/translated/rotated/scaled copy of
                # the perfect environment with the atoms permuted.
                lgf.setup_test_perfect_environment(
                    cg_symbol,
                    indices=indices_perm,
                    randomness=True,
                    max_random_dist=0.1,
                    random_translation=True,
                    random_rotation=True,
                    random_scale=True,
                )
                lgf.perfect_geometry = AbstractGeometry.from_cg(cg=cg)
                points_perfect = lgf.perfect_geometry.points_wocs_ctwocc()
                print("Perm # {:d}/{:d} : ".format(iperm, nperms), indices_perm)
                algos_results = []
                for algo in cg.algorithms:
                    if algo.algorithm_type == "EXPLICIT_PERMUTATIONS":
                        results = lgf.coordination_geometry_symmetry_measures(
                            coordination_geometry=cg, points_perfect=points_perfect
                        )
                        # raise ValueError('Do something for the explicit ones ... (these should anyway be by far ok!)')
                    else:
                        results = lgf.coordination_geometry_symmetry_measures_separation_plane(
                            coordination_geometry=cg, separation_plane_algo=algo, points_perfect=points_perfect
                        )
                    algos_results.append(min(results[0]))
                    # The symmetry measure of a perturbed perfect environment
                    # should be close to zero; pause for inspection otherwise.
                    if not min(results[0]) < 1.5:
                        print("Following is not close to 0.0 ...")
                        input(results)
                print(" => ", algos_results)
                iperm += 1
|
gmatteo/pymatgen
|
dev_scripts/chemenv/test_algos_all_geoms.py
|
Python
|
mit
| 4,792
|
[
"pymatgen"
] |
53f1bb00b57477e91cb0adda282c511759d7d317c1eb9eae9eb44faddd8a6ed4
|
"""
This file can be downloaded and used to create a .txt file containing only
the accession numbers from the ghost-tree.nwk that you plan to use for your
analyses.
You must have skbio installed. http://scikit-bio.org/
If you aren't familiar with skbio, make sure to check it out on its own, too!
You will then use "ghost_tree_tips.txt" output file containing the accession
numbers to filter your .biom table so that it contains only the OTUs that
are in the ghost-tree.nwk that you are using.
http://qiime.org/scripts/filter_otus_from_otu_table.html
Use the required arguments and the following two optional arguments:
-e, --otu_ids_to_exclude_fp
(provide the text file containing OTU ids to exclude)
--negate_ids_to_exclude
(this will keep OTUs in otu_ids_to_exclude_fp, rather than discard them)
"""
from skbio import TreeNode

# Read the ghost tree; convert_underscores=False preserves underscores in the
# tip names (the accession numbers we want to export verbatim).
ghosttree = TreeNode.read("ghost_tree_97_80clusters_from_alpha_release.nwk",
                          convert_underscores=False)  # your file goes here

# Write one tip name (accession number) per line.  A context manager replaces
# the original manual open()/close() pair so the file is closed (and flushed)
# even if a write raises.
with open("ghost_tree_tips_underscore_fix.txt", "w") as output:
    for node in ghosttree.tips():
        output.write(str(node.name) + "\n")
|
JTFouquier/ghost-tree
|
helper_files/get_otus_from_ghost_tree.py
|
Python
|
bsd-3-clause
| 1,130
|
[
"scikit-bio"
] |
477dc4e883bfcc13a062a596a4b65cd030081855fa3c1d8cb1bd3dace5b75671
|
"""
Fourier Aliasing
================
Here, we show that we can view the Fourier transform as an infinitely repeat
set of replicates (aliases, *s.t.*
:math:`Ш(\nu/t_{dw})*\tilde{f}(\nu)`) and view any of those aliases
(of width :math:`SW=1/t_{dw}`)
that we choose.
"""
# from JF noteobok sec:fourier_aliasing_test
from pylab import *
from pyspecdata import *
from pyspecdata.fourier.ft_shift import _get_ft_dt
# Demonstration script: build a Gaussian on a time axis, FT it, then show that
# the inverse FT can be asked to start at different (aliased) time origins.
fl = figlist_var()
t = r_[-10:10:512j]
t -= t[argmin(abs(t))] # to be sure that an index exactly equals zero
data = nddata(empty_like(t,dtype = complex128),[-1],['t']).setaxis('t',t)
data.set_units('t','s') # set the units to s, which are automatically converted to Hz upon FT
sigma = 1.0
# Fill with a unit-height Gaussian centred at t=0 (width sigma).
data = data.fromaxis('t',lambda x: complex128(exp(-x**2/2./sigma**2)))
test_non_integral = False
data.ft('t',shift = test_non_integral)# this is required for the non-integral shift!
print(data.other_info)
print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
fl.next('ft')
fl.plot(data, alpha=0.5)
fl.plot(data.runcopy(imag), alpha=0.5)
expand_x()
expand_y()
print("what is the initial desired startpoint?",data.get_prop("FT_start_time"))
# https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
default_plot_kwargs = dict(alpha=0.3, lw=2, mew=2, ms=8, marker='o', ls='none')
# --- standard (unshifted) inverse FT, used as the reference trace ---
print("-----------------------")
print("starting standard")
forplot = data.copy() # keep and re-use the gaussian
print("what is the initial desired startpoint?",forplot.get_prop("FT_start_time"))
forplot.ift('t')
#forplot = forplot['t':(-2,2)]
t_start = forplot.getaxis('t')[0]
fl.next('ift')
fl.plot(forplot,label = '$t_{start}$: standard %0.2fs'%t_start,**default_plot_kwargs)
if test_non_integral:
    fl.next('ift -- non-integral')
    fl.plot(forplot,label = '$t_{start}$: standard %0.2fs'%t_start,**default_plot_kwargs)
#fl.plot(forplot.runcopy(imag),label = 'I: standard',**default_plot_kwargs)
dt = diff(forplot.getaxis('t')[r_[0,1]]).item()
print("and what is the actual first t index (t_start) after I ift?: ", end=' ')
print("t_start is",t_start,"and dt is",dt)
symbols = iter(['d','x','s','o'])
# --- shift the ift start time by whole numbers of dwell times; the result
# should be the same Gaussian viewed in a different alias window ---
for this_integer in [2,-250,1000]:
    print("-----------------------")
    print("starting integral shift for",this_integer)
    forplot = data.copy() # keep and re-use the gaussian
    print("what is the initial desired startpoint?",forplot.get_ft_prop('t',"start_time"))
    new_startpoint = t_start + this_integer * dt
    print("now, I try to reset the startpoint to",new_startpoint)
    print("my dt",dt,"_get_ft_dt",_get_ft_dt(data,'t'))
    forplot.ft_clear_startpoints('t',t = new_startpoint,f = 'current')
    print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
    fl.next('ift')
    forplot.ift('t')
    print("And the actual t startpoint after ift? ",forplot.getaxis('t')[0])
    print("the difference between the two?",forplot.getaxis('t')[0] - forplot.get_ft_prop('t',"start_time"))
    default_plot_kwargs['marker'] = next(symbols)
    fl.plot(forplot,label = '$t_{start}$: shifted by %0.0fpts $\\rightarrow$ %0.2fs'%(this_integer,new_startpoint),**default_plot_kwargs)
    print("-----------------------")
    #fl.plot(forplot.runcopy(imag),label = 'I: integral shifted',**default_plot_kwargs)
expand_x()
expand_y()
# --- optional: non-integral (fractional-dwell-time) shifts; disabled above ---
if test_non_integral:
    symbols = iter(['d','x','s','o'])
    for this_float in [0.5,0.25,10.75]:
        print("-----------------------")
        print("starting non-integral shift for",this_float)
        forplot = data.copy() # keep and re-use the gaussian
        print("what is the initial desired startpoint?",forplot.get_ft_prop('t',"start_time"))
        print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
        new_startpoint = t_start + this_float * dt
        print("now, I try to reset the startpoint to",new_startpoint)
        forplot.ft_clear_startpoints('t',t = new_startpoint,f = 'current')
        fl.next('ift -- non-integral')
        print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
        forplot.ift('t')
        print("And the actual t startpoint after ift? ",forplot.getaxis('t')[0])
        print("the difference between the two?",forplot.getaxis('t')[0] - forplot.get_ft_prop('t',"start_time"))
        default_plot_kwargs['marker'] = next(symbols)
        default_plot_kwargs['markersize'] = 10.0
        fl.plot(forplot,label = '$t_{start}$: shifted by %0.0fpts $\\rightarrow$ %0.2fs'%(this_float,new_startpoint),**default_plot_kwargs)
        #fl.plot(forplot.runcopy(imag),label = 'I: integral shifted',**default_plot_kwargs)
#{{{ these are manually set for a nice view of the peak of the gaussian
xlim(-1,1)
ylim(0.9,1.04)
#}}}
fl.show('interpolation_test_150824.pdf')
|
jmfranck/pyspecdata
|
docs/_downloads/54c91e6b35ea86f52bd26d115eb9b78b/fourier_aliasing.py
|
Python
|
bsd-3-clause
| 4,725
|
[
"Gaussian"
] |
b5308235206e214fa0da7211eae64104974d287ac41ffbe187dd0c9e560d16a1
|
# (C) British Crown Copyright 2010 - 2017, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import cartopy.crs as ccrs
import cf_units
import numpy as np
import numpy.ma as ma
import iris
import iris.analysis.cartography
import iris.analysis.maths
import iris.coord_systems
import iris.coords
import iris.cube
import iris.tests.stock
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib
import matplotlib.pyplot as plt
class TestAnalysisCubeCoordComparison(tests.IrisTest):
    """Exercise iris.analysis.coord_comparison across cubes that share,
    transpose, or differ in their coordinates."""

    def assertComparisonDict(self, comparison_dict, reference_filename):
        # Render the comparison result deterministically (sorted keys, sorted
        # coord-name groups) and compare against a stored reference text file.
        string = ''
        for key in sorted(comparison_dict):
            coord_groups = comparison_dict[key]
            string += ('%40s ' % key)
            names = [[coord.name() if coord is not None else 'None'
                      for coord in coords]
                     for coords in coord_groups]
            string += str(sorted(names))
            string += '\n'
        self.assertString(string, reference_filename)

    def test_coord_comparison(self):
        # cube1: 2-D lon/lat cube with scalar aux coords 'z' and 'f'.
        cube1 = iris.cube.Cube(np.zeros((41, 41)))
        lonlat_cs = iris.coord_systems.GeogCS(6371229)
        lon_points1 = -180 + 4.5 * np.arange(41, dtype=np.float32)
        lat_points = -90 + 4.5 * np.arange(41, dtype=np.float32)
        cube1.add_dim_coord(iris.coords.DimCoord(lon_points1, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
        cube1.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
        cube1.add_aux_coord(iris.coords.AuxCoord(0, long_name='z'))
        cube1.add_aux_coord(iris.coords.AuxCoord(['foobar'], long_name='f', units='no_unit'))
        # cube2: 3-D cube with shifted longitude points and a dimensional 'z'.
        cube2 = iris.cube.Cube(np.zeros((41, 41, 5)))
        lonlat_cs = iris.coord_systems.GeogCS(6371229)
        lon_points2 = -160 + 4.5 * np.arange(41, dtype=np.float32)
        cube2.add_dim_coord(iris.coords.DimCoord(lon_points2, 'longitude', units='degrees', coord_system=lonlat_cs), 0)
        cube2.add_dim_coord(iris.coords.DimCoord(lat_points, 'latitude', units='degrees', coord_system=lonlat_cs), 1)
        cube2.add_dim_coord(iris.coords.DimCoord([5, 7, 9, 11, 13], long_name='z'), 2)
        # cube3: cube1 with lat/lon dimensions swapped and different 'z' points.
        cube3 = cube1.copy()
        lon = cube3.coord("longitude")
        lat = cube3.coord("latitude")
        cube3.remove_coord(lon)
        cube3.remove_coord(lat)
        cube3.add_dim_coord(lon, 1)
        cube3.add_dim_coord(lat, 0)
        cube3.coord('z').points = [20]
        # cube4: cube2 with lat/lon dimensions swapped.
        cube4 = cube2.copy()
        lon = cube4.coord("longitude")
        lat = cube4.coord("latitude")
        cube4.remove_coord(lon)
        cube4.remove_coord(lat)
        cube4.add_dim_coord(lon, 1)
        cube4.add_dim_coord(lat, 0)
        # Test when coords are the same object
        lon = cube1.coord('longitude')
        lat = cube1.coord('latitude')
        cube5 = iris.cube.Cube(np.zeros((41, 41)))
        cube5.add_dim_coord(lon, 0)
        cube5.add_dim_coord(lat, 1)
        coord_comparison = iris.analysis.coord_comparison
        # Compare every pairing against its stored reference output.
        self.assertComparisonDict(coord_comparison(cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube2), ('analysis', 'coord_comparison', 'cube1_cube2.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube3), ('analysis', 'coord_comparison', 'cube1_cube3.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube4), ('analysis', 'coord_comparison', 'cube1_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube5), ('analysis', 'coord_comparison', 'cube1_cube5.txt'))
        self.assertComparisonDict(coord_comparison(cube2, cube3), ('analysis', 'coord_comparison', 'cube2_cube3.txt'))
        self.assertComparisonDict(coord_comparison(cube2, cube4), ('analysis', 'coord_comparison', 'cube2_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube2, cube5), ('analysis', 'coord_comparison', 'cube2_cube5.txt'))
        self.assertComparisonDict(coord_comparison(cube3, cube4), ('analysis', 'coord_comparison', 'cube3_cube4.txt'))
        self.assertComparisonDict(coord_comparison(cube3, cube5), ('analysis', 'coord_comparison', 'cube3_cube5.txt'))
        self.assertComparisonDict(coord_comparison(cube4, cube5), ('analysis', 'coord_comparison', 'cube4_cube5.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube1, cube1), ('analysis', 'coord_comparison', 'cube1_cube1_cube1.txt'))
        self.assertComparisonDict(coord_comparison(cube1, cube2, cube1), ('analysis', 'coord_comparison', 'cube1_cube2_cube1.txt'))
        # get a coord comparison result and check that we are getting back what was expected
        coord_group = coord_comparison(cube1, cube2)['grouped_coords'][0]
        self.assertIsInstance(coord_group, iris.analysis._CoordGroup)
        self.assertIsInstance(list(coord_group)[0], iris.coords.Coord)
class TestAnalysisWeights(tests.IrisTest):
    """Weighted MEAN collapse behaviour: dtype handling, derived bounds,
    returned weights, and error cases."""

    def test_weighted_mean_little(self):
        # Small hand-built 3x3 cube with explicit per-cell weights.
        data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
        weights = np.array([[9, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=np.float32)
        cube = iris.cube.Cube(data, long_name="test_data", units="1")
        hcs = iris.coord_systems.GeogCS(6371229)
        lat_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lat", units="1", coord_system=hcs)
        lon_coord = iris.coords.DimCoord(np.array([1, 2, 3], dtype=np.float32), long_name="lon", units="1", coord_system=hcs)
        cube.add_dim_coord(lat_coord, 0)
        cube.add_dim_coord(lon_coord, 1)
        cube.add_aux_coord(iris.coords.AuxCoord(np.arange(3, dtype=np.float32), long_name="dummy", units=1), 1)
        self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))
        # Collapse by coord name ...
        a = cube.collapsed('lat', iris.analysis.MEAN, weights=weights)
        # np.ma.average doesn't apply type promotion rules in some versions,
        # and instead makes the result type float64. To ignore that case we
        # fix up the dtype here if it is promotable from float32. We still want
        # to catch cases where there is a loss of precision however.
        if a.dtype > np.float32:
            cast_data = a.data.astype(np.float32)
            a.data = cast_data
        self.assertCMLApproxData(a, ('analysis', 'weighted_mean_lat.cml'))
        # ... and by coord instance.
        b = cube.collapsed(lon_coord, iris.analysis.MEAN, weights=weights)
        if b.dtype > np.float32:
            cast_data = b.data.astype(np.float32)
            b.data = cast_data
        b.data = np.asarray(b.data)
        self.assertCMLApproxData(b, ('analysis', 'weighted_mean_lon.cml'))
        self.assertEqual(b.coord('dummy').shape, (1, ))
        # test collapsing multiple coordinates (and the fact that one of the coordinates isn't the same coordinate instance as on the cube)
        c = cube.collapsed([lat_coord[:], lon_coord], iris.analysis.MEAN, weights=weights)
        if c.dtype > np.float32:
            cast_data = c.data.astype(np.float32)
            c.data = cast_data
        self.assertCMLApproxData(c, ('analysis', 'weighted_mean_latlon.cml'))
        self.assertEqual(c.coord('dummy').shape, (1, ))
        # Check new coord bounds - made from points
        self.assertArrayEqual(c.coord('lat').bounds, [[1, 3]])
        # Check new coord bounds - made from bounds
        cube.coord('lat').bounds = [[0.5, 1.5], [1.5, 2.5], [2.5, 3.5]]
        c = cube.collapsed(['lat', 'lon'], iris.analysis.MEAN, weights=weights)
        self.assertArrayEqual(c.coord('lat').bounds, [[0.5, 3.5]])
        cube.coord('lat').bounds = None
        # Check there was no residual change
        self.assertCML(cube, ('analysis', 'weighted_mean_source.cml'))

    @tests.skip_data
    def test_weighted_mean(self):
        ### compare with pp_area_avg - which collapses both lat and lon
        #
        # pp = ppa('/data/local/dataZoo/PP/simple_pp/global.pp', 0)
        # print, pp_area(pp, /box)
        # print, pp_area_avg(pp, /box) #287.927
        # ;gives an answer of 287.927
        #
        ###
        e = iris.tests.stock.simple_pp()
        self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))
        e.coord('latitude').guess_bounds()
        e.coord('longitude').guess_bounds()
        area_weights = iris.analysis.cartography.area_weights(e)
        e.coord('latitude').bounds = None
        e.coord('longitude').bounds = None
        f, collapsed_area_weights = e.collapsed('latitude', iris.analysis.MEAN, weights=area_weights, returned=True)
        g = f.collapsed('longitude', iris.analysis.MEAN, weights=collapsed_area_weights)
        # check it's a 0d, scalar cube
        self.assertEqual(g.shape, ())
        # check the value - pp_area_avg's result of 287.927 differs by factor of 1.00002959
        np.testing.assert_approx_equal(g.data, 287.935, significant=5)
        # check we get summed weights even if we don't give any
        h, summed_weights = e.collapsed('latitude', iris.analysis.MEAN, returned=True)
        assert(summed_weights is not None)
        # Check there was no residual change
        e.coord('latitude').bounds = None
        e.coord('longitude').bounds = None
        self.assertCML(e, ('analysis', 'weighted_mean_original.cml'))
        # Test collapsing of missing coord
        self.assertRaises(iris.exceptions.CoordinateNotFoundError, e.collapsed, 'platitude', iris.analysis.MEAN)
        # Test collapsing of non data coord
        self.assertRaises(iris.exceptions.CoordinateCollapseError, e.collapsed, 'pressure', iris.analysis.MEAN)
@tests.skip_data
class TestAnalysisBasic(tests.IrisTest):
    """Collapse a real PP cube with each basic aggregator and compare the
    results against stored CML reference files."""

    def setUp(self):
        # Load the rotated-pole time-cube test data used by every test here.
        file = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
        cubes = iris.load(file)
        self.cube = cubes[0]
        self.assertCML(self.cube, ('analysis', 'original.cml'))

    def _common(self, name, aggregate, original_name='original_common.cml', *args, **kwargs):
        # Collapse grid_latitude, then grid_longitude on that result, then both
        # in one call; each result is checked against its reference CML and the
        # source cube must be left unchanged afterwards.
        self.cube.data = self.cube.data.astype(np.float64)
        self.assertCML(self.cube, ('analysis', original_name))
        a = self.cube.collapsed('grid_latitude', aggregate)
        self.assertCMLApproxData(a, ('analysis', '%s_latitude.cml' % name), *args, **kwargs)
        b = a.collapsed('grid_longitude', aggregate)
        self.assertCMLApproxData(b, ('analysis', '%s_latitude_longitude.cml' % name), *args, **kwargs)
        c = self.cube.collapsed(['grid_latitude', 'grid_longitude'], aggregate)
        self.assertCMLApproxData(c, ('analysis', '%s_latitude_longitude_1call.cml' % name), *args, **kwargs)
        # Check there was no residual change
        self.assertCML(self.cube, ('analysis', original_name))

    def test_mean(self):
        self._common('mean', iris.analysis.MEAN, rtol=1e-05)

    def test_std_dev(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('std_dev', iris.analysis.STD_DEV, rtol=1e-05)

    def test_hmean(self):
        # harmonic mean requires data > 0
        self.cube.data *= self.cube.data
        self._common('hmean', iris.analysis.HMEAN, 'original_hmean.cml', rtol=1e-05)

    def test_gmean(self):
        self._common('gmean', iris.analysis.GMEAN, rtol=1e-05)

    def test_variance(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('variance', iris.analysis.VARIANCE, rtol=1e-05)

    def test_median(self):
        self._common('median', iris.analysis.MEDIAN)

    def test_sum(self):
        # as the numbers are so high, trim off some trailing digits & compare to 0dp
        self._common('sum', iris.analysis.SUM, rtol=1e-05)

    def test_max(self):
        self._common('max', iris.analysis.MAX)

    def test_min(self):
        self._common('min', iris.analysis.MIN)

    def test_rms(self):
        self._common('rms', iris.analysis.RMS)

    def test_duplicate_coords(self):
        self.assertRaises(ValueError, tests.stock.track_1d, duplicate_x=True)
class TestMissingData(tests.IrisTest):
    """Check that MAX/MIN/SUM collapses propagate NaNs for plain arrays,
    but skip masked points for masked arrays."""
    def setUp(self):
        self.cube_with_nan = tests.stock.simple_2d()
        data = self.cube_with_nan.data.astype(np.float32)
        self.cube_with_nan.data = data.copy()
        # Poke NaNs into rows 1 and 2 of the plain-array cube.
        self.cube_with_nan.data[1, 0] = np.nan
        self.cube_with_nan.data[2, 2] = np.nan
        self.cube_with_nan.data[2, 3] = np.nan
        # Same data, but with the NaN points masked instead.
        self.cube_with_mask = tests.stock.simple_2d()
        self.cube_with_mask.data = ma.array(self.cube_with_nan.data,
                                            mask=np.isnan(self.cube_with_nan.data))
    def test_max(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.MAX)
        np.testing.assert_array_equal(cube.data, np.array([3, np.nan, np.nan]))
        cube = self.cube_with_mask.collapsed('foo', iris.analysis.MAX)
        np.testing.assert_array_equal(cube.data, np.array([3, 7, 9]))
    def test_min(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.MIN)
        np.testing.assert_array_equal(cube.data, np.array([0, np.nan, np.nan]))
        cube = self.cube_with_mask.collapsed('foo', iris.analysis.MIN)
        np.testing.assert_array_equal(cube.data, np.array([0, 5, 8]))
    def test_sum(self):
        cube = self.cube_with_nan.collapsed('foo', iris.analysis.SUM)
        np.testing.assert_array_equal(cube.data, np.array([6, np.nan, np.nan]))
        cube = self.cube_with_mask.collapsed('foo', iris.analysis.SUM)
        np.testing.assert_array_equal(cube.data, np.array([6, 18, 17]))
class TestAggregator_mdtol_keyword(tests.IrisTest):
    """Exercise the `mdtol` keyword of Cube.collapsed: a result point is
    masked when the fraction of masked contributions exceeds mdtol."""
    def setUp(self):
        # 2x2 cube with the second column fully masked.
        data = ma.array([[1, 2], [4, 5]], dtype=np.float32,
                        mask=[[False, True], [False, True]])
        cube = iris.cube.Cube(data, long_name="test_data", units="1")
        lat_coord = iris.coords.DimCoord(np.array([1, 2], dtype=np.float32),
                                         long_name="lat", units="1")
        lon_coord = iris.coords.DimCoord(np.array([3, 4], dtype=np.float32),
                                         long_name="lon", units="1")
        cube.add_dim_coord(lat_coord, 0)
        cube.add_dim_coord(lon_coord, 1)
        self.cube = cube
    def test_single_coord_no_mdtol(self):
        # Default: any masked contribution masks the result point.
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN)
        t = ma.array([2.5, 5.], mask=[False, True])
        self.assertMaskedArrayEqual(collapsed.data, t)
    def test_single_coord_mdtol(self):
        # Half of the column masked; mdtol=0.5 tolerates it.
        self.cube.data.mask = np.array([[False, True], [False, False]])
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.5)
        t = ma.array([2.5, 5], mask=[False, False])
        self.assertMaskedArrayEqual(collapsed.data, t)
    def test_single_coord_mdtol_alt(self):
        # Same data, but mdtol=0.4 < masked fraction (0.5) masks the point.
        self.cube.data.mask = np.array([[False, True], [False, False]])
        collapsed = self.cube.collapsed(
            self.cube.coord('lat'), iris.analysis.MEAN, mdtol=0.4)
        t = ma.array([2.5, 5], mask=[False, True])
        self.assertMaskedArrayEqual(collapsed.data, t)
    def test_multi_coord_no_mdtol(self):
        collapsed = self.cube.collapsed(
            [self.cube.coord('lat'), self.cube.coord('lon')],
            iris.analysis.MEAN)
        t = np.array(2.5)
        self.assertArrayEqual(collapsed.data, t)
    def test_multi_coord_mdtol(self):
        collapsed = self.cube.collapsed(
            [self.cube.coord('lat'), self.cube.coord('lon')],
            iris.analysis.MEAN, mdtol=0.4)
        t = ma.array(2.5, mask=True)
        self.assertMaskedArrayEqual(collapsed.data, t)
class TestAggregators(tests.IrisTest):
    """Exercise PERCENTILE (standard and fast paths), PROPORTION, COUNT,
    weighted SUM and weighted RMS aggregations."""
    def _check_collapsed_percentile(self, cube, percents, collapse_coord,
                                    expected_result, CML_filename=None,
                                    **kwargs):
        # Collapse `collapse_coord` with PERCENTILE and compare the data
        # (and optionally the CML) against the expected values.
        expected_result = np.array(expected_result, dtype=np.float32)
        result = cube.collapsed(collapse_coord, iris.analysis.PERCENTILE,
                                percent=percents, **kwargs)
        np.testing.assert_array_almost_equal(result.data, expected_result)
        if CML_filename is not None:
            self.assertCML(result, ('analysis', CML_filename), checksum=False)
    def _check_percentile(self, data, axis, percents, expected_result,
                          **kwargs):
        # Exercise the private _percentile helper directly on a raw array.
        result = iris.analysis._percentile(data, axis, percents, **kwargs)
        np.testing.assert_array_almost_equal(result, expected_result)
    def test_percentile_1d_25_percent(self):
        cube = tests.stock.simple_1d()
        self._check_collapsed_percentile(
            cube, 25, 'foo', 2.5, CML_filename='first_quartile_foo_1d.cml')
    def test_percentile_1d_75_percent(self):
        cube = tests.stock.simple_1d()
        self._check_collapsed_percentile(
            cube, 75, 'foo', 7.5, CML_filename='third_quartile_foo_1d.cml')
    def test_fast_percentile_1d_25_percent(self):
        cube = tests.stock.simple_1d()
        self._check_collapsed_percentile(
            cube, 25, 'foo', 2.5, fast_percentile_method=True,
            CML_filename='first_quartile_foo_1d_fast_percentile.cml')
    def test_fast_percentile_1d_75_percent(self):
        cube = tests.stock.simple_1d()
        self._check_collapsed_percentile(
            cube, 75, 'foo', 7.5, fast_percentile_method=True,
            CML_filename='third_quartile_foo_1d_fast_percentile.cml')
    def test_percentile_2d_single_coord(self):
        cube = tests.stock.simple_2d()
        self._check_collapsed_percentile(
            cube, 25, 'foo', [0.75, 4.75, 8.75],
            CML_filename='first_quartile_foo_2d.cml')
    def test_percentile_2d_two_coords(self):
        cube = tests.stock.simple_2d()
        self._check_collapsed_percentile(
            cube, 25, ['foo', 'bar'], [2.75],
            CML_filename='first_quartile_foo_bar_2d.cml')
    def test_fast_percentile_2d_single_coord(self):
        cube = tests.stock.simple_2d()
        self._check_collapsed_percentile(
            cube, 25, 'foo', [0.75, 4.75, 8.75], fast_percentile_method=True,
            CML_filename='first_quartile_foo_2d_fast_percentile.cml')
    def test_fast_percentile_2d_two_coords(self):
        cube = tests.stock.simple_2d()
        self._check_collapsed_percentile(
            cube, 25, ['foo', 'bar'], [2.75], fast_percentile_method=True,
            CML_filename='first_quartile_foo_bar_2d_fast_percentile.cml')
    def test_percentile_3d(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[6., 7., 8., 9.],
                                    [10., 11., 12., 13.],
                                    [14., 15., 16., 17.]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 0, 50, expected_result)
    def test_fast_percentile_3d(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[6., 7., 8., 9.],
                                    [10., 11., 12., 13.],
                                    [14., 15., 16., 17.]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 0, 50, expected_result,
                               fast_percentile_method=True)
    def test_percentile_3d_axis_one(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[4., 5., 6., 7.],
                                    [16., 17., 18., 19.]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 1, 50, expected_result)
    def test_fast_percentile_3d_axis_one(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[4., 5., 6., 7.],
                                    [16., 17., 18., 19.]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 1, 50, expected_result,
                               fast_percentile_method=True)
    def test_percentile_3d_axis_two(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[1.5, 5.5, 9.5],
                                    [13.5, 17.5, 21.5]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 2, 50, expected_result)
    def test_fast_percentile_3d_axis_two(self):
        array_3d = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
        expected_result = np.array([[1.5, 5.5, 9.5],
                                    [13.5, 17.5, 21.5]],
                                   dtype=np.float32)
        self._check_percentile(array_3d, 2, 50, expected_result,
                               fast_percentile_method=True)
    def test_percentile_3d_masked(self):
        cube = tests.stock.simple_3d_mask()
        expected_result = [[12., 13., 14., 15.],
                           [16., 17., 18., 19.],
                           [20., 18., 19., 20.]]
        self._check_collapsed_percentile(
            cube, 75, 'wibble', expected_result,
            CML_filename='last_quartile_foo_3d_masked.cml')
    def test_fast_percentile_3d_masked(self):
        # The fast path delegates to np.percentile, which cannot honour
        # a mask, so it must be rejected outright.
        cube = tests.stock.simple_3d_mask()
        msg = 'Cannot use fast np.percentile method with masked array.'
        with self.assertRaisesRegexp(TypeError, msg):
            cube.collapsed('wibble',
                           iris.analysis.PERCENTILE, percent=75,
                           fast_percentile_method=True)
    def test_percentile_3d_notmasked(self):
        cube = tests.stock.simple_3d()
        expected_result = [[9., 10., 11., 12.],
                           [13., 14., 15., 16.],
                           [17., 18., 19., 20.]]
        self._check_collapsed_percentile(
            cube, 75, 'wibble', expected_result,
            CML_filename='last_quartile_foo_3d_notmasked.cml')
    def test_fast_percentile_3d_notmasked(self):
        cube = tests.stock.simple_3d()
        expected_result = [[9., 10., 11., 12.],
                           [13., 14., 15., 16.],
                           [17., 18., 19., 20.]]
        self._check_collapsed_percentile(
            cube, 75, 'wibble', expected_result, fast_percentile_method=True,
            CML_filename='last_quartile_foo_3d_notmasked_fast_percentile.cml')
    def test_proportion(self):
        cube = tests.stock.simple_1d()
        gt5 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 5)
        np.testing.assert_array_almost_equal(gt5.data, np.array([6 / 11.]))
        self.assertCML(gt5, ('analysis', 'proportion_foo_1d.cml'), checksum=False)
    def test_proportion_2d(self):
        cube = tests.stock.simple_2d()
        gt6 = cube.collapsed('foo', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0, 0.5, 1], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_foo_2d.cml'), checksum=False)
        gt6 = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([1 / 3, 1 / 3, 2 / 3, 2 / 3], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_bar_2d.cml'), checksum=False)
        gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0.5], dtype=np.float32))
        self.assertCML(gt6, ('analysis', 'proportion_foo_bar_2d.cml'), checksum=False)
        # mask the data
        cube.data = ma.array(cube.data, mask=cube.data % 2)
        cube.data.mask[1, 2] = True
        gt6_masked = cube.collapsed('bar', iris.analysis.PROPORTION, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6_masked.data, ma.array([1 / 3, None, 1 / 2, None],
                                             mask=[False, True, False, True],
                                             dtype=np.float32))
        self.assertCML(gt6_masked, ('analysis', 'proportion_foo_2d_masked.cml'), checksum=False)
    def test_count(self):
        cube = tests.stock.simple_1d()
        gt5 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 5)
        np.testing.assert_array_almost_equal(gt5.data, np.array([6]))
        gt5.data = gt5.data.astype('i8')
        self.assertCML(gt5, ('analysis', 'count_foo_1d.cml'), checksum=False)
    def test_count_2d(self):
        cube = tests.stock.simple_2d()
        gt6 = cube.collapsed('foo', iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([0, 2, 4], dtype=np.float32))
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_foo_2d.cml'), checksum=False)
        gt6 = cube.collapsed('bar', iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([1, 1, 2, 2], dtype=np.float32))
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_bar_2d.cml'), checksum=False)
        gt6 = cube.collapsed(('foo', 'bar'), iris.analysis.COUNT, function=lambda val: val >= 6)
        np.testing.assert_array_almost_equal(gt6.data, np.array([6], dtype=np.float32))
        gt6.data = gt6.data.astype('i8')
        self.assertCML(gt6, ('analysis', 'count_foo_bar_2d.cml'), checksum=False)
    def test_weighted_sum_consistency(self):
        # weighted sum with unit weights should be the same as a sum
        cube = tests.stock.simple_1d()
        normal_sum = cube.collapsed('foo', iris.analysis.SUM)
        weights = np.ones_like(cube.data)
        weighted_sum = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
        self.assertArrayAlmostEqual(normal_sum.data, weighted_sum.data)
    def test_weighted_sum_1d(self):
        # verify 1d weighted sum is correct
        cube = tests.stock.simple_1d()
        weights = np.array([.05, .05, .1, .1, .2, .3, .2, .1, .1, .05, .05])
        result = cube.collapsed('foo', iris.analysis.SUM, weights=weights)
        self.assertAlmostEqual(result.data, 6.5)
        self.assertCML(result, ('analysis', 'sum_weighted_1d.cml'),
                       checksum=False)
    def test_weighted_sum_2d(self):
        # verify 2d weighted sum is correct
        cube = tests.stock.simple_2d()
        weights = np.array([.3, .4, .3])
        weights = iris.util.broadcast_to_shape(weights, cube.shape, [0])
        result = cube.collapsed('bar', iris.analysis.SUM, weights=weights)
        self.assertArrayAlmostEqual(result.data, np.array([4., 5., 6., 7.]))
        self.assertCML(result, ('analysis', 'sum_weighted_2d.cml'),
                       checksum=False)
    def test_weighted_rms(self):
        cube = tests.stock.simple_2d()
        # modify cube data so that the results are nice numbers
        cube.data = np.array([[4, 7, 10, 8],
                              [21, 30, 12, 24],
                              [14, 16, 20, 8]],
                             dtype=np.float64)
        weights = np.array([[1, 4, 3, 2],
                            [6, 4.5, 1.5, 3],
                            [2, 1, 1.5, 0.5]],
                           dtype=np.float64)
        expected_result = np.array([8.0, 24.0, 16.0])
        result = cube.collapsed('foo', iris.analysis.RMS, weights=weights)
        self.assertArrayAlmostEqual(result.data, expected_result)
        self.assertCML(result, ('analysis', 'rms_weighted_2d.cml'),
                       checksum=False)
@tests.skip_data
class TestRotatedPole(tests.IrisTest):
    """Check rotate_pole/unrotate_pole round-trips on rotated-pole grids
    against values derived independently with proj4."""
    def _check_both_conversions(self, cube, index):
        # Unrotate the cube's grid and compare both the rotated inputs and
        # the unrotated outputs against stored JSON reference data.
        rlons, rlats = iris.analysis.cartography.get_xy_grids(cube)
        rcs = cube.coord_system('RotatedGeogCS')
        x, y = iris.analysis.cartography.unrotate_pole(
            rlons, rlats, rcs.grid_north_pole_longitude,
            rcs.grid_north_pole_latitude)
        self.assertDataAlmostEqual(x, ('analysis',
                                       'rotated_pole.{}.x.json'.format(index)))
        self.assertDataAlmostEqual(y, ('analysis',
                                       'rotated_pole.{}.y.json'.format(index)))
        self.assertDataAlmostEqual(rlons,
                                   ('analysis',
                                    'rotated_pole.{}.rlon.json'.format(index)))
        self.assertDataAlmostEqual(rlats,
                                   ('analysis',
                                    'rotated_pole.{}.rlat.json'.format(index)))
    def test_all(self):
        path = tests.get_data_path(('PP', 'ukVorog', 'ukv_orog_refonly.pp'))
        master_cube = iris.load_cube(path)
        # Check overall behaviour.
        cube = master_cube[::10, ::10]
        self._check_both_conversions(cube, 0)
        # Check numerical stability.
        cube = master_cube[210:238, 424:450]
        self._check_both_conversions(cube, 1)
    def test_unrotate_nd(self):
        rlons = np.array([[350., 352.], [350., 352.]])
        rlats = np.array([[-5., -0.], [-4., -1.]])
        resx, resy = iris.analysis.cartography.unrotate_pole(rlons, rlats,
                                                             178.0, 38.0)
        # Solutions derived by proj4 direct.
        solx = np.array([[-16.42176094, -14.85892262],
                         [-16.71055023, -14.58434624]])
        soly = np.array([[ 46.00724251,  51.29188893],
                         [ 46.98728486,  50.30706042]])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_unrotate_1d(self):
        rlons = np.array([350., 352., 354., 356.])
        rlats = np.array([-5., -0., 5., 10.])
        resx, resy = iris.analysis.cartography.unrotate_pole(
            rlons.flatten(), rlats.flatten(), 178.0, 38.0)
        # Solutions derived by proj4 direct.
        solx = np.array([-16.42176094, -14.85892262,
                         -12.88946157, -10.35078336])
        soly = np.array([46.00724251, 51.29188893,
                         56.55031485, 61.77015703])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_rotate_nd(self):
        rlons = np.array([[350., 351.], [352., 353.]])
        rlats = np.array([[10., 15.], [20., 25.]])
        resx, resy = iris.analysis.cartography.rotate_pole(rlons, rlats,
                                                           20., 80.)
        # Solutions derived by proj4 direct.
        solx = np.array([[148.69672569, 149.24727087],
                         [149.79067025, 150.31754368]])
        soly = np.array([[18.60905789, 23.67749384],
                         [28.74419024, 33.8087963 ]])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
    def test_rotate_1d(self):
        rlons = np.array([350., 351., 352., 353.])
        rlats = np.array([10., 15., 20., 25.])
        resx, resy = iris.analysis.cartography.rotate_pole(rlons.flatten(),
                                                           rlats.flatten(), 20., 80.)
        # Solutions derived by proj4 direct.
        solx = np.array([148.69672569, 149.24727087,
                         149.79067025, 150.31754368])
        soly = np.array([18.60905789, 23.67749384,
                         28.74419024, 33.8087963 ])
        self.assertArrayAlmostEqual(resx, solx)
        self.assertArrayAlmostEqual(resy, soly)
@tests.skip_data
class TestAreaWeights(tests.IrisTest):
    """Spot-check area_weights values on a small subsampled PP cube."""
    def test_area_weights(self):
        small_cube = iris.tests.stock.simple_pp()
        # Get offset, subsampled region: small enough to test against literals
        small_cube = small_cube[10:, 35:]
        small_cube = small_cube[::8, ::8]
        small_cube = small_cube[:5, :4]
        # pre-check non-data properties
        self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
                       checksum=False)
        # check area-weights values
        small_cube.coord('latitude').guess_bounds()
        small_cube.coord('longitude').guess_bounds()
        area_weights = iris.analysis.cartography.area_weights(small_cube)
        expected_results = np.array(
            [[3.11955866e+12, 3.11956008e+12, 3.11955866e+12, 3.11956008e+12],
             [5.21951065e+12, 5.21951303e+12, 5.21951065e+12, 5.21951303e+12],
             [6.68991281e+12, 6.68991585e+12, 6.68991281e+12, 6.68991585e+12],
             [7.35341305e+12, 7.35341640e+12, 7.35341305e+12, 7.35341640e+12],
             [7.12998335e+12, 7.12998660e+12, 7.12998335e+12, 7.12998660e+12]],
            dtype=np.float64)
        self.assertArrayAllClose(area_weights, expected_results, rtol=1e-8)
        # Check there was no residual change
        small_cube.coord('latitude').bounds = None
        small_cube.coord('longitude').bounds = None
        self.assertCML(small_cube, ('analysis', 'areaweights_original.cml'),
                       checksum=False)
@tests.skip_data
class TestAreaWeightGeneration(tests.IrisTest):
    """Check area_weights handles assorted cube layouts: transposed,
    scalar/singleton lat-lon, normalization, and missing bounds."""
    def setUp(self):
        self.cube = iris.tests.stock.realistic_4d()
    def test_area_weights_std(self):
        # weights for stock 4d data
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_order(self):
        # weights for data with dimensions in a different order
        order = [3, 2, 1, 0]  # (lon, lat, level, time)
        self.cube.transpose(order)
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_non_adjacent(self):
        # weights for cube with non-adjacent latitude/longitude dimensions
        order = [0, 3, 1, 2]  # (time, lon, level, lat)
        self.cube.transpose(order)
        weights = iris.analysis.cartography.area_weights(self.cube)
        self.assertEqual(weights.shape, self.cube.shape)
    def test_area_weights_scalar_latitude(self):
        # weights for cube with a scalar latitude dimension
        cube = self.cube[:, :, 0, :]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_scalar_longitude(self):
        # weights for cube with a scalar longitude dimension
        cube = self.cube[:, :, :, 0]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_scalar(self):
        # weights for cube with scalar latitude and longitude dimensions
        cube = self.cube[:, :, 0, 0]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singleton_latitude(self):
        # singleton (1-point) latitude dimension
        cube = self.cube[:, :, 0:1, :]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singleton_longitude(self):
        # singleton (1-point) longitude dimension
        cube = self.cube[:, :, :, 0:1]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_singletons(self):
        # singleton (1-point) latitude and longitude dimensions
        cube = self.cube[:, :, 0:1, 0:1]
        weights = iris.analysis.cartography.area_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
    def test_area_weights_normalized(self):
        # normalized area weights must sum to one over lat/lon dimensions.
        weights = iris.analysis.cartography.area_weights(self.cube,
                                                         normalize=True)
        sumweights = weights.sum(axis=3).sum(axis=2)  # sum over lon and lat
        self.assertArrayAlmostEqual(sumweights, 1)
    def test_area_weights_non_contiguous(self):
        # Slice the cube so that we have non-contiguous longitude
        # bounds.
        ind = (0, 1, 2, -3, -2, -1)
        cube = self.cube[..., ind]
        weights = iris.analysis.cartography.area_weights(cube)
        expected = iris.analysis.cartography.area_weights(self.cube)[..., ind]
        self.assertArrayEqual(weights, expected)
    def test_area_weights_no_lon_bounds(self):
        # Missing longitude bounds must be rejected.
        self.cube.coord('grid_longitude').bounds = None
        with self.assertRaises(ValueError):
            iris.analysis.cartography.area_weights(self.cube)
    def test_area_weights_no_lat_bounds(self):
        # Missing latitude bounds must be rejected.
        self.cube.coord('grid_latitude').bounds = None
        with self.assertRaises(ValueError):
            iris.analysis.cartography.area_weights(self.cube)
@tests.skip_data
class TestLatitudeWeightGeneration(tests.IrisTest):
    """Check cosine_latitude_weights against cos(lat) for 1d (DimCoord)
    and 2d (AuxCoord) latitude coordinates in various dimension orders."""
    def setUp(self):
        path = iris.tests.get_data_path(['NetCDF', 'rotated', 'xyt',
                                         'small_rotPole_precipitation.nc'])
        self.cube = iris.load_cube(path)
        # Cube with only the 1d grid_latitude/grid_longitude dim coords.
        self.cube_dim_lat = self.cube.copy()
        self.cube_dim_lat.remove_coord('latitude')
        self.cube_dim_lat.remove_coord('longitude')
        # The 2d cubes are unrealistic, you would not want to weight by
        # anything other than grid latitude in real-world scenarios. However,
        # the technical details are suitable for testing purposes, providing
        # a nice analog for a 2d latitude coordinate from a curvilinear grid.
        self.cube_aux_lat = self.cube.copy()
        self.cube_aux_lat.remove_coord('grid_latitude')
        self.cube_aux_lat.remove_coord('grid_longitude')
        self.lat1d = self.cube.coord('grid_latitude').points
        self.lat2d = self.cube.coord('latitude').points
    def test_cosine_latitude_weights_range(self):
        # check the range of returned values, needs a cube that spans the full
        # latitude range
        lat_coord = iris.coords.DimCoord(np.linspace(-90, 90, 73),
                                         standard_name='latitude',
                                         units=cf_units.Unit('degrees_north'))
        cube = iris.cube.Cube(np.ones([73], dtype=np.float64),
                              long_name='test_cube', units='1')
        cube.add_dim_coord(lat_coord, 0)
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertTrue(weights.max() <= 1)
        self.assertTrue(weights.min() >= 0)
    def test_cosine_latitude_weights_0d(self):
        # 0d latitude dimension (scalar coordinate)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat[:, 0, :])
        self.assertEqual(weights.shape, self.cube_dim_lat[:, 0, :].shape)
        self.assertAlmostEqual(weights[0, 0],
                               np.cos(np.deg2rad(self.lat1d[0])))
    def test_cosine_latitude_weights_1d_singleton(self):
        # singleton (1-point) 1d latitude coordinate (time, lat, lon)
        cube = self.cube_dim_lat[:, 0:1, :]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertAlmostEqual(weights[0, 0, 0],
                               np.cos(np.deg2rad(self.lat1d[0])))
    def test_cosine_latitude_weights_1d(self):
        # 1d latitude coordinate (time, lat, lon)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, 0],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_1d_latitude_first(self):
        # 1d latitude coordinate with latitude first (lat, time, lon)
        order = [1, 0, 2]  # (lat, time, lon)
        self.cube_dim_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube_dim_lat.shape)
        self.assertArrayAlmostEqual(weights[:, 0, 0],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_1d_latitude_last(self):
        # 1d latitude coordinate with latitude last (time, lon, lat)
        order = [0, 2, 1]  # (time, lon, lat)
        self.cube_dim_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_dim_lat)
        self.assertEqual(weights.shape, self.cube_dim_lat.shape)
        self.assertArrayAlmostEqual(weights[0, 0, :],
                                    np.cos(np.deg2rad(self.lat1d)))
    def test_cosine_latitude_weights_2d_singleton1(self):
        # 2d latitude coordinate with first dimension singleton
        cube = self.cube_aux_lat[:, 0:1, :]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[0:1, :])))
    def test_cosine_latitude_weights_2d_singleton2(self):
        # 2d latitude coordinate with second dimension singleton
        cube = self.cube_aux_lat[:, :, 0:1]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[:, 0:1])))
    def test_cosine_latitude_weights_2d_singleton3(self):
        # 2d latitude coordinate with both dimensions singleton
        cube = self.cube_aux_lat[:, 0:1, 0:1]
        weights = iris.analysis.cartography.cosine_latitude_weights(cube)
        self.assertEqual(weights.shape, cube.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d[0:1, 0:1])))
    def test_cosine_latitude_weights_2d(self):
        # 2d latitude coordinate (time, lat, lon)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d)))
    def test_cosine_latitude_weights_2d_latitude_first(self):
        # 2d latitude coordinate with latitude first (lat, time, lon)
        order = [1, 0, 2]  # (lat, time, lon)
        self.cube_aux_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[:, 0, :],
                                    np.cos(np.deg2rad(self.lat2d)))
    def test_cosine_latitude_weights_2d_latitude_last(self):
        # 2d latitude coordinate with latitude last (time, lon, lat)
        order = [0, 2, 1]  # (time, lon, lat)
        self.cube_aux_lat.transpose(order)
        weights = iris.analysis.cartography.cosine_latitude_weights(
            self.cube_aux_lat)
        self.assertEqual(weights.shape, self.cube_aux_lat.shape)
        self.assertArrayAlmostEqual(weights[0, :, :],
                                    np.cos(np.deg2rad(self.lat2d.T)))
    def test_cosine_latitude_weights_no_latitude(self):
        # no coordinate identified as latitude
        self.cube_dim_lat.remove_coord('grid_latitude')
        with self.assertRaises(ValueError):
            weights = iris.analysis.cartography.cosine_latitude_weights(
                self.cube_dim_lat)
    def test_cosine_latitude_weights_multiple_latitude(self):
        # two coordinates identified as latitude
        with self.assertRaises(ValueError):
            weights = iris.analysis.cartography.cosine_latitude_weights(
                self.cube)
class TestRollingWindow(tests.IrisTest):
    """Exercise Cube.rolling_window: aggregators, masking, window sizes,
    circular coords, weights, and error conditions."""
    def setUp(self):
        # XXX Comes from test_aggregated_by
        cube = iris.cube.Cube(np.array([[6, 10, 12, 18], [8, 12, 14, 20], [18, 12, 10, 6]]), long_name='temperature', units='kelvin')
        cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 5, 10], dtype=np.float64), 'latitude', units='degrees'), 0)
        cube.add_dim_coord(iris.coords.DimCoord(np.array([0, 2, 4, 6], dtype=np.float64), 'longitude', units='degrees'), 1)
        self.cube = cube
    def test_non_mean_operator(self):
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MAX, window=2)
        expected_result = np.array([[10, 12, 18],
                                    [12, 14, 20],
                                    [18, 12, 10]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
    def test_longitude_simple(self):
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=2)
        expected_result = np.array([[ 8., 11., 15.],
                                    [ 10., 13., 17.],
                                    [ 15., 11., 8.]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_longitude.cml'))
        # A zero-length window is invalid.
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_longitude_masked(self):
        self.cube.data = ma.array(self.cube.data,
                                  mask=[[True, True, True, True],
                                        [True, False, True, True],
                                        [False, False, False, False]])
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=2)
        # -99. entries sit under the mask; their values are irrelevant.
        expected_result = np.ma.array([[-99., -99., -99.],
                                       [12., 12., -99.],
                                       [15., 11., 8.]],
                                      mask=[[True, True, True],
                                            [False, False, True],
                                            [False, False, False]],
                                      dtype=np.float64)
        self.assertMaskedArrayEqual(expected_result, res_cube.data)
    def test_longitude_circular(self):
        # Rolling windows over circular coordinates are not supported.
        cube = self.cube
        cube.coord('longitude').circular = True
        self.assertRaises(iris.exceptions.NotYetImplementedError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_different_length_windows(self):
        res_cube = self.cube.rolling_window('longitude', iris.analysis.MEAN, window=4)
        expected_result = np.array([[ 11.5],
                                    [ 13.5],
                                    [ 11.5]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'size_4_longitude.cml'))
        # Window too long:
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=6)
        # Window too small:
        self.assertRaises(ValueError, self.cube.rolling_window, 'longitude', iris.analysis.MEAN, window=0)
    def test_bad_coordinate(self):
        self.assertRaises(KeyError, self.cube.rolling_window, 'wibble', iris.analysis.MEAN, window=0)
    def test_latitude_simple(self):
        res_cube = self.cube.rolling_window('latitude', iris.analysis.MEAN, window=2)
        expected_result = np.array([[ 7., 11., 13., 19.],
                                    [ 13., 12., 12., 13.]], dtype=np.float64)
        self.assertArrayEqual(expected_result, res_cube.data)
        self.assertCML(res_cube, ('analysis', 'rolling_window', 'simple_latitude.cml'))
    def test_mean_with_weights_consistency(self):
        # equal weights should be the same as the mean with no weights
        wts = np.array([0.5, 0.5], dtype=np.float64)
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=2,
                                            weights=wts)
        expected_result = self.cube.rolling_window('longitude',
                                                   iris.analysis.MEAN,
                                                   window=2)
        self.assertArrayEqual(expected_result.data, res_cube.data)
    def test_mean_with_weights(self):
        # rolling window mean with weights
        wts = np.array([0.1, 0.6, 0.3], dtype=np.float64)
        res_cube = self.cube.rolling_window('longitude',
                                            iris.analysis.MEAN,
                                            window=3,
                                            weights=wts)
        expected_result = np.array([[10.2, 13.6],
                                    [12.2, 15.6],
                                    [12.0, 9.0]], dtype=np.float64)
        # use almost equal to compare floats
        self.assertArrayAlmostEqual(expected_result, res_cube.data)
# Standard iris test-suite entry point.
if __name__ == "__main__":
    tests.main()
|
QuLogic/iris
|
lib/iris/tests/test_analysis.py
|
Python
|
gpl-3.0
| 50,394
|
[
"NetCDF"
] |
a0b0626648f826d3ec06d17fd3c9a994451243c319c6c35eb79d70d02a5bf272
|
# minimal.py ---
# Upi Bhalla, NCBS Bangalore 2013.
#
# Commentary:
#
# Testing system for loading in arbitrary multiscale models based on
# model definition files.
# This version has a minimal model with Ca in all 3 compartments,
# and CaM (just one step) in SPINE and PSD, with a nominal product Ca.CaM.
# In this diffusionOnly version there is no reaction. To be more precise,
# the loaded in minimal.g model has the Ca-binding reaction, but it is
# deleted when the file is loaded in.
# Incoming Ca from synaptic events comes to the PSD.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
# Code:
import sys
sys.path.append('../../python')
import os
os.environ['NUMPTHREADS'] = '1'
import math
import moose
import proto18
# Resting potential, -70 mV; presumably consumed by the proto18 channel
# prototype builders — TODO confirm against proto18.
EREST_ACT = -70e-3
def loadElec():
    """Populate /library with the proto18 channel/synapse prototypes and
    load the ca1_asym cell model under /model/elec (hsolve method).
    Returns the id of the loaded cell model."""
    library = moose.Neutral( '/library' )
    moose.setCwe( '/library' )
    # Build each prototype in the /library working element.
    proto18.make_Ca()
    proto18.make_Ca_conc()
    proto18.make_K_AHP()
    proto18.make_K_C()
    proto18.make_Na()
    proto18.make_K_DR()
    proto18.make_K_A()
    proto18.make_glu()
    proto18.make_NMDA()
    proto18.make_Ca_NMDA()
    proto18.make_NMDA_Ca_conc()
    proto18.make_axon()
    model = moose.Neutral( '/model' )
    cellId = moose.loadModel( 'ca1_asym.p', '/model/elec', "hsolve" )
    return cellId
def addPlot( objpath, field, plot ):
    """Create a Table at /graphs/<plot> recording `field` of the object
    at `objpath`; silently does nothing if the path does not exist."""
    if not moose.exists( objpath ):
        return
    source = moose.element( objpath )
    table = moose.Table( '/graphs/' + plot )
    moose.connect( table, 'requestOut', source, field )
def dumpPlots( fname ):
    """Write every Table under /graphs and /graphs/elec to xplot file *fname*.

    Electrical tables get an '_e' suffix on their trace names.  Any existing
    file of the same name is removed first so the dump starts clean.
    """
    if os.path.exists( fname ):
        os.remove( fname )
    for tab in moose.wildcardFind( '/graphs/#[ISA=Table]' ):
        moose.element( tab[0] ).xplot( fname, tab[0].name )
    for tab in moose.wildcardFind( '/graphs/elec/#[ISA=Table]' ):
        moose.element( tab[0] ).xplot( fname, tab[0].name + '_e' )
def moveCompt( path, oldParent, newParent ):
    """Re-parent the chemical model at *path* into *newParent* and delete
    *oldParent*.

    Wires every pool under *path* to the mesh entries of *newParent*, moves
    the model tree across, removes the now-empty old compartment, and hangs
    the new compartment under /model/chem.  Statement order matters: the
    move must happen before the old parent is deleted.
    """
    meshEntries = moose.element( newParent.path + '/mesh' )
    # Set up vol messaging from new compts to all their child objects.
    for x in moose.wildcardFind( path + '/##[ISA=PoolBase]' ):
        moose.connect( meshEntries, 'mesh', x, 'mesh', 'OneToOne' )
    #print 'path = ', path, ', oldparent = ', oldParent.path
    orig = moose.element( path )
    moose.move( orig, newParent )
    # The loaded-model compartment is now empty; remove it.
    moose.delete( moose.vec( oldParent.path ) )
    # Attach the re-parented compartment to the chemical model root.
    chem = moose.element( '/model/chem' )
    moose.move( newParent, chem )
def loadChem( neuroCompt, spineCompt, psdCompt ):
    """Load the diffusion-only kinetic model and distribute it across the
    dendrite (neuroCompt), spine (spineCompt) and PSD (psdCompt) meshes.

    The meshes must arrive with unit volume to match the CubeMesh the
    kinetic model was built on; the loaded compartments are then resized to
    the real mesh volumes before being moved into place with moveCompt().
    The Ca-binding reactions in the loaded model are deleted, leaving a
    purely diffusive system (see the file header commentary).
    """
    # We need the compartments to come in with a volume of 1 to match the
    # original CubeMesh.
    assert( neuroCompt.volume == 1.0 )
    assert( spineCompt.volume == 1.0 )
    assert( psdCompt.volume == 1.0 )
    assert( neuroCompt.mesh.num == 1 )
    print 'volume = ', neuroCompt.mesh[0].volume
    #assert( neuroCompt.mesh[0].volume == 1.0 )
    #an unfortunate mismatch
    # So we'll have to resize the volumes of the current compartments to the
    # new ones.
    modelId = moose.loadModel( 'diffonly.g', '/model', 'ee' )
    #moose.le( '/model/model' )
    #moose.le( '/model/model/kinetics' )
    #moose.le( '/model/model/kinetics/PSD' )
    #moose.le( '/model/model/kinetics/SPINE' )
    # Remove the Ca + CaM binding reactions: this run is diffusion-only.
    moose.delete( moose.vec( '/model/model/kinetics/PSD/kreac' ) )
    moose.delete( moose.vec( '/model/model/kinetics/SPINE/kreac' ) )
    #moose.le( '/model/model/kinetics/PSD' )
    #moose.le( '/model/model/kinetics/SPINE' )
    pCaCaM = moose.element( '/model/model/kinetics/PSD/Ca_CaM' )
    pCaCaM.concInit = 0.001
    # NOTE(review): dCaCaM points at the same PSD element as pCaCaM, not at
    # a DEND pool -- presumably a copy/paste slip in the debug prints; the
    # printed "d" column therefore mirrors "p".  Confirm against intent.
    dCaCaM = moose.element( '/model/model/kinetics/PSD/Ca_CaM' )
    sCaCaM = moose.element( '/model/model/kinetics/SPINE/Ca_CaM' )
    print "CaCaM.concInit[p,s,d] = ", pCaCaM.concInit, sCaCaM.concInit, dCaCaM.concInit
    #moose.delete( moose.vec( '/model/model/kinetics/SPINE/Ca_CaM' ) )
    #CaCaM2 = moose.element( '/model/model/kinetics/SPINE/Ca_CaM' )
    #CaCaM2.concInit = 0.001
    # Rename the loaded model to /model/chem so the rest of the script can
    # address it uniformly.
    chem = moose.element( '/model/model' )
    chem.name = 'chem'
    oldS = moose.element( '/model/chem/compartment_1' )
    oldP = moose.element( '/model/chem/compartment_2' )
    oldN = moose.element( '/model/chem/kinetics' )
    print 'oldvols[p,s,d] = ', oldP.volume, oldS.volume, oldN.volume
    print 'newvols[p,s,d] = ', psdCompt.mesh[0].volume, spineCompt.mesh[0].volume, neuroCompt.mesh[0].volume
    # Resize the loaded compartments to the actual mesh volumes.
    oldN.volume = neuroCompt.mesh[0].volume
    oldS.volume = spineCompt.mesh[0].volume
    oldP.volume = psdCompt.mesh[0].volume
    print 'after redoing vols'
    print "CaCaM.concInit[p,s,d] = ", pCaCaM.concInit, sCaCaM.concInit, dCaCaM.concInit
    moveCompt( '/model/chem/kinetics/SPINE', oldS, spineCompt )
    moveCompt( '/model/chem/kinetics/PSD', oldP, psdCompt )
    # Need to do the DEND last because the oldN is /kinetics,
    # and it will be deleted.
    moveCompt( '/model/chem/kinetics/DEND', oldN, neuroCompt )
    print 'after moving to new compts'
    print "CaCaM.concInit[p,s,d] = ", pCaCaM.concInit, sCaCaM.concInit, dCaCaM.concInit
def makeNeuroMeshModel():
    """Assemble the full multiscale model.

    Steps: load the electrical cell, attach a spike generator to the 13
    spine synapses, build the neuro/spine/PSD chemical meshes, load and
    distribute the chemical model, attach one GslStoich solver per mesh,
    join them for inter-mesh diffusion (spine solver as master), and set up
    adaptors carrying electrical NMDA Ca into the chemical PSD Ca pools.
    """
    diffLength = 20e-6 # But we only want diffusion over part of the model.
    numSyn = 13
    elec = loadElec()
    # Single spike source driving all the spine synapses.
    synInput = moose.SpikeGen( '/model/elec/synInput' )
    synInput.refractT = 47e-3
    synInput.threshold = -1.0
    synInput.edgeTriggered = 0
    synInput.Vm( 0 )
    # NOTE(review): refractT is assigned twice with the same value --
    # presumably redundant; confirm.
    synInput.refractT = 47e-3
    for i in range( numSyn ):
        name = '/model/elec/spine_head_14_' + str( i + 1 )
        r = moose.element( name + '/glu' )
        r.synapse.num = 1
        syn = moose.element( r.path + '/synapse' )
        moose.connect( synInput, 'spikeOut', syn, 'addSpike', 'Single' )
        # Parabolic weight profile: zero at the end spines, maximal mid-way.
        syn.weight = 0.2 * i * ( numSyn - 1 - i )
        syn.delay = i * 1.0e-3
    # Chemical meshes: dendrite (NeuroMesh) with spines split out into a
    # SpineMesh and PSDs into a PsdMesh, kept aligned via list messages.
    neuroCompt = moose.NeuroMesh( '/model/neuroMesh' )
    #print 'neuroMeshvolume = ', neuroCompt.mesh[0].volume
    neuroCompt.separateSpines = 1
    neuroCompt.diffLength = diffLength
    neuroCompt.geometryPolicy = 'cylinder'
    spineCompt = moose.SpineMesh( '/model/spineMesh' )
    #print 'spineMeshvolume = ', spineCompt.mesh[0].volume
    moose.connect( neuroCompt, 'spineListOut', spineCompt, 'spineList', 'OneToOne' )
    psdCompt = moose.PsdMesh( '/model/psdMesh' )
    #print 'psdMeshvolume = ', psdCompt.mesh[0].volume
    moose.connect( neuroCompt, 'psdListOut', psdCompt, 'psdList', 'OneToOne' )
    loadChem( neuroCompt, spineCompt, psdCompt )
    # Put in the solvers, see how they fare.
    nmksolve = moose.GslStoich( '/model/chem/neuroMesh/ksolve' )
    nmksolve.path = '/model/chem/neuroMesh/##'
    nmksolve.compartment = moose.element( '/model/chem/neuroMesh' )
    nmksolve.method = 'rk5'
    nm = moose.element( '/model/chem/neuroMesh/mesh' )
    moose.connect( nm, 'remesh', nmksolve, 'remesh' )
    #print "neuron: nv=", nmksolve.numLocalVoxels, ", nav=", nmksolve.numAllVoxels, nmksolve.numVarPools, nmksolve.numAllPools
    #print 'setting up smksolve'
    smksolve = moose.GslStoich( '/model/chem/spineMesh/ksolve' )
    smksolve.path = '/model/chem/spineMesh/##'
    smksolve.compartment = moose.element( '/model/chem/spineMesh' )
    smksolve.method = 'rk5'
    sm = moose.element( '/model/chem/spineMesh/mesh' )
    moose.connect( sm, 'remesh', smksolve, 'remesh' )
    #print "spine: nv=", smksolve.numLocalVoxels, ", nav=", smksolve.numAllVoxels, smksolve.numVarPools, smksolve.numAllPools
    #
    #print 'setting up pmksolve'
    pmksolve = moose.GslStoich( '/model/chem/psdMesh/ksolve' )
    pmksolve.path = '/model/chem/psdMesh/##'
    pmksolve.compartment = moose.element( '/model/chem/psdMesh' )
    pmksolve.method = 'rk5'
    pm = moose.element( '/model/chem/psdMesh/mesh' )
    moose.connect( pm, 'remesh', pmksolve, 'remesh' )
    #print "psd: nv=", pmksolve.numLocalVoxels, ", nav=", pmksolve.numAllVoxels, pmksolve.numVarPools, pmksolve.numAllPools
    #
    print 'neuroMeshvolume = ', neuroCompt.mesh[0].volume
    #print 'Assigning the cell model'
    # Now to set up the model.
    #neuroCompt.cell = elec
    # Restrict the chemical model to the lat_14 branch and its spines.
    neuroCompt.cellPortion( elec, '/model/elec/lat_14_#,/model/elec/spine_neck#,/model/elec/spine_head#' )
    """
    ns = neuroCompt.numSegments
    #assert( ns == 11 ) # dend, 5x (shaft+head)
    ndc = neuroCompt.numDiffCompts
    #print 'numDiffCompts = ', ndc
    assert( ndc == 145 )
    ndc = neuroCompt.mesh.num
    #print 'NeuroMeshNum = ', ndc
    assert( ndc == 145 )
    sdc = spineCompt.mesh.num
    #print 'SpineMeshNum = ', sdc
    assert( sdc == 13 )
    pdc = psdCompt.mesh.num
    #print 'PsdMeshNum = ', pdc
    assert( pdc == 13 )
    """
    mesh = moose.vec( '/model/chem/neuroMesh/mesh' )
    #for i in range( ndc ):
    #    print 's[', i, '] = ', mesh[i].volume
    mesh2 = moose.vec( '/model/chem/spineMesh/mesh' )
    # for i in range( sdc ):
    #     print 's[', i, '] = ', mesh2[i].volume
    #print 'numPSD = ', moose.element( '/model/chem/psdMesh/mesh' ).localNumField
    mesh = moose.vec( '/model/chem/psdMesh/mesh' )
    #print 'psd mesh.volume = ', mesh.volume
    #for i in range( pdc ):
    #    print 's[', i, '] = ', mesh[i].volume
    #
    # We need to use the spine solver as the master for the purposes of
    # these calculations. This will handle the diffusion calculations
    # between head and dendrite, and between head and PSD.
    smksolve.addJunction( nmksolve )
    #print "spine: nv=", smksolve.numLocalVoxels, ", nav=", smksolve.numAllVoxels, smksolve.numVarPools, smksolve.numAllPools
    smksolve.addJunction( pmksolve )
    #print "psd: nv=", pmksolve.numLocalVoxels, ", nav=", pmksolve.numAllVoxels, pmksolve.numVarPools, pmksolve.numAllPools
    # Sanity checks: 13 diffusion compartments per mesh for this cell portion.
    ndc = neuroCompt.numDiffCompts
    #print 'numDiffCompts = ', ndc
    assert( ndc == 13 )
    ndc = neuroCompt.mesh.num
    #print 'NeuroMeshNum = ', ndc
    assert( ndc == 13 )
    sdc = spineCompt.mesh.num
    #print 'SpineMeshNum = ', sdc
    assert( sdc == 13 )
    pdc = psdCompt.mesh.num
    #print 'PsdMeshNum = ', pdc
    assert( pdc == 13 )
    """
    print 'neuroCompt'
    for i in range( ndc ):
        print i, neuroCompt.stencilIndex[i]
        print i, neuroCompt.stencilRate[i]
    print 'spineCompt'
    for i in range( sdc * 3 ):
        print i, spineCompt.stencilIndex[i]
        print i, spineCompt.stencilRate[i]
    print 'psdCompt'
    for i in range( pdc ):
        print i, psdCompt.stencilIndex[i]
        print i, psdCompt.stencilRate[i]
    print 'Spine parents:'
    pavoxel = spineCompt.parentVoxel
    for i in range( sdc ):
        print i, pavoxel[i]
    """
    # oddly, numLocalFields does not work.
    #moose.le( '/model/chem/neuroMesh' )
    ca = moose.element( '/model/chem/neuroMesh/DEND/Ca' )
    assert( ca.lastDimension == ndc )
    """
    CaNpsd = moose.vec( '/model/chem/psdMesh/PSD/PP1_PSD/CaN' )
    print 'numCaN in PSD = ', CaNpsd.nInit, ', vol = ', CaNpsd.volume
    CaNspine = moose.vec( '/model/chem/spineMesh/SPINE/CaN_BULK/CaN' )
    print 'numCaN in spine = ', CaNspine.nInit, ', vol = ', CaNspine.volume
    """
    # set up adaptors
    # aCa handle is unused beyond creation; adaptCa below addresses the
    # same Adaptor elements as a vec.
    aCa = moose.Adaptor( '/model/chem/psdMesh/adaptCa', pdc )
    adaptCa = moose.vec( '/model/chem/psdMesh/adaptCa' )
    chemCa = moose.vec( '/model/chem/psdMesh/PSD/Ca' )
    assert( len( adaptCa ) == pdc )
    assert( len( chemCa ) == pdc )
    for i in range( pdc ):
        # Each spine's NMDA Ca concentration drives one adaptor entry.
        path = '/model/elec/spine_head_14_' + str( i + 1 ) + '/NMDA_Ca_conc'
        elecCa = moose.element( path )
        moose.connect( elecCa, 'concOut', adaptCa[i], 'input', 'Single' )
    moose.connect( adaptCa, 'outputSrc', chemCa, 'setConc', 'OneToOne' )
    adaptCa.inputOffset = 0.0    #
    adaptCa.outputOffset = 80e-6 # 80 nM offset in chem.
    adaptCa.scale = 1e-5         # 520 to 0.0052 mM
    #print adaptCa.outputOffset
    #print adaptCa.scale
def makeElecPlots():
    """Create Vm and Ca recording tables for selected electrical compartments.

    Tables live under /graphs/elec; each entry is (object path, field getter,
    table name relative to /graphs).
    """
    moose.Neutral( '/graphs' )
    moose.Neutral( '/graphs/elec' )
    probes = (
        ( '/model/elec/soma', 'getVm', 'elec/somaVm' ),
        ( '/model/elec/soma/Ca_conc', 'getCa', 'elec/somaCa' ),
        ( '/model/elec/basal_3', 'getVm', 'elec/basal3Vm' ),
        ( '/model/elec/apical_14', 'getVm', 'elec/apical_14Vm' ),
        ( '/model/elec/apical_14/Ca_conc', 'getCa', 'elec/apical_14Ca' ),
        ( '/model/elec/spine_head_14_7', 'getVm', 'elec/spine_7Vm' ),
        ( '/model/elec/spine_head_14_7/NMDA_Ca_conc', 'getCa', 'elec/spine_7Ca' ),
        ( '/model/elec/spine_head_14_13/NMDA_Ca_conc', 'getCa', 'elec/spine_13Ca' ),
    )
    for objpath, field, tabname in probes:
        addPlot( objpath, field, tabname )
def makeChemPlots():
    """Record Ca, CaM and Ca_CaM concentrations at sample voxels.

    Samples voxels 0, 6 and 12 of the PSD and SPINE meshes, plus dendrite
    voxels around the one parenting the middle spine (index 6).
    """
    spineMesh = moose.element( '/model/chem/spineMesh' )
    midSpine = 6
    midVoxel = spineMesh.parentVoxel[midSpine]
    moose.Neutral( '/graphs' )
    sampleVoxels = ( 0, 6, 12 )
    # PSD and spine Ca at the sample voxels.
    for i in sampleVoxels:
        addPlot( '/model/chem/psdMesh/PSD/Ca[%d]' % i, 'getConc', 'pCa%d' % i )
    for i in sampleVoxels:
        addPlot( '/model/chem/spineMesh/SPINE/Ca[%d]' % i, 'getConc', 'sCa%d' % i )
    # Dendrite Ca: first voxel, the mid-spine voxel and neighbours, last voxel.
    addPlot( '/model/chem/neuroMesh/DEND/Ca[0]', 'getConc', 'dend0Ca' )
    addPlot( '/model/chem/neuroMesh/DEND/Ca[%d]' % midVoxel, 'getConc', 'dendMidCa' )
    for off in ( 2, 4, 6 ):
        addPlot( '/model/chem/neuroMesh/DEND/Ca[%d]' % ( midVoxel + off ),
                 'getConc', 'dendMid%d' % off )
    addPlot( '/model/chem/neuroMesh/DEND/Ca[144]', 'getConc', 'dend144Ca' )
    # Free CaM in PSD and spine.
    for i in sampleVoxels:
        addPlot( '/model/chem/psdMesh/PSD/CaM[%d]' % i, 'getConc', 'pCaM%d' % i )
    for i in sampleVoxels:
        addPlot( '/model/chem/spineMesh/SPINE/CaM[%d]' % i, 'getConc', 'sCaM%d' % i )
    # Bound Ca_CaM in PSD, spine and dendrite.
    for i in sampleVoxels:
        addPlot( '/model/chem/psdMesh/PSD/Ca_CaM[%d]' % i, 'getConc', 'pCaCaM%d' % i )
    for i in sampleVoxels:
        addPlot( '/model/chem/spineMesh/SPINE/Ca_CaM[%d]' % i, 'getConc', 'sCaCaM%d' % i )
    addPlot( '/model/chem/neuroMesh/DEND/Ca_CaM[0]', 'getConc', 'dCaCaM0' )
    addPlot( '/model/chem/neuroMesh/DEND/Ca_CaM[%d]' % midVoxel, 'getConc', 'dendMidCaCaM' )
    addPlot( '/model/chem/neuroMesh/DEND/Ca_CaM[144]', 'getConc', 'dCaCaM144' )
def testNeuroMeshMultiscale():
    """Top-level driver: build the multiscale model, wire clocks,
    run 0.5 s of simulation and dump all plots to 'diffonly.plot'.

    Clock layout: ticks 0-2 electrical (50 us), 5-6 chemical (100 us),
    7-8 plotting (500 us).
    """
    elecDt = 50e-6
    chemDt = 1e-4
    plotDt = 5e-4
    plotName = 'diffonly.plot'
    makeNeuroMeshModel()
    """
    for i in moose.wildcardFind( '/model/chem/##[ISA=PoolBase]' ):
        if ( i[0].diffConst > 0 ):
            grandpaname = i.parent[0].parent.name + '/'
            paname = i.parent[0].name + '/'
            print grandpaname + paname + i[0].name, i[0].diffConst
    moose.le( '/model/chem/spineMesh/ksolve' )
    print 'Neighbors:'
    for t in moose.element( '/model/chem/spineMesh/ksolve/junction' ).neighbors['masterJunction']:
        print 'masterJunction <-', t.path
    for t in moose.wildcardFind( '/model/chem/#Mesh/ksolve' ):
        k = moose.element( t[0] )
        print k.path + ' localVoxels=', k.numLocalVoxels, ', allVoxels= ', k.numAllVoxels
    """
    makeChemPlots()
    makeElecPlots()
    moose.setClock( 0, elecDt )
    moose.setClock( 1, elecDt )
    moose.setClock( 2, elecDt )
    moose.setClock( 5, chemDt )
    moose.setClock( 6, chemDt )
    moose.setClock( 7, plotDt )
    moose.setClock( 8, plotDt )
    # Electrical: compartment init, spike generation, channels/synapses/Ca.
    moose.useClock( 0, '/model/elec/##[ISA=Compartment]', 'init' )
    moose.useClock( 1, '/model/elec/##[ISA=SpikeGen]', 'process' )
    moose.useClock( 2, '/model/elec/##[ISA=ChanBase],/model/##[ISA=SynBase],/model/##[ISA=CaConc]','process')
    # Chemical: pools/reactions/enzymes, then the adaptors bridging scales.
    moose.useClock( 5, '/model/chem/##[ISA=PoolBase],/model/##[ISA=ReacBase],/model/##[ISA=EnzBase]', 'process' )
    moose.useClock( 6, '/model/chem/##[ISA=Adaptor]', 'process' )
    moose.useClock( 7, '/graphs/#', 'process' )
    moose.useClock( 8, '/graphs/elec/#', 'process' )
    moose.useClock( 5, '/model/chem/#Mesh/ksolve', 'init' )
    moose.useClock( 6, '/model/chem/#Mesh/ksolve', 'process' )
    hsolve = moose.HSolve( '/model/elec/hsolve' )
    moose.useClock( 1, '/model/elec/hsolve', 'process' )
    hsolve.dt = elecDt
    # NOTE(review): target is '/model/elec/compt' -- confirm this path
    # matches the loaded cell's root compartment.
    hsolve.target = '/model/elec/compt'
    moose.reinit()
    # NOTE(review): reinit is called twice; presumably deliberate to settle
    # solver state after the hsolve setup -- confirm.
    moose.reinit()
    """
    print 'pre'
    eca = moose.vec( '/model/chem/psdMesh/PSD/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'dend'
    eca = moose.vec( '/model/chem/neuroMesh/DEND/Ca' )
    for i in ( 0, 1, 2, 30, 60, 90, 120, 144 ):
        print i, eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'PSD'
    eca = moose.vec( '/model/chem/psdMesh/PSD/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    print 'spine'
    eca = moose.vec( '/model/chem/spineMesh/SPINE/CaM/Ca' )
    for i in range( 3 ):
        print eca[i].concInit, eca[i].conc, eca[i].nInit, eca[i].n, eca[i].volume
    """
    moose.start( 0.5 )
    dumpPlots( plotName )
    print 'All done'
def main():
    """Entry point: build and run the multiscale diffusion-only model."""
    testNeuroMeshMultiscale()
if __name__ == '__main__':
    main()
#
# loadMulti.py ends here.
|
dilawar/moose-full
|
moose-examples/snippets/MULTI/diffusionOnly.py
|
Python
|
gpl-2.0
| 17,109
|
[
"MOOSE",
"NEURON"
] |
690e53b178d491f852f16981cfdb5ff38f29530554c6dc391ff51271eeaffdad
|
#!/usr/bin/env python
"""
Program to show the monte carlo error estimate of the polarization correction
operation, assuming a value and uncertainty for polarizer and flipper
efficiencies.
"""
n = 100000      # number of Monte Carlo samples
err = 5         # efficiency uncertainty, in percent (scaled by 0.01 below)
Ic = 100000     # nominal incident intensity, in counts
doplot = False  # set True to show histograms of the corrected intensities
import numpy,math,pylab
import reflectometry.reduction as reflred
eff = reflred.PolarizationEfficiency()
# Seed polarizer/flipper efficiencies from a gaussian distribution
eff.ff = numpy.random.normal(0.95,0.01*err,n)
eff.fp = numpy.random.normal(0.90,0.01*err,n)
eff.rf = numpy.random.normal(0.95,0.01*err,n)
eff.rp = numpy.random.normal(0.90,0.01*err,n)
eff.Ic = numpy.random.normal(Ic,numpy.sqrt(Ic),n)
data = reflred.PolarizedData()
# Fill the four spin cross sections (++, +-, -+, --) with nominal counts,
# then randomize them with Poisson-like noise.
for V,v in [(data.pp,Ic), (data.pm,Ic/5), (data.mp,Ic/5), (data.mm,Ic)]:
    V.v = numpy.ones(n)*v
    V.variance = V.v # Variance is poisson variance
    V.v = numpy.random.normal(V.v,V.dv) # Randomize inputs
eff(data) # Apply polarization efficiency correction to data
# Compare the raw counting uncertainty sqrt(E) to the spread of the
# corrected values; optionally histogram each cross section.
for plt,d,label,E in [(221,data.pp,'++',Ic),
                      (222,data.pm,'+-',Ic/5),
                      (223,data.mp,'-+',Ic/5),
                      (224,data.mm,'--',Ic)]:
    if doplot:
        pylab.subplot(plt)
        pylab.hist(d.v)
        pylab.legend(['%s %0.2f (%0.2f)'%(label,pylab.mean(d.v),pylab.std(d.v))])
    print "%s measurement uncertainty %.2f, corrected uncertainty %.3f, value %.3f"\
        %(label,math.sqrt(E),pylab.std(d.v),numpy.mean(d.v))
if doplot: pylab.show()
|
reflectometry/osrefl
|
osrefl/loaders/reduction/examples/polcorerr.py
|
Python
|
bsd-3-clause
| 1,461
|
[
"Gaussian"
] |
9f85f9e91a30514513f5e42b4b3f89bc2906b77368178855220e849236662c7c
|
<<<<<<< HEAD
<<<<<<< HEAD
import base64
import re
import os
import sys
import urllib.request, urllib.parse, urllib.error
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
str = str
else:
from io import StringIO
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient, params_to_urlencoded
from .session import BaseSession, DropboxSession, DropboxOAuth2Session
def format_path(path):
    """Normalize path for use with the Dropbox API.

    This function turns multiple adjacent slashes into single
    slashes, then ensures that there's a leading slash but
    not a trailing slash.

    Falsy inputs (empty string, None) are returned unchanged; the root
    path '/' normalizes to the empty string.
    """
    if not path:
        return path
    # Collapse runs of slashes into a single slash.
    path = re.sub(r'/+', '/', path)
    if path == '/':
        # Root is represented as the empty string.
        # (The original conditional returned "" for both branches of an
        # isinstance(path, str) test -- a leftover from the py2 u""/"" split.)
        return ""
    else:
        return '/' + path.strip('/')
class DropboxClient(object):
"""
This class lets you make Dropbox API calls. You'll need to obtain an
OAuth 2 access token first. You can get an access token using either
:class:`DropboxOAuth2Flow` or :class:`DropboxOAuth2FlowNoRedirect`.
All of the API call methods can raise a :class:`dropbox.rest.ErrorResponse` exception if
the server returns a non-200 or invalid HTTP response. Note that a 401
return status at any point indicates that the access token you're using
is no longer valid and the user must be put through the OAuth 2
authorization flow again.
"""
def __init__(self, oauth2_access_token, locale=None, rest_client=None):
"""Construct a ``DropboxClient`` instance.
Parameters
oauth2_access_token
An OAuth 2 access token (string). For backwards compatibility this may
also be a DropboxSession object (see :meth:`create_oauth2_access_token()`).
locale
The locale of the user of your application. For example "en" or "en_US".
Some API calls return localized data and error messages; this setting
tells the server which locale to use. By default, the server uses "en_US".
rest_client
Optional :class:`dropbox.rest.RESTClient`-like object to use for making
requests.
"""
if rest_client is None: rest_client = RESTClient
if isinstance(oauth2_access_token, str):
if not _OAUTH2_ACCESS_TOKEN_PATTERN.match(oauth2_access_token):
raise ValueError("invalid format for oauth2_access_token: %r"
% (oauth2_access_token,))
self.session = DropboxOAuth2Session(oauth2_access_token, locale)
elif isinstance(oauth2_access_token, DropboxSession):
# Backwards compatibility with OAuth 1
if locale is not None:
raise ValueError("The 'locale' parameter to DropboxClient is only useful "
"when also passing in an OAuth 2 access token")
self.session = oauth2_access_token
else:
raise ValueError("'oauth2_access_token' must either be a string or a DropboxSession")
self.rest_client = rest_client
def request(self, target, params=None, method='POST',
content_server=False, notification_server=False):
"""
An internal method that builds the url, headers, and params for a Dropbox API request.
It is exposed if you need to make API calls not implemented in this library or if you
need to debug requests.
Parameters
target
The target URL with leading slash (e.g. '/files').
params
A dictionary of parameters to add to the request.
method
An HTTP method (e.g. 'GET' or 'POST').
content_server
A boolean indicating whether the request is to the
API content server, for example to fetch the contents of a file
rather than its metadata.
notification_server
A boolean indicating whether the request is to the API notification
server, for example for longpolling.
Returns
A tuple of ``(url, params, headers)`` that should be used to make the request.
OAuth will be added as needed within these fields.
"""
assert method in ['GET','POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
assert not (content_server and notification_server), \
"Cannot construct request simultaneously for content and notification servers."
if params is None:
params = {}
if content_server:
host = self.session.API_CONTENT_HOST
elif notification_server:
host = self.session.API_NOTIFICATION_HOST
else:
host = self.session.API_HOST
base = self.session.build_url(host, target)
headers, params = self.session.build_access_headers(method, base, params)
if method in ('GET', 'PUT'):
url = self.session.build_url(host, target, params)
else:
url = self.session.build_url(host, target)
return url, params, headers
def account_info(self):
"""Retrieve information about the user's account.
Returns
A dictionary containing account information.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#account-info
"""
url, params, headers = self.request("/account/info", method='GET')
return self.rest_client.GET(url, headers)
def disable_access_token(self):
"""
Disable the access token that this ``DropboxClient`` is using. If this call
succeeds, further API calls using this object will fail.
"""
url, params, headers = self.request("/disable_access_token", method='POST')
return self.rest_client.POST(url, params, headers)
def create_oauth2_access_token(self):
"""
If this ``DropboxClient`` was created with an OAuth 1 access token, this method
can be used to create an equivalent OAuth 2 access token. This can be used to
upgrade your app's existing access tokens from OAuth 1 to OAuth 2.
Example::
from dropbox.client import DropboxClient
from dropbox.session import DropboxSession
session = DropboxSession(APP_KEY, APP_SECRET)
access_key, access_secret = '123abc', 'xyz456' # Previously obtained OAuth 1 credentials
session.set_token(access_key, access_secret)
client = DropboxClient(session)
token = client.create_oauth2_access_token()
# Optionally, create a new client using the new token
new_client = DropboxClient(token)
"""
if not isinstance(self.session, DropboxSession):
raise ValueError("This call requires a DropboxClient that is configured with an "
"OAuth 1 access token.")
url, params, headers = self.request("/oauth2/token_from_oauth1", method='POST')
r = self.rest_client.POST(url, params, headers)
return r['access_token']
def get_chunked_uploader(self, file_obj, length):
"""Creates a :class:`ChunkedUploader` to upload the given file-like object.
Parameters
file_obj
The file-like object which is the source of the data
being uploaded.
length
The number of bytes to upload.
The expected use of this function is as follows::
bigFile = open("data.txt", 'rb')
uploader = myclient.get_chunked_uploader(bigFile, size)
print "uploading: ", size
while uploader.offset < size:
try:
upload = uploader.upload_chunked()
except rest.ErrorResponse, e:
# perform error handling and retry logic
uploader.finish('/bigFile.txt')
The SDK leaves the error handling and retry logic to the developer
to implement, as the exact requirements will depend on the application
involved.
"""
return ChunkedUploader(self, file_obj, length)
def upload_chunk(self, file_obj, length=None, offset=0, upload_id=None):
"""Uploads a single chunk of data from a string or file-like object. The majority of users
should use the :class:`ChunkedUploader` object, which provides a simpler interface to the
chunked_upload API endpoint.
Parameters
file_obj
The source of the chunk to upload; a file-like object or a string.
length
This argument is ignored but still present for backward compatibility reasons.
offset
The byte offset to which this source data corresponds in the original file.
upload_id
The upload identifier for which this chunk should be uploaded,
returned by a previous call, or None to start a new upload.
Returns
A dictionary containing the keys:
upload_id
A string used to identify the upload for subsequent calls to :meth:`upload_chunk()`
and :meth:`commit_chunked_upload()`.
offset
The offset at which the next upload should be applied.
expires
The time after which this partial upload is invalid.
"""
params = dict()
if upload_id:
params['upload_id'] = upload_id
params['offset'] = offset
url, ignored_params, headers = self.request("/chunked_upload", params,
method='PUT', content_server=True)
try:
reply = self.rest_client.PUT(url, file_obj, headers)
return reply['offset'], reply['upload_id']
except ErrorResponse as e:
raise e
def commit_chunked_upload(self, full_path, upload_id, overwrite=False, parent_rev=None):
"""Commit the previously uploaded chunks for the given path.
Parameters
full_path
The full path to which the chunks are uploaded, *including the file name*.
If the destination folder does not yet exist, it will be created.
upload_id
The chunked upload identifier, previously returned from upload_chunk.
overwrite
Whether to overwrite an existing file at the given path. (Default ``False``.)
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
parent_rev
Optional rev field from the 'parent' of this upload.
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most recent parent_rev,
and it will never be overwritten if you send a less recent one.
Returns
A dictionary containing the metadata of the newly committed file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#commit-chunked-upload
"""
params = {
'upload_id': upload_id,
'overwrite': overwrite,
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request("/commit_chunked_upload/%s" % full_path,
params, content_server=True)
return self.rest_client.POST(url, params, headers)
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
"""Upload a file.
A typical use case would be as follows::
f = open('working-draft.txt', 'rb')
response = client.put_file('/magnum-opus.txt', f)
print "uploaded:", response
which would return the metadata of the uploaded file, similar to::
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
Parameters
full_path
The full path to upload the file to, *including the file name*.
If the destination folder does not yet exist, it will be created.
file_obj
A file-like object to upload. If you would like, you can pass a string as file_obj.
overwrite
Whether to overwrite an existing file at the given path. (Default ``False``.)
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
parent_rev
Optional rev field from the 'parent' of this upload.
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most recent parent_rev,
and it will never be overwritten if you send a less recent one.
Returns
A dictionary containing the metadata of the newly uploaded file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#files-put
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 503: User over quota.
"""
path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
params = {
'overwrite': bool(overwrite),
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request(path, params, method='PUT', content_server=True)
return self.rest_client.PUT(url, file_obj, headers)
def get_file(self, from_path, rev=None, start=None, length=None):
"""Download a file.
Example::
out = open('magnum-opus.txt', 'wb')
with client.get_file('/magnum-opus.txt') as f:
out.write(f.read())
which would download the file ``magnum-opus.txt`` and write the contents into
the file ``magnum-opus.txt`` on the local filesystem.
Parameters
from_path
The path to the file to be downloaded.
rev
Optional previous rev value of the file to be downloaded.
start
Optional byte value from which to start downloading.
length
Optional length in bytes for partially downloading the file. If ``length`` is
specified but ``start`` is not, then the last ``length`` bytes will be downloaded.
Returns
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
path = "/files/%s%s" % (self.session.root, format_path(from_path))
params = {}
if rev is not None:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET', content_server=True)
if start is not None:
if length:
headers['Range'] = 'bytes=%s-%s' % (start, start + length - 1)
else:
headers['Range'] = 'bytes=%s-' % start
elif length is not None:
headers['Range'] = 'bytes=-%s' % length
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
"""Download a file alongwith its metadata.
Acts as a thin wrapper around get_file() (see :meth:`get_file()` comments for
more details)
A typical usage looks like this::
out = open('magnum-opus.txt', 'wb')
f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
with f:
out.write(f.read())
Parameters
from_path
The path to the file to be downloaded.
rev
Optional previous rev value of the file to be downloaded.
Returns
A pair of ``(response, metadata)``:
response
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
metadata
A dictionary containing the metadata of the file (see
https://www.dropbox.com/developers/core/docs#metadata for details).
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
file_res = self.get_file(from_path, rev)
metadata = DropboxClient.__parse_metadata_as_dict(file_res)
return file_res, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
    """Extract file metadata from a raw Dropbox HTTP response.

    Looks for the ``x-dropbox-metadata`` header (case-insensitively) and
    decodes its JSON payload. Raises :class:`dropbox.rest.ErrorResponse`
    if the header is missing, empty, or not valid JSON.
    """
    parsed = None
    for name, value in dropbox_raw_response.getheaders().items():
        if name.lower() != 'x-dropbox-metadata':
            continue
        try:
            parsed = json.loads(value)
        except ValueError:
            raise ErrorResponse(dropbox_raw_response)
    if not parsed:
        raise ErrorResponse(dropbox_raw_response)
    return parsed
def delta(self, cursor=None, path_prefix=None, include_media_info=False):
    """Keep up with changes to the files and folders in a user's Dropbox.

    Call delta() periodically to get a list of "delta entries" —
    instructions on how to update your local state to match the server's.

    Parameters
        cursor
          On the first call, omit this argument (or pass ``None``). On
          subsequent calls, pass the ``cursor`` string returned by the
          previous call.
        path_prefix
          If provided, results are limited to files and folders whose
          paths are equal to or under ``path_prefix``. The prefix is
          fixed for a given cursor: whatever ``path_prefix`` you use on
          the first ``delta()`` must also be passed on every subsequent
          call that uses the returned cursor.
        include_media_info
          If True, delta will return additional media info for photos and
          videos (the time a photo was taken, GPS coordinates, etc.).
          There is a delay between upload and when this information is
          available; delta only includes a file once its media info is
          ready. Like ``path_prefix``, the value used on the first
          ``delta()`` must be passed on subsequent calls with that cursor.

    Returns
        A dict with four keys:

        entries
          A list of "delta entries" (described below).
        reset
          If ``True``, you should reset your local state to an empty
          folder before processing the list of delta entries. This is
          ``True`` only in rare situations.
        cursor
          A string used to keep track of your current state. Pass it to
          the next delta() call to receive entries recorded since this
          cursor was returned.
        has_more
          If ``True``, more entries are available and you can call
          delta() again immediately. If ``False``, wait at least 5
          minutes (preferably longer) before checking again.

    Delta Entries: each entry is a 2-item list of one of these forms:

    - [*path*, *metadata*]: there is a file/folder at the given path; add
      it to your local state (the *metadata* value matches what
      ``metadata()`` would return).

      - If the entry implies parent folders missing from your local
        state, create them; you will eventually get entries for them.
      - If the entry is a file, replace whatever your local state has at
        *path*.
      - If the entry is a folder and your local state has a file at
        *path*, replace it; if it has a folder, apply the new *metadata*
        but do not modify the folder's children.
    - [*path*, ``None``]: there is no file/folder at *path* on Dropbox.
      Delete whatever is at *path* locally, including any children
      (separate "delete" entries for children are sometimes, but not
      always, sent). If your local state has nothing at *path*, ignore
      the entry.

    Remember: Dropbox treats file names case-insensitively but
    case-preservingly. The *path* strings above are lower-cased; the
    *metadata* dicts carry the original, case-preserved path.
    """
    params = {'include_media_info': include_media_info}
    if cursor is not None:
        params['cursor'] = cursor
    if path_prefix is not None:
        params['path_prefix'] = path_prefix
    url, params, headers = self.request("/delta", params)
    return self.rest_client.POST(url, params, headers)
def longpoll_delta(self, cursor, timeout=None):
    """Long-poll for changes on an account.

    Used together with :meth:`delta()`, this gives a low-latency way to
    monitor an account for file changes. Note that this call goes to
    ``api-notify.dropbox.com`` instead of ``api.dropbox.com`` and, unlike
    most endpoints, does not itself require OAuth authentication — the
    cursor can only be obtained via an authenticated :meth:`delta()`.

    Parameters
        cursor
          A delta cursor returned by :meth:`delta()`. A cursor obtained
          with ``include_media_info=True`` is incompatible with
          ``longpoll_delta()`` and will produce an error.
        timeout
          Optional timeout in seconds. Defaults to 30 seconds, which is
          also the minimum; the maximum is 480. The request blocks for at
          most this long, plus up to 90 seconds of random jitter (to
          avoid the thundering-herd problem). Some network infrastructure
          does not support long timeouts — use with care.

    Returns
        Blocks until changes are available or a timeout occurs. The
        response is a dictionary like::

            {"changes": false, "backoff": 60}

        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#longpoll-delta

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (generally an invalid parameter; check e.error for details).
    """
    params = {'cursor': cursor}
    if timeout is not None:
        params['timeout'] = timeout
    url, params, headers = self.request("/longpoll_delta", params,
                                        method='GET', notification_server=True)
    return self.rest_client.GET(url, headers)
def create_copy_ref(self, from_path):
    """Create and return a copy ref for a specific file.

    The copy ref can be used to instantly copy that file into the
    Dropbox of another account (see :meth:`add_copy_ref`).

    Parameters
        from_path
          The path of the file to create a copy ref for.

    Returns
        A dictionary like the following example::

            {"expires": "Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref": "z1X6ATl6aWtzOGq0c3g5Ng"}
    """
    endpoint = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
    url, params, headers = self.request(endpoint, {}, method='GET')
    return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
    """Add the file referenced by a copy ref at the specified path.

    Parameters
        copy_ref
          A copy ref string returned by a :meth:`create_copy_ref` call.
          The ref may come from any Dropbox account, including this one.
        to_path
          The path where the file will be created.

    Returns
        A dictionary containing the metadata of the new copy of the file.
    """
    params = {
        'from_copy_ref': copy_ref,
        'to_path': format_path(to_path),
        'root': self.session.root,
    }
    url, params, headers = self.request("/fileops/copy", params)
    return self.rest_client.POST(url, params, headers)
def file_copy(self, from_path, to_path):
    """Copy a file or folder to a new location.

    Parameters
        from_path
          The path of the file or folder to copy.
        to_path
          The destination path, including the destination filename (e.g.
          from_path ``'/test.txt'``, to_path ``'/dir/test.txt'``). If a
          file already exists at ``to_path``, an ErrorResponse is raised.

    Returns
        A dictionary containing the metadata of the new copy.
        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#fileops-copy

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: An invalid copy operation was attempted (e.g. a file
          already exists at the destination, or copying a shared folder).
        - 404: No file was found at given from_path.
        - 503: User over storage quota.
    """
    params = {
        'root': self.session.root,
        'from_path': format_path(from_path),
        'to_path': format_path(to_path),
    }
    url, params, headers = self.request("/fileops/copy", params)
    return self.rest_client.POST(url, params, headers)
def file_create_folder(self, path):
    """Create a folder.

    Parameters
        path
          The path of the new folder.

    Returns
        A dictionary containing the metadata of the newly created folder.
        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#fileops-create-folder

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: A folder at that path already exists.
    """
    params = {'root': self.session.root, 'path': format_path(path)}
    url, params, headers = self.request("/fileops/create_folder", params)
    return self.rest_client.POST(url, params, headers)
def file_delete(self, path):
    """Delete a file or folder.

    Parameters
        path
          The path of the file or folder.

    Returns
        A dictionary containing the metadata of the just-deleted file.
        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#fileops-delete

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given path.
    """
    params = {'root': self.session.root, 'path': format_path(path)}
    url, params, headers = self.request("/fileops/delete", params)
    return self.rest_client.POST(url, params, headers)
def file_move(self, from_path, to_path):
    """Move a file or folder to a new location.

    Parameters
        from_path
          The path of the file or folder to move.
        to_path
          The destination path, including the destination filename (e.g.
          if ``from_path`` is ``'/test.txt'``, ``to_path`` might be
          ``'/dir/test.txt'``). If a file already exists at ``to_path``,
          an ErrorResponse is raised.

    Returns
        A dictionary containing the metadata of the moved file or folder.
        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#fileops-move

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: An invalid move operation was attempted (e.g. a file
          already exists at the destination, or moving a shared folder
          into a shared folder).
        - 404: No file was found at given from_path.
        - 503: User over storage quota.
    """
    params = {
        'root': self.session.root,
        'from_path': format_path(from_path),
        'to_path': format_path(to_path),
    }
    url, params, headers = self.request("/fileops/move", params)
    return self.rest_client.POST(url, params, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None,
             rev=None, include_deleted=False, include_media_info=False):
    """Retrieve metadata for a file or folder.

    A typical use would be::

        folder_metadata = client.metadata('/')
        print "metadata:", folder_metadata

    which returns the metadata of the root folder, looking something
    like::

        {
            'bytes': 0,
            'contents': [
                {
                    'bytes': 0,
                    'icon': 'folder',
                    'is_dir': True,
                    'modified': 'Thu, 25 Aug 2011 00:03:15 +0000',
                    'path': '/Sample Folder',
                    'rev': '803beb471',
                    'revision': 8,
                    'root': 'dropbox',
                    'size': '0 bytes',
                    'thumb_exists': False
                },
                {
                    'bytes': 77,
                    'icon': 'page_white_text',
                    'is_dir': False,
                    'mime_type': 'text/plain',
                    'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
                    'path': '/magnum-opus.txt',
                    'rev': '362e2029684fe',
                    'revision': 221922,
                    'root': 'dropbox',
                    'size': '77 bytes',
                    'thumb_exists': False
                }
            ],
            'hash': 'efdac89c4da886a9cece1927e6c22977',
            'icon': 'folder',
            'is_dir': True,
            'path': '/',
            'root': 'app_folder',
            'size': '0 bytes',
            'thumb_exists': False
        }

    Here the root folder contains two things: ``Sample Folder`` (a
    folder) and ``/magnum-opus.txt`` (a 77-byte text file).

    Parameters
        path
          The path of the file or folder.
        list
          Whether to list all contained files (only applies when ``path``
          refers to a folder).  NOTE: shadows the ``list`` builtin, but
          the name is part of the public API and must stay.
        file_limit
          The maximum number of file entries to return within a folder.
          If the folder exceeds this limit an exception is raised; the
          server returns at most 25,000 files within a folder.
        hash
          Every folder listing carries a hash that can be passed back
          here later to save bandwidth: if the folder is unchanged, the
          server returns a 304 instead of its contents.
        rev
          Optional revision of the file to retrieve metadata for (files
          only). If omitted, the most recent revision's metadata is
          returned.
        include_deleted
          When listing contained files, include files that have been
          deleted.
        include_media_info
          If True, include additional media info for photos and videos
          when available (time a photo was taken, GPS coordinates, etc.).

    Returns
        A dictionary containing the metadata of the file or folder (and
        contained files if appropriate). For a detailed description of
        the return value, visit:
        https://www.dropbox.com/developers/core/docs#metadata

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 304: Current folder hash matches hash parameters, so contents are unchanged.
        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at given path.
        - 406: Too many file entries to return.
    """
    endpoint = "/metadata/%s%s" % (self.session.root, format_path(path))
    # The API expects the string values 'true'/'false' for the list flag.
    params = {
        'file_limit': file_limit,
        'list': 'true' if list else 'false',
        'include_deleted': include_deleted,
        'include_media_info': include_media_info,
    }
    if hash is not None:
        params['hash'] = hash
    if rev:
        params['rev'] = rev
    url, params, headers = self.request(endpoint, params, method='GET')
    return self.rest_client.GET(url, headers)
def thumbnail(self, from_path, size='m', format='JPEG'):
    """Download a thumbnail for an image.

    Parameters
        from_path
          The path of the file to thumbnail.
        size
          The desired thumbnail size as a string. Currently supported:
          ``"xs"`` (32x32), ``"s"`` (64x64), ``"m"`` (128x128),
          ``"l``" (640x480), ``"xl"`` (1024x768). See
          https://www.dropbox.com/developers/core/docs#thumbnails for
          more details.
        format
          The image format for the returned thumbnail data: either
          ``"JPEG"`` or ``"PNG"``.

    Returns
        A :class:`dropbox.rest.RESTResponse` — a file-like object that
        can be read from; you must call ``close()`` when you're done.

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given from_path, or files of that
          type cannot be thumbnailed.
        - 415: Image is invalid and cannot be thumbnailed.
    """
    assert format in ('JPEG', 'PNG'), \
        "expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format
    endpoint = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
    params = {'size': size, 'format': format}
    url, params, headers = self.request(endpoint, params,
                                        method='GET', content_server=True)
    return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='m', format='JPEG'):
    """Download a thumbnail for an image together with the image's metadata.

    Thin convenience wrapper around :meth:`thumbnail()`; see that method
    for details.

    Parameters
        from_path
          The path of the file to thumbnail.
        size
          The desired thumbnail size as a string; see :meth:`thumbnail()`.
        format
          The image format for the returned thumbnail data: either
          ``"JPEG"`` or ``"PNG"``.

    Returns
        A pair ``(response, metadata)``. ``response`` is a
        :class:`dropbox.rest.RESTResponse` — a file-like object that can
        be read from; you must call ``close()`` when you're done.
        ``metadata`` is a dictionary of the thumbnailed file's metadata
        (see https://www.dropbox.com/developers/core/docs#metadata).

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given from_path, or files of that
          type cannot be thumbnailed.
        - 415: Image is invalid and cannot be thumbnailed.
        - 200: Request was okay but response was malformed in some way.
    """
    response = self.thumbnail(from_path, size, format)
    parsed_metadata = DropboxClient.__parse_metadata_as_dict(response)
    return response, parsed_metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
    """Search a folder for filenames matching a query.

    Parameters
        path
          The folder to search within.
        query
          The query to search on (minimum 3 characters).
        file_limit
          The maximum number of file entries to return within a folder.
          The server returns at most 1,000 files.
        include_deleted
          Whether to include deleted files in the search results.

    Returns
        A list of the metadata of all matching files (up to ``file_limit``
        entries). For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#search

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
    """
    endpoint = "/search/%s%s" % (self.session.root, format_path(path))
    params = {
        'query': query,
        'file_limit': file_limit,
        'include_deleted': include_deleted,
    }
    url, params, headers = self.request(endpoint, params)
    return self.rest_client.POST(url, params, headers)
def revisions(self, path, rev_limit=1000):
    """Retrieve revisions of a file.

    Parameters
        path
          The file to fetch revisions for. Revisions are not available
          for folders.
        rev_limit
          The maximum number of revisions to return. The server returns
          at most 1,000 revisions.

    Returns
        A list of the metadata of all matching files (up to ``rev_limit``
        entries). For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#revisions

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No revisions were found at the given path.
    """
    endpoint = "/revisions/%s%s" % (self.session.root, format_path(path))
    url, params, headers = self.request(endpoint, {'rev_limit': rev_limit},
                                        method='GET')
    return self.rest_client.GET(url, headers)
def restore(self, path, rev):
    """Restore a file to a previous revision.

    Parameters
        path
          The file to restore. Folders can't be restored.
        rev
          A previous rev value of the file to restore to.

    Returns
        A dictionary containing the metadata of the newly restored file.
        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#restore

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given revision.
    """
    endpoint = "/restore/%s%s" % (self.session.root, format_path(path))
    url, params, headers = self.request(endpoint, {'rev': rev})
    return self.rest_client.POST(url, params, headers)
def media(self, path):
    """Get a temporary unauthenticated URL for a media file.

    All of Dropbox's API methods require OAuth, which may cause problems
    where an application expects to hit a URL multiple times (for
    example, a media player seeking around a video file). This method
    creates a time-limited URL that can be accessed without any
    authentication, and returns it along with an expiration time.

    Parameters
        path
          The file to return a URL for. Folders are not supported.

    Returns
        A dictionary like the following example::

            {'url': 'https://dl.dropboxusercontent.com/1/view/abcdefghijk/example',
             'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}

        For a detailed description of the return value, visit:
        https://www.dropbox.com/developers/core/docs#media

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given path.
    """
    endpoint = "/media/%s%s" % (self.session.root, format_path(path))
    url, params, headers = self.request(endpoint, method='GET')
    return self.rest_client.GET(url, headers)
def share(self, path, short_url=True):
    """Create a shareable link to a file or folder.

    Shareable links created on Dropbox are time-limited, but don't require any
    authentication, so they can be given out freely. The time limit should allow
    at least a day of shareability, though users have the ability to disable
    a link from their account if they like.

    Parameters
        path
            The file or folder to share.
        short_url
            When ``True`` (the default), request a shortened link
            (``db.tt``-style, as in the example below) rather than a
            full-length URL.

    Returns
        A dictionary that looks like the following example::

            {'url': u'https://db.tt/c0mFuu1Y', 'expires': 'Tue, 01 Jan 2030 00:00:00 +0000'}

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#shares

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given path.
    """
    endpoint = "/shares/%s%s" % (self.session.root, format_path(path))
    url, params, headers = self.request(endpoint, {'short_url': short_url},
                                        method='GET')
    return self.rest_client.GET(url, headers)
class ChunkedUploader(object):
    """Contains the logic around a chunked upload, which uploads a
    large file to Dropbox via the /chunked_upload endpoint.
    """

    def __init__(self, client, file_obj, length):
        """Create an uploader that reads ``length`` bytes from ``file_obj``
        and uploads them through ``client`` (a DropboxClient)."""
        self.client = client
        self.offset = 0          # number of bytes the server has confirmed
        self.upload_id = None    # assigned by the server on the first chunk
        self.last_block = None   # bytes read but not yet confirmed uploaded
        self.file_obj = file_obj
        self.target_length = length

    def upload_chunked(self, chunk_size=4 * 1024 * 1024):
        """Upload data from this ChunkedUploader's file_obj in chunks until
        the target length is reached or an error occurs. Raises on error and
        can be called again to resume the upload.

        Parameters
            chunk_size
                The number of bytes to put in each chunk. (Default 4 MB.)
        """
        while self.offset < self.target_length:
            next_chunk_size = min(chunk_size, self.target_length - self.offset)
            # Only read a fresh block if the previous one was confirmed;
            # otherwise retry the unconfirmed block. (Fixed: compare to
            # None with `is`, not `==`.)
            if self.last_block is None:
                self.last_block = self.file_obj.read(next_chunk_size)

            try:
                (self.offset, self.upload_id) = self.client.upload_chunk(
                    StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
                self.last_block = None
            except ErrorResponse as e:
                # Handle the case where the server tells us our offset is wrong:
                # adopt the server's offset and continue instead of failing.
                must_reraise = True
                if e.status == 400:
                    reply = e.body
                    if "offset" in reply and reply['offset'] != 0 and reply['offset'] > self.offset:
                        self.last_block = None
                        self.offset = reply['offset']
                        must_reraise = False
                if must_reraise:
                    raise

    def finish(self, path, overwrite=False, parent_rev=None):
        """Commits the bytes uploaded by this ChunkedUploader to a file
        in the users dropbox.

        Parameters
            path
                The full path of the file in the Dropbox.
            overwrite
                Whether to overwrite an existing file at the given path. (Default ``False``.)
                If overwrite is False and a file already exists there, Dropbox
                will rename the upload to make sure it doesn't overwrite anything.
                You need to check the metadata returned for the new name.
                This field should only be True if your intent is to potentially
                clobber changes to a file that you don't know about.
            parent_rev
                Optional rev field from the 'parent' of this upload.
                If your intent is to update the file at the given path, you should
                pass the parent_rev parameter set to the rev value from the most recent
                metadata you have of the existing file at that path. If the server
                has a more recent version of the file at the specified path, it will
                automatically rename your uploaded file, spinning off a conflict.
                Using this parameter effectively causes the overwrite parameter to be ignored.
                The file will always be overwritten if you send the most recent parent_rev,
                and it will never be overwritten if you send a less recent one.
        """
        path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))
        params = dict(
            overwrite=bool(overwrite),
            upload_id=self.upload_id,
        )
        if parent_rev is not None:
            params['parent_rev'] = parent_rev
        url, params, headers = self.client.request(path, params, content_server=True)
        return self.client.rest_client.POST(url, params, headers)
# Allow access of ChunkedUploader via DropboxClient for backwards compatibility.
# NOTE: this attribute assignment runs at import time, after both classes
# above have been defined.
DropboxClient.ChunkedUploader = ChunkedUploader
class DropboxOAuth2FlowBase(object):
    """Shared machinery for the OAuth 2 authorization helpers: URL
    construction and the code-for-token exchange."""

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=RESTClient):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.locale = locale
        self.rest_client = rest_client

    def _get_authorize_url(self, redirect_uri, state):
        # Build the /oauth2/authorize URL; redirect_uri and state are
        # omitted from the query when None.
        params = dict(response_type='code',
                      client_id=self.consumer_key)
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri
        if state is not None:
            params['state'] = state

        return self.build_url(BaseSession.WEB_HOST, '/oauth2/authorize', params)

    def _finish(self, code, redirect_uri):
        # Exchange an authorization code for an access token and user id
        # via the /oauth2/token endpoint.
        url = self.build_url(BaseSession.API_HOST, '/oauth2/token')
        params = {'grant_type': 'authorization_code',
                  'code': code,
                  'client_id': self.consumer_key,
                  'client_secret': self.consumer_secret,
                  }
        if self.locale is not None:
            params['locale'] = self.locale
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri

        response = self.rest_client.POST(url, params=params)
        access_token = response["access_token"]
        user_id = response["uid"]
        return access_token, user_id

    def build_path(self, target, params=None):
        """Build the path component for an API URL.

        This method urlencodes the parameters, adds them
        to the end of the target url, and puts a marker for the API
        version in front.

        Parameters
            target
                A target url (e.g. '/files') to build upon.
            params
                Optional dictionary of parameters (name to value).

        Returns
            The path and parameters components of an API URL.
        """
        # Python 2 compatibility shim: quote() needs UTF-8 bytes there.
        # (Fixed: use isinstance() rather than comparing type() to str.)
        if sys.version_info < (3,) and isinstance(target, str):
            target = target.encode("utf8")

        target_path = urllib.parse.quote(target)

        params = params or {}
        params = params.copy()  # don't mutate the caller's dict

        if self.locale:
            params['locale'] = self.locale

        if params:
            query_string = params_to_urlencoded(params)
            return "/%s%s?%s" % (BaseSession.API_VERSION, target_path, query_string)
        else:
            return "/%s%s" % (BaseSession.API_VERSION, target_path)

    def build_url(self, host, target, params=None):
        """Build an API URL.

        This method adds scheme and hostname to the path
        returned from build_path.

        Parameters
            target
                A target url (e.g. '/files') to build upon.
            params
                Optional dictionary of parameters (name to value).

        Returns
            The full API URL.
        """
        return "https://%s%s" % (host, self.build_path(target, params))
class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper for apps that can't provide a redirect URI
    (such as the command-line example apps).

    Example::

        from dropbox.client import DropboxOAuth2FlowNoRedirect, DropboxClient
        from dropbox import rest as dbrest

        auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)

        authorize_url = auth_flow.start()
        print("1. Go to: " + authorize_url)
        print("2. Click \\"Allow\\" (you might have to log in first).")
        print("3. Copy the authorization code.")
        auth_code = input("Enter the authorization code here: ").strip()

        try:
            access_token, user_id = auth_flow.finish(auth_code)
        except dbrest.ErrorResponse as e:
            print('Error: %s' % (e,))
            return

        c = DropboxClient(access_token)
    """

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=None):
        """
        Construct an instance.

        Parameters
            consumer_key
                Your API app's "app key"
            consumer_secret
                Your API app's "app secret"
            locale
                The locale of the user of your application. For example "en" or "en_US".
                Some API calls return localized data and error messages; this setting
                tells the server which locale to use. By default, the server uses "en_US".
            rest_client
                Optional :class:`dropbox.rest.RESTClient`-like object to use for making
                requests.
        """
        if rest_client is None:
            rest_client = RESTClient
        super(DropboxOAuth2FlowNoRedirect, self).__init__(consumer_key, consumer_secret,
                                                          locale, rest_client)

    def start(self):
        """
        Starts the OAuth 2 authorization process.

        Returns
            The URL for a page on Dropbox's website. This page will let the user "approve"
            your app, which gives your app permission to access the user's Dropbox account.
            Tell the user to visit this URL and approve your app.
        """
        # No redirect URI and no CSRF state in the no-redirect flow.
        return self._get_authorize_url(None, None)

    def finish(self, code):
        """
        If the user approves your app, they will be presented with an "authorization code".
        Have the user copy/paste that authorization code into your app and then call this
        method to get an access token.

        Parameters
            code
                The authorization code shown to the user when they approved your app.

        Returns
            A pair of ``(access_token, user_id)``. ``access_token`` is a string that
            can be passed to DropboxClient. ``user_id`` is the Dropbox user ID (string) of the
            user that just approved your app.

        Raises
            The same exceptions as :meth:`DropboxOAuth2Flow.finish()`.
        """
        return self._finish(code, None)
class DropboxOAuth2Flow(DropboxOAuth2FlowBase):
"""
OAuth 2 authorization helper. Use this for web apps.
OAuth 2 has a two-step authorization process. The first step is having the user authorize
your app. The second involves getting an OAuth 2 access token from Dropbox.
Example::
from dropbox.client import DropboxOAuth2Flow, DropboxClient
def get_dropbox_auth_flow(web_app_session):
redirect_uri = "https://my-web-server.org/dropbox-auth-finish")
return DropboxOAuth2Flow(APP_KEY, APP_SECRET, redirect_uri,
web_app_session, "dropbox-auth-csrf-token")
# URL handler for /dropbox-auth-start
def dropbox_auth_start(web_app_session, request):
authorize_url = get_dropbox_auth_flow(web_app_session).start()
redirect_to(authorize_url)
# URL handler for /dropbox-auth-finish
def dropbox_auth_finish(web_app_session, request):
try:
access_token, user_id, url_state = \\
get_dropbox_auth_flow(web_app_session).finish(request.query_params)
except DropboxOAuth2Flow.BadRequestException, e:
http_status(400)
except DropboxOAuth2Flow.BadStateException, e:
# Start the auth flow again.
redirect_to("/dropbox-auth-start")
except DropboxOAuth2Flow.CsrfException, e:
http_status(403)
except DropboxOAuth2Flow.NotApprovedException, e:
flash('Not approved? Why not?')
return redirect_to("/home")
except DropboxOAuth2Flow.ProviderException, e:
logger.log("Auth error: %s" % (e,))
http_status(403)
"""
def __init__(self, consumer_key, consumer_secret, redirect_uri, session,
             csrf_token_session_key, locale=None, rest_client=None):
    """
    Construct an instance.

    Parameters
        consumer_key
            Your API app's "app key".
        consumer_secret
            Your API app's "app secret".
        redirect_uri
            The URI the Dropbox server redirects the user to after they
            finish authorizing your app. Must be HTTPS-based and
            pre-registered with the Dropbox servers; localhost URIs are
            allowed without pre-registration and may be HTTP or HTTPS.
        session
            A dict-like object representing the current user's web
            session (used to save the CSRF token).
        csrf_token_session_key
            The key under which the CSRF token is stored in the session
            (for example: "dropbox-auth-csrf-token").
        locale
            The locale of the user of your application, e.g. "en" or
            "en_US". Some API calls return localized data and error
            messages; this tells the server which locale to use. The
            server default is "en_US".
        rest_client
            Optional :class:`dropbox.rest.RESTClient`-like object to use
            for making requests.
    """
    if rest_client is None:
        rest_client = RESTClient
    super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret,
                                            locale, rest_client)
    self.redirect_uri = redirect_uri
    self.session = session
    self.csrf_token_session_key = csrf_token_session_key
def start(self, url_state=None):
    """
    Starts the OAuth 2 authorization process.

    Builds an "authorization URL" that you should redirect the user's
    browser to.  There the user can grant your app access to their Dropbox
    account; afterwards the user is redirected to the ``redirect_uri``
    passed to the constructor.

    Also saves a CSRF token to ``session[csrf_token_session_key]`` (as
    provided to the constructor); :meth:`finish()` checks that token to
    prevent request forgery.

    Parameters
        url_state
            Any data you would like carried through the authorization
            process in the URL.  Returned to you verbatim by
            :meth:`finish()`.

    Returns
        The URL for a page on Dropbox's website where the user can approve
        your app.
    """
    # BUG FIX: urlsafe_b64encode() returns *bytes* on Python 3; the token
    # must be decoded to str so it can be concatenated with url_state and
    # stored/compared as text in the session.
    csrf_token = base64.urlsafe_b64encode(os.urandom(16)).decode('ascii')
    state = csrf_token
    if url_state is not None:
        state += "|" + url_state
    self.session[self.csrf_token_session_key] = csrf_token
    return self._get_authorize_url(self.redirect_uri, state)
def finish(self, query_params):
    """
    Call this after the user has visited the authorize URL (see :meth:`start()`), approved your
    app and was redirected to your redirect URI.

    Parameters
        query_params
            The query parameters on the GET request to your redirect URI.

    Returns
        A tuple of ``(access_token, user_id, url_state)``.  ``access_token`` can be used to
        construct a :class:`DropboxClient`.  ``user_id`` is the Dropbox user ID (string) of the
        user that just approved your app.  ``url_state`` is the value you originally passed in
        to :meth:`start()`.

    Raises
        :class:`BadRequestException`
            If the redirect URL was missing parameters or the given parameters were not valid.
        :class:`BadStateException`
            If there's no CSRF token in the session.
        :class:`CsrfException`
            If the ``'state'`` query parameter doesn't contain the CSRF token from the user's
            session.
        :class:`NotApprovedException`
            If the user chose not to approve your app.
        :class:`ProviderException`
            If Dropbox redirected to your redirect URI with some unexpected error identifier
            and error message.
    """
    csrf_token_from_session = self.session[self.csrf_token_session_key]
    # Check well-formedness of request.
    state = query_params.get('state')
    if state is None:
        raise self.BadRequestException("Missing query parameter 'state'.")
    error = query_params.get('error')
    error_description = query_params.get('error_description')
    code = query_params.get('code')
    if error is not None and code is not None:
        # BUG FIX: message previously had a double space from a stray
        # leading space in the continuation string.
        raise self.BadRequestException("Query parameters 'code' and 'error' are both set; "
                                       "only one must be set.")
    if error is None and code is None:
        raise self.BadRequestException("Neither query parameter 'code' or 'error' is set.")
    # Check CSRF token
    if csrf_token_from_session is None:
        # BUG FIX: was `self.BadStateError`, a class that does not exist;
        # the nested class is named BadStateException.
        raise self.BadStateException("Missing CSRF token in session.")
    if len(csrf_token_from_session) <= 20:
        raise AssertionError("CSRF token unexpectedly short: %r" % (csrf_token_from_session,))
    split_pos = state.find('|')
    if split_pos < 0:
        given_csrf_token = state
        url_state = None
    else:
        given_csrf_token = state[0:split_pos]
        url_state = state[split_pos+1:]
    if not _safe_equals(csrf_token_from_session, given_csrf_token):
        raise self.CsrfException("expected %r, got %r" % (csrf_token_from_session,
                                                          given_csrf_token))
    del self.session[self.csrf_token_session_key]
    # Check for error identifier
    if error is not None:
        if error == 'access_denied':
            # The user clicked "Deny"
            if error_description is None:
                raise self.NotApprovedException("No additional description from Dropbox")
            else:
                raise self.NotApprovedException("Additional description from Dropbox: " +
                                                error_description)
        else:
            # All other errors
            full_message = error
            if error_description is not None:
                full_message += ": " + error_description
            # BUG FIX: was `self.ProviderError`, a class that does not
            # exist; the nested class is named ProviderException.
            raise self.ProviderException(full_message)
    # If everything went ok, make the network call to get an access token.
    access_token, user_id = self._finish(code, self.redirect_uri)
    return access_token, user_id, url_state
class BadRequestException(Exception):
    """Raised when the redirect URL was missing parameters or the given
    parameters were not valid.

    The recommended action is to show an HTTP 400 error page.
    """
class BadStateException(Exception):
    """Raised when all parameters are correct but no CSRF token is present
    in the session, which usually means the session expired.

    The recommended action is to redirect the user's browser to try the
    approval process again.
    """
class CsrfException(Exception):
    """Raised when the given 'state' parameter doesn't contain the CSRF
    token from the user's session; the request is blocked to prevent CSRF
    attacks.

    The recommended action is to respond with an HTTP 403 error page.
    """
class NotApprovedException(Exception):
    """Raised when the user chose not to approve your app."""
class ProviderException(Exception):
    """Raised when Dropbox redirected to your redirect URI with some
    unexpected error identifier and error message.

    The recommended action is to log the error, tell the user something
    went wrong, and let them try again.
    """
def _safe_equals(a, b):
if len(a) != len(b): return False
res = 0
for ca, cb in zip(a, b):
res |= ord(ca) ^ ord(cb)
return res == 0
# Allowed character set for an OAuth 2 access token, taken from the "Bearer"
# token spec, RFC 6750: URL-safe base64 characters plus '~', '/', '.', '+',
# with optional trailing '=' padding.
_OAUTH2_ACCESS_TOKEN_PATTERN = re.compile(r'\A[-_~/A-Za-z0-9\.\+]+=*\Z')
# NOTE(review): a stray merge-conflict separator ("=======") was left at this
# point; the text above and below appear to be two variants of this module
# merged into one file.  The marker is removed so the file parses; the
# duplicate halves should be reconciled.
import base64
import re
import os
import sys
import urllib.request, urllib.parse, urllib.error
# True when running under Python 3; used to pick version-specific imports.
PY3 = sys.version_info[0] == 3

if PY3:
    from io import StringIO
else:
    # BUG FIX: on Python 2 the class lives in the StringIO module; the
    # previous code imported io.StringIO in both branches (a 2to3
    # artifact), which on Python 2 yields a unicode-only buffer.
    # (The no-op `str = str` left by the conversion is also removed.)
    from StringIO import StringIO

try:
    import json
except ImportError:
    # Fall back for old interpreters that lack the stdlib json module.
    import simplejson as json
from .rest import ErrorResponse, RESTClient, params_to_urlencoded
from .session import BaseSession, DropboxSession, DropboxOAuth2Session
def format_path(path):
    """Normalize path for use with the Dropbox API.

    Collapses runs of adjacent slashes into single slashes, then ensures
    there's a leading slash but no trailing slash.  Falsy input (e.g. ''
    or None) is returned unchanged, and the root folder '/' normalizes to
    the empty string.
    """
    if not path:
        return path
    path = re.sub(r'/+', '/', path)
    if path == '/':
        # The API represents the root folder as the empty string.  (The
        # original conditional returned "" for both the str and non-str
        # cases — a 2to3 leftover — so it is collapsed here.)
        return ''
    return '/' + path.strip('/')
class DropboxClient(object):
"""
This class lets you make Dropbox API calls. You'll need to obtain an
OAuth 2 access token first. You can get an access token using either
:class:`DropboxOAuth2Flow` or :class:`DropboxOAuth2FlowNoRedirect`.
All of the API call methods can raise a :class:`dropbox.rest.ErrorResponse` exception if
the server returns a non-200 or invalid HTTP response. Note that a 401
return status at any point indicates that the access token you're using
is no longer valid and the user must be put through the OAuth 2
authorization flow again.
"""
def __init__(self, oauth2_access_token, locale=None, rest_client=None):
    """Construct a ``DropboxClient`` instance.

    Parameters
        oauth2_access_token
            An OAuth 2 access token (string).  For backwards compatibility
            this may also be a DropboxSession object (see
            :meth:`create_oauth2_access_token()`).
        locale
            The locale of your application's user, e.g. "en" or "en_US".
            Controls the language of localized server data and error
            messages; the server defaults to "en_US".  Only meaningful when
            an OAuth 2 token is supplied.
        rest_client
            Optional :class:`dropbox.rest.RESTClient`-like object to use
            for making requests.

    Raises
        ValueError
            If the token string is malformed, if ``locale`` accompanies an
            OAuth 1 session, or if ``oauth2_access_token`` is neither a
            string nor a DropboxSession.
    """
    self.rest_client = RESTClient if rest_client is None else rest_client
    if isinstance(oauth2_access_token, str):
        if not _OAUTH2_ACCESS_TOKEN_PATTERN.match(oauth2_access_token):
            raise ValueError("invalid format for oauth2_access_token: %r"
                             % (oauth2_access_token,))
        self.session = DropboxOAuth2Session(oauth2_access_token, locale)
    elif isinstance(oauth2_access_token, DropboxSession):
        # Backwards compatibility with OAuth 1; the session object already
        # carries its own locale.
        if locale is not None:
            raise ValueError("The 'locale' parameter to DropboxClient is only useful "
                             "when also passing in an OAuth 2 access token")
        self.session = oauth2_access_token
    else:
        raise ValueError("'oauth2_access_token' must either be a string or a DropboxSession")
def request(self, target, params=None, method='POST',
            content_server=False, notification_server=False):
    """
    Internal helper that builds the url, headers, and params for a Dropbox
    API request.  Exposed in case you need to make API calls not
    implemented in this library, or to debug requests.

    Parameters
        target
            The target URL with leading slash (e.g. '/files').
        params
            A dictionary of parameters to add to the request.
        method
            An HTTP method (e.g. 'GET' or 'POST').
        content_server
            Whether the request goes to the API content server (e.g. to
            fetch a file's contents rather than its metadata).
        notification_server
            Whether the request goes to the API notification server (e.g.
            for longpolling).

    Returns
        A ``(url, params, headers)`` tuple to use for the request, with
        OAuth added as needed within those fields.
    """
    assert method in ['GET', 'POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
    assert not (content_server and notification_server), \
        "Cannot construct request simultaneously for content and notification servers."
    params = {} if params is None else params
    if content_server:
        host = self.session.API_CONTENT_HOST
    elif notification_server:
        host = self.session.API_NOTIFICATION_HOST
    else:
        host = self.session.API_HOST
    base = self.session.build_url(host, target)
    headers, params = self.session.build_access_headers(method, base, params)
    # GET and PUT carry parameters in the query string; POST sends them in
    # the request body, so its URL is built without them.
    if method in ('GET', 'PUT'):
        url = self.session.build_url(host, target, params)
    else:
        url = self.session.build_url(host, target)
    return url, params, headers
def account_info(self):
    """Retrieve information about the user's account.

    Returns
        A dictionary containing account information.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#account-info
    """
    url, _unused_params, headers = self.request("/account/info", method='GET')
    return self.rest_client.GET(url, headers)
def disable_access_token(self):
    """
    Disable the access token this ``DropboxClient`` is using.  If the call
    succeeds, further API calls through this object will fail.
    """
    url, params, headers = self.request("/disable_access_token", method='POST')
    return self.rest_client.POST(url, params, headers)
def create_oauth2_access_token(self):
    """
    If this ``DropboxClient`` was created with an OAuth 1 access token, create and
    return an equivalent OAuth 2 access token.  Use this to upgrade your
    app's existing access tokens from OAuth 1 to OAuth 2.

    Example::

        from dropbox.client import DropboxClient
        from dropbox.session import DropboxSession
        session = DropboxSession(APP_KEY, APP_SECRET)
        access_key, access_secret = '123abc', 'xyz456' # Previously obtained OAuth 1 credentials
        session.set_token(access_key, access_secret)
        client = DropboxClient(session)
        token = client.create_oauth2_access_token()
        # Optionally, create a new client using the new token
        new_client = DropboxClient(token)

    Raises
        ValueError
            If this client was not configured with an OAuth 1 session.
    """
    if not isinstance(self.session, DropboxSession):
        raise ValueError("This call requires a DropboxClient that is configured with an "
                         "OAuth 1 access token.")
    url, params, headers = self.request("/oauth2/token_from_oauth1", method='POST')
    response = self.rest_client.POST(url, params, headers)
    return response['access_token']
def get_chunked_uploader(self, file_obj, length):
    """Creates a :class:`ChunkedUploader` to upload the given file-like object.

    Parameters
        file_obj
            The file-like object which is the source of the data
            being uploaded.
        length
            The number of bytes to upload.

    The expected use of this function is as follows::

        bigFile = open("data.txt", 'rb')
        uploader = myclient.get_chunked_uploader(bigFile, size)
        print("uploading: ", size)
        while uploader.offset < size:
            try:
                upload = uploader.upload_chunked()
            except rest.ErrorResponse as e:
                # perform error handling and retry logic
                pass
        uploader.finish('/bigFile.txt')

    The SDK leaves the error handling and retry logic to the developer
    to implement, as the exact requirements will depend on the application
    involved.
    """
    return ChunkedUploader(self, file_obj, length)
def upload_chunk(self, file_obj, length=None, offset=0, upload_id=None):
    """Uploads a single chunk of data from a string or file-like object.  The majority of users
    should use the :class:`ChunkedUploader` object, which provides a simpler interface to the
    chunked_upload API endpoint.

    Parameters
        file_obj
            The source of the chunk to upload; a file-like object or a string.
        length
            This argument is ignored but still present for backward compatibility reasons.
        offset
            The byte offset to which this source data corresponds in the original file.
        upload_id
            The upload identifier for which this chunk should be uploaded,
            returned by a previous call, or None to start a new upload.

    Returns
        A tuple of ``(offset, upload_id)``:

        offset
            The offset at which the next chunk should be uploaded.
        upload_id
            A string used to identify the upload for subsequent calls to
            :meth:`upload_chunk()` and :meth:`commit_chunked_upload()`.
    """
    # DOC FIX: the docstring previously claimed a dict return (with an
    # "expires" key), but the code has always returned a tuple; changing
    # the return shape would break callers, so the docs now match the code.
    params = {'offset': offset}
    if upload_id:
        params['upload_id'] = upload_id
    url, ignored_params, headers = self.request("/chunked_upload", params,
                                                method='PUT', content_server=True)
    # The previous try/except around this call only re-raised ErrorResponse
    # unchanged — a no-op handler — so it has been removed.
    reply = self.rest_client.PUT(url, file_obj, headers)
    return reply['offset'], reply['upload_id']
def commit_chunked_upload(self, full_path, upload_id, overwrite=False, parent_rev=None):
    """Commit the previously uploaded chunks for the given path.

    Parameters
        full_path
            The full path to which the chunks are uploaded, *including the
            file name*.  Missing parent folders are created.
        upload_id
            The chunked upload identifier, previously returned from
            upload_chunk.
        overwrite
            Whether to overwrite an existing file at the given path
            (default ``False``).  When False and a file already exists
            there, Dropbox renames the upload instead of clobbering it —
            check the returned metadata for the new name.  Only pass True
            if you intend to potentially clobber unknown changes.
        parent_rev
            Optional rev field from the 'parent' of this upload.  Pass the
            rev from the most recent metadata you have of the existing file
            to update it; if the server has a newer version it renames your
            upload, creating a conflict.  Supplying parent_rev effectively
            overrides ``overwrite``: the file is always overwritten when
            you send the most recent parent_rev and never when you send a
            stale one.

    Returns
        A dictionary with the metadata of the newly committed file.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#commit-chunked-upload
    """
    commit_params = {'upload_id': upload_id, 'overwrite': overwrite}
    if parent_rev is not None:
        commit_params['parent_rev'] = parent_rev
    url, body, headers = self.request("/commit_chunked_upload/%s" % full_path,
                                      commit_params, content_server=True)
    return self.rest_client.POST(url, body, headers)
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
    """Upload a file.

    A typical use case would be as follows::

        f = open('working-draft.txt', 'rb')
        response = client.put_file('/magnum-opus.txt', f)
        print("uploaded:", response)

    which would return the metadata of the uploaded file, similar to::

        {
            'bytes': 77,
            'icon': 'page_white_text',
            'is_dir': False,
            'mime_type': 'text/plain',
            'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
            'path': '/magnum-opus.txt',
            'rev': '362e2029684fe',
            'revision': 221922,
            'root': 'dropbox',
            'size': '77 bytes',
            'thumb_exists': False
        }

    Parameters
        full_path
            The full path to upload the file to, *including the file name*.
            If the destination folder does not yet exist, it will be created.
        file_obj
            A file-like object to upload. If you would like, you can pass a string as file_obj.
        overwrite
            Whether to overwrite an existing file at the given path. (Default ``False``.)
            If overwrite is False and a file already exists there, Dropbox
            will rename the upload to make sure it doesn't overwrite anything.
            You need to check the metadata returned for the new name.
            This field should only be True if your intent is to potentially
            clobber changes to a file that you don't know about.
        parent_rev
            Optional rev field from the 'parent' of this upload.
            If your intent is to update the file at the given path, you should
            pass the parent_rev parameter set to the rev value from the most recent
            metadata you have of the existing file at that path. If the server
            has a more recent version of the file at the specified path, it will
            automatically rename your uploaded file, spinning off a conflict.
            Using this parameter effectively causes the overwrite parameter to be ignored.
            The file will always be overwritten if you send the most recent parent_rev,
            and it will never be overwritten if you send a less recent one.

    Returns
        A dictionary containing the metadata of the newly uploaded file.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#files-put

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 503: User over quota.
    """
    path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
    params = {
        'overwrite': bool(overwrite),
    }
    if parent_rev is not None:
        params['parent_rev'] = parent_rev
    url, params, headers = self.request(path, params, method='PUT', content_server=True)
    return self.rest_client.PUT(url, file_obj, headers)
def get_file(self, from_path, rev=None, start=None, length=None):
    """Download a file.

    Example::

        out = open('magnum-opus.txt', 'wb')
        with client.get_file('/magnum-opus.txt') as f:
            out.write(f.read())

    Parameters
        from_path
            The path to the file to be downloaded.
        rev
            Optional previous rev value of the file to be downloaded.
        start
            Optional byte offset at which to start downloading.
        length
            Optional number of bytes to download.  If ``length`` is given
            without ``start``, the *last* ``length`` bytes are downloaded.

    Returns
        A :class:`dropbox.rest.RESTResponse` — a file-like object you can
        read from.  You must call ``close()`` when you're done.

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given path, or the file that was there was deleted.
        - 200: Request was okay but response was malformed in some way.
    """
    path = "/files/%s%s" % (self.session.root, format_path(from_path))
    params = {} if rev is None else {'rev': rev}
    url, params, headers = self.request(path, params, method='GET', content_server=True)
    # Express start/length as an HTTP Range header for partial downloads.
    if start is not None:
        headers['Range'] = ('bytes=%s-%s' % (start, start + length - 1)
                            if length else 'bytes=%s-' % start)
    elif length is not None:
        headers['Range'] = 'bytes=-%s' % length
    return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
    """Download a file along with its metadata.

    A thin wrapper around :meth:`get_file()` (see its comments for
    details) that also parses the ``x-dropbox-metadata`` response header.

    A typical usage looks like this::

        out = open('magnum-opus.txt', 'wb')
        f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
        with f:
            out.write(f.read())

    Parameters
        from_path
            The path to the file to be downloaded.
        rev
            Optional previous rev value of the file to be downloaded.

    Returns
        A pair of ``(response, metadata)``: a readable
        :class:`dropbox.rest.RESTResponse` (call ``close()`` when done) and
        a dictionary of the file's metadata (see
        https://www.dropbox.com/developers/core/docs#metadata).

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given path, or the file that was there was deleted.
        - 200: Request was okay but response was malformed in some way.
    """
    response = self.get_file(from_path, rev)
    metadata = DropboxClient.__parse_metadata_as_dict(response)
    return response, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
    """Parse file metadata out of the ``x-dropbox-metadata`` header of a
    raw dropbox HTTP response, raising a dropbox.rest.ErrorResponse when
    the header is absent or cannot be parsed as JSON."""
    metadata = None
    for name, value in dropbox_raw_response.getheaders().items():
        if name.lower() != 'x-dropbox-metadata':
            continue
        try:
            metadata = json.loads(value)
        except ValueError:
            raise ErrorResponse(dropbox_raw_response)
    if not metadata:
        raise ErrorResponse(dropbox_raw_response)
    return metadata
def delta(self, cursor=None, path_prefix=None, include_media_info=False):
    """Keep up with changes to files and folders in a user's Dropbox.

    Call delta() periodically to get a list of "delta entries" —
    instructions on how to update your local state to match the server's.

    Parameters
        cursor
            Omit (or pass ``None``) on the first call; on subsequent calls,
            pass the ``cursor`` string returned by the previous call.
        path_prefix
            If provided, results are limited to files and folders whose
            paths are equal to or under ``path_prefix``.  Fixed for a given
            cursor: whatever value you use on the first ``delta()`` must be
            passed in on subsequent calls that use the returned cursor.
        include_media_info
            If True, delta also returns media info for photos and videos
            (time taken, GPS coordinates, etc.).  There is a delay after
            upload before this info is available; a file only appears in
            the changelist once its media info is ready.  Like
            ``path_prefix``, this value is fixed to the cursor.

    Returns
        A dict with four keys:

        entries
            A list of "delta entries" (described below).
        reset
            If ``True``, reset your local state to an empty folder before
            processing the entries.  Only ``True`` in rare situations.
        cursor
            A string tracking your current state; pass it to the next
            delta() call to receive entries recorded since it was returned.
        has_more
            If ``True``, more entries are available and you may call
            delta() again immediately; if ``False``, wait at least 5
            minutes (preferably longer) before checking again.

    Delta Entries: each entry is a 2-item list of one of the forms:

    - [*path*, *metadata*]: there is a file/folder at *path* (the
      *metadata* value matches what ``metadata()`` would return).  Create
      any missing parent folders locally (entries for them will follow).
      If the entry is a file, replace whatever your local state has at
      *path*.  If it is a folder and your local state has a file there,
      replace it; if a folder, apply the new *metadata* but leave the
      folder's children alone.
    - [*path*, ``None``]: nothing exists at *path* on Dropbox; delete
      whatever is at *path* locally, including children ("delete" entries
      for children are sometimes, but not always, sent).  Ignore the entry
      if your local state has nothing at *path*.

    Remember: Dropbox treats file names case-insensitively but
    case-preservingly.  The *path* strings above are lower-cased; the
    *metadata* dicts carry the original, case-preserved path.
    """
    request_params = {'include_media_info': include_media_info}
    if cursor is not None:
        request_params['cursor'] = cursor
    if path_prefix is not None:
        request_params['path_prefix'] = path_prefix
    url, body, headers = self.request("/delta", request_params)
    return self.rest_client.POST(url, body, headers)
def longpoll_delta(self, cursor, timeout=None):
    """Long-poll for changes on an account.

    Used together with :meth:`delta()`, this gives a low-latency way to
    monitor an account for file changes.  Note that the request goes to
    ``api-notify.dropbox.com`` rather than ``api.dropbox.com`` and,
    unlike most endpoints, requires no OAuth authentication — though the
    cursor itself can only be obtained via an authenticated
    :meth:`delta()` call.

    Parameters
        cursor
            A delta cursor from :meth:`delta()`.  A cursor produced with
            ``include_media_info=True`` is incompatible with
            ``longpoll_delta()`` and yields an error.
        timeout
            Optional timeout in seconds.  Default 30, which is also the
            minimum; maximum 480.  The request blocks for at most this
            long plus up to 90 seconds of random jitter (thundering-herd
            avoidance).  Beware: some network infrastructure does not
            support long timeouts.

    Returns
        Blocks until changes are available or the timeout elapses, then
        returns a dictionary such as::

            {"changes": false, "backoff": 60}

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#longpoll-delta

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (generally an invalid parameter; check e.error for details).
    """
    poll_params = {'cursor': cursor}
    if timeout is not None:
        poll_params['timeout'] = timeout
    url, _unused, headers = self.request("/longpoll_delta", poll_params,
                                         method='GET', notification_server=True)
    return self.rest_client.GET(url, headers)
def create_copy_ref(self, from_path):
    """Create and return a copy ref for the file at ``from_path``.

    The copy ref can be used to instantly copy that file into the Dropbox
    of another account.

    Parameters
        from_path
            The path to the file for which a copy ref is created.

    Returns
        A dictionary that looks like the following example::

            {"expires": "Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref": "z1X6ATl6aWtzOGq0c3g5Ng"}
    """
    target = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
    url, _unused, headers = self.request(target, {}, method='GET')
    return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
    """Add the file referenced by ``copy_ref`` at ``to_path``.

    Parameters
        copy_ref
            A copy ref string returned by a create_copy_ref call, from
            this or any other Dropbox account.
        to_path
            The path at which the file will be created.

    Returns
        A dictionary containing the metadata of the new copy of the file.
    """
    request_params = {'from_copy_ref': copy_ref,
                      'to_path': format_path(to_path),
                      'root': self.session.root}
    url, body, headers = self.request("/fileops/copy", request_params)
    return self.rest_client.POST(url, body, headers)
def file_copy(self, from_path, to_path):
    """Copy a file or folder to a new location.

    Parameters
        from_path
            The path to the file or folder to be copied.
        to_path
            The destination path, *including the destination filename*
            (e.g. from_path '/test.txt' → to_path '/dir/test.txt').  If a
            file already exists at to_path, an ErrorResponse is raised.

    Returns
        A dictionary containing the metadata of the new copy.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#fileops-copy

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: Invalid copy operation (e.g. destination already exists,
          or trying to copy a shared folder).
        - 404: No file was found at given from_path.
        - 503: User over storage quota.
    """
    request_params = {'root': self.session.root,
                      'from_path': format_path(from_path),
                      'to_path': format_path(to_path)}
    url, body, headers = self.request("/fileops/copy", request_params)
    return self.rest_client.POST(url, body, headers)
def file_create_folder(self, path):
    """Create a folder.

    Parameters
        path
            The path of the new folder.

    Returns
        A dictionary containing the metadata of the newly created folder.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#fileops-create-folder

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: A folder at that path already exists.
    """
    request_params = {'root': self.session.root, 'path': format_path(path)}
    url, body, headers = self.request("/fileops/create_folder", request_params)
    return self.rest_client.POST(url, body, headers)
def file_delete(self, path):
    """Delete a file or folder.

    Parameters
        path
            The path of the file or folder to delete.

    Returns
        A dictionary containing the metadata of the just deleted file.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#fileops-delete

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given path.
    """
    request_params = {'root': self.session.root, 'path': format_path(path)}
    url, body, headers = self.request("/fileops/delete", request_params)
    return self.rest_client.POST(url, body, headers)
def file_move(self, from_path, to_path):
    """Move a file or folder to a new location.

    Parameters
        from_path
            The path to the file or folder to be moved.
        to_path
            The destination path, *including the destination filename*
            (e.g. if ``from_path`` is ``'/test.txt'``, ``to_path`` might be
            ``'/dir/test.txt'``).  If a file already exists at ``to_path``,
            an ErrorResponse is raised.

    Returns
        A dictionary containing the metadata of the moved file or folder.

        For a detailed description of what this call returns, visit:
        https://www.dropbox.com/developers/core/docs#fileops-move

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 403: Invalid move operation (e.g. destination already exists, or
          moving a shared folder into a shared folder).
        - 404: No file was found at given from_path.
        - 503: User over storage quota.
    """
    request_params = {'root': self.session.root,
                      'from_path': format_path(from_path),
                      'to_path': format_path(to_path)}
    url, body, headers = self.request("/fileops/move", request_params)
    return self.rest_client.POST(url, body, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None,
             rev=None, include_deleted=False, include_media_info=False):
    """Retrieve metadata for a file or folder.

    A typical use would be::

        folder_metadata = client.metadata('/')

    which returns a dictionary describing the root folder, including (for
    folders) a ``'contents'`` list with one metadata dictionary per
    contained entry.

    Parameters
        path
            The path of the file or folder.
        list
            Whether to list contained files (only applies when ``path``
            refers to a folder).
        file_limit
            Maximum number of entries to return for a folder. If the
            folder has more entries an exception is raised; the server
            returns at most 25,000 entries.
        hash
            A hash value from a previous folder listing. If the folder is
            unchanged the server responds with a 304 instead of resending
            the contents, saving bandwidth.
        rev
            Optional revision of the file to fetch metadata for (files
            only). Omitted means the most recent revision.
        include_deleted
            When listing contained files, also include deleted files.
        include_media_info
            If True, include extra media info for photos and videos when
            available (time taken, GPS coordinates, etc.).

    Returns
        A dictionary with the metadata of the file or folder (and its
        contents, if appropriate). For a detailed description of the
        fields, visit: https://www.dropbox.com/developers/core/docs#metadata

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 304: Current folder hash matches the hash parameter; contents unchanged.
        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at given path.
        - 406: Too many file entries to return.
    """
    target = "/metadata/%s%s" % (self.session.root, format_path(path))

    # The server expects the 'list' flag as a lowercase string.
    call_params = {
        'file_limit': file_limit,
        'list': 'true' if list else 'false',
        'include_deleted': include_deleted,
        'include_media_info': include_media_info,
    }
    if hash is not None:
        call_params['hash'] = hash
    if rev:
        call_params['rev'] = rev

    url, call_params, headers = self.request(target, call_params, method='GET')
    return self.rest_client.GET(url, headers)
def thumbnail(self, from_path, size='m', format='JPEG'):
    """Download a thumbnail for an image.

    Parameters
        from_path
            The path of the image file to thumbnail.
        size
            Desired thumbnail size: ``"xs"`` (32x32), ``"s"`` (64x64),
            ``"m"`` (128x128), ``"l"`` (640x480) or ``"xl"`` (1024x768).
            See https://www.dropbox.com/developers/core/docs#thumbnails
            for details.
        format
            Image format for the returned thumbnail data: either
            ``"JPEG"`` or ``"PNG"``.

    Returns
        A :class:`dropbox.rest.RESTResponse` — the HTTP response for the
        API request, a file-like object that can be read from. You must
        call ``close()`` when you're done.

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given from_path, or files of that
          type cannot be thumbnailed.
        - 415: Image is invalid and cannot be thumbnailed.
    """
    assert format in ['JPEG', 'PNG'], \
        "expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format

    target = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
    url, _, headers = self.request(target, {'size': size, 'format': format},
                                   method='GET', content_server=True)
    return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='m', format='JPEG'):
    """Download a thumbnail for an image along with the image's metadata.

    A thin wrapper around :meth:`thumbnail()`; see that method for
    parameter details.

    Parameters
        from_path
            The path of the image file to thumbnail.
        size
            Desired thumbnail size (see :meth:`thumbnail()`).
        format
            Image format for the thumbnail data: ``"JPEG"`` or ``"PNG"``.

    Returns
        A pair of ``(response, metadata)``:

        response
            A :class:`dropbox.rest.RESTResponse` — a file-like object
            holding the thumbnail data; call ``close()`` when done.
        metadata
            A dictionary with the metadata of the thumbnailed file (see
            https://www.dropbox.com/developers/core/docs#metadata).

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No file was found at the given from_path, or files of that
          type cannot be thumbnailed.
        - 415: Image is invalid and cannot be thumbnailed.
        - 200: Request was okay but response was malformed in some way.
    """
    response = self.thumbnail(from_path, size, format)
    # Metadata is delivered in a response header, parsed by this private helper.
    metadata = DropboxClient.__parse_metadata_as_dict(response)
    return response, metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
    """Search a folder for filenames matching a query.

    Parameters
        path
            The folder to search within.
        query
            The query to search on (minimum 3 characters).
        file_limit
            Maximum number of entries to return; the server returns at
            most 1,000 files.
        include_deleted
            Whether to include deleted files in the results.

    Returns
        A list of metadata dictionaries for all matching files (up to
        ``file_limit`` entries). For a detailed description of the fields,
        visit: https://www.dropbox.com/developers/core/docs#search

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
    """
    target = "/search/%s%s" % (self.session.root, format_path(path))
    call_params = {
        'query': query,
        'file_limit': file_limit,
        'include_deleted': include_deleted,
    }
    url, call_params, headers = self.request(target, call_params)
    return self.rest_client.POST(url, call_params, headers)
def revisions(self, path, rev_limit=1000):
    """Retrieve revisions of a file.

    Parameters
        path
            The file to fetch revisions for. Revisions are not available
            for folders.
        rev_limit
            Maximum number of revisions to return; the server returns at
            most 1,000.

    Returns
        A list of metadata dictionaries, one per revision (up to
        ``rev_limit`` entries). For a detailed description of the fields,
        visit: https://www.dropbox.com/developers/core/docs#revisions

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: No revisions were found at the given path.
    """
    target = "/revisions/%s%s" % (self.session.root, format_path(path))
    url, _, headers = self.request(target, {'rev_limit': rev_limit},
                                   method='GET')
    return self.rest_client.GET(url, headers)
def restore(self, path, rev):
    """Restore a file to a previous revision.

    Parameters
        path
            The file to restore. Folders can't be restored.
        rev
            A previous rev value of the file to restore to.

    Returns
        A dictionary with the metadata of the newly restored file. For a
        detailed description of the fields, visit:
        https://www.dropbox.com/developers/core/docs#restore

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given revision.
    """
    target = "/restore/%s%s" % (self.session.root, format_path(path))
    url, call_params, headers = self.request(target, {'rev': rev})
    return self.rest_client.POST(url, call_params, headers)
def media(self, path):
    """Get a temporary unauthenticated URL for a media file.

    All of Dropbox's API methods require OAuth, which may be awkward when
    a URL must be hit repeatedly without credentials (for example, a media
    player seeking around a video file). This method creates a
    time-limited URL that can be fetched without authentication, and
    returns it along with an expiration time.

    Parameters
        path
            The file to return a URL for. Folders are not supported.

    Returns
        A dictionary like::

            {'url': 'https://dl.dropboxusercontent.com/1/view/abcdefghijk/example',
             'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}

        For a detailed description of the fields, visit:
        https://www.dropbox.com/developers/core/docs#media

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given path.
    """
    target = "/media/%s%s" % (self.session.root, format_path(path))
    url, _, headers = self.request(target, method='GET')
    return self.rest_client.GET(url, headers)
def share(self, path, short_url=True):
    """Create a shareable link to a file or folder.

    Shareable links are time-limited but require no authentication, so
    they can be given out freely. The time limit should allow at least a
    day of shareability; users can disable a link from their account if
    they like.

    Parameters
        path
            The file or folder to share.
        short_url
            Whether to return a shortened URL (default True).

    Returns
        A dictionary like::

            {'url': u'https://db.tt/c0mFuu1Y', 'expires': 'Tue, 01 Jan 2030 00:00:00 +0000'}

        For a detailed description of the fields, visit:
        https://www.dropbox.com/developers/core/docs#shares

    Raises
        A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:

        - 400: Bad request (may be due to many things; check e.error for details).
        - 404: Unable to find the file at the given path.
    """
    target = "/shares/%s%s" % (self.session.root, format_path(path))
    url, _, headers = self.request(target, {'short_url': short_url},
                                   method='GET')
    return self.rest_client.GET(url, headers)
class ChunkedUploader(object):
    """Uploads a large file to Dropbox in pieces via the /chunked_upload
    endpoint, tracking the server-acknowledged offset so an interrupted
    upload can be resumed with another call to :meth:`upload_chunked`.
    """

    def __init__(self, client, file_obj, length):
        self.client = client          # DropboxClient used for the upload calls
        self.offset = 0               # bytes the server has accepted so far
        self.upload_id = None         # server-assigned id, set after the first chunk
        self.last_block = None        # chunk read but not yet acknowledged
        self.file_obj = file_obj      # source file-like object
        self.target_length = length   # total number of bytes to upload

    def upload_chunked(self, chunk_size=4 * 1024 * 1024):
        """Upload data from this ChunkedUploader's file_obj in chunks until
        the target length is reached or an error occurs. Raises on error and
        can be called again to resume the upload.

        Parameters
            chunk_size
                The number of bytes to put in each chunk. (Default 4 MB.)
        """
        while self.offset < self.target_length:
            next_chunk_size = min(chunk_size, self.target_length - self.offset)
            # Re-send self.last_block if the previous attempt failed; only
            # read fresh data once the prior chunk has been acknowledged.
            # (Was ``== None``; identity comparison is the correct idiom.)
            if self.last_block is None:
                self.last_block = self.file_obj.read(next_chunk_size)

            try:
                # NOTE(review): StringIO handles text; if file_obj is opened in
                # binary mode this needs io.BytesIO on Python 3 — confirm.
                (self.offset, self.upload_id) = self.client.upload_chunk(
                    StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
                self.last_block = None
            except ErrorResponse as e:
                # Handle the case where the server tells us our offset is wrong:
                # adopt the server's offset and retry instead of failing.
                must_reraise = True
                if e.status == 400:
                    reply = e.body
                    if "offset" in reply and reply['offset'] != 0 and reply['offset'] > self.offset:
                        self.last_block = None
                        self.offset = reply['offset']
                        must_reraise = False
                if must_reraise:
                    raise

    def finish(self, path, overwrite=False, parent_rev=None):
        """Commit the bytes uploaded by this ChunkedUploader to a file in
        the user's Dropbox.

        Parameters
            path
                The full path of the file in the Dropbox.
            overwrite
                Whether to overwrite an existing file at the given path
                (default ``False``). If False and a file already exists
                there, Dropbox renames the upload instead of overwriting;
                check the returned metadata for the new name. Set True only
                when your intent is to potentially clobber changes to a file
                that you don't know about.
            parent_rev
                Optional rev field from the 'parent' of this upload. To
                update the file at the given path, pass the rev value from
                the most recent metadata you have of the existing file. If
                the server has a newer version it renames the upload,
                spinning off a conflict. This effectively makes ``overwrite``
                ignored: the file is always overwritten with the most recent
                parent_rev and never with a less recent one.
        """
        path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))

        params = dict(
            overwrite=bool(overwrite),
            upload_id=self.upload_id
        )
        if parent_rev is not None:
            params['parent_rev'] = parent_rev

        url, params, headers = self.client.request(path, params, content_server=True)
        return self.client.rest_client.POST(url, params, headers)
# Allow access of ChunkedUploader via DropboxClient for backwards compatibility.
# (Older releases exposed the class as DropboxClient.ChunkedUploader.)
DropboxClient.ChunkedUploader = ChunkedUploader
class DropboxOAuth2FlowBase(object):
    """Shared machinery for the OAuth 2 authorization flows: building the
    authorize URL and exchanging an authorization code for an access token.
    """

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=RESTClient):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.locale = locale
        self.rest_client = rest_client

    def _get_authorize_url(self, redirect_uri, state):
        # Build the /oauth2/authorize URL for the "code" response type.
        params = dict(response_type='code',
                      client_id=self.consumer_key)
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri
        if state is not None:
            params['state'] = state

        return self.build_url(BaseSession.WEB_HOST, '/oauth2/authorize', params)

    def _finish(self, code, redirect_uri):
        # Exchange an authorization code for (access_token, user_id).
        url = self.build_url(BaseSession.API_HOST, '/oauth2/token')
        params = {'grant_type': 'authorization_code',
                  'code': code,
                  'client_id': self.consumer_key,
                  'client_secret': self.consumer_secret,
                  }
        if self.locale is not None:
            params['locale'] = self.locale
        if redirect_uri is not None:
            params['redirect_uri'] = redirect_uri

        response = self.rest_client.POST(url, params=params)
        access_token = response["access_token"]
        user_id = response["uid"]
        return access_token, user_id

    def build_path(self, target, params=None):
        """Build the path component for an API URL.

        This method urlencodes the parameters, adds them to the end of the
        target url, and puts a marker for the API version in front.

        Parameters
            target
                A target url (e.g. '/files') to build upon.
            params
                Optional dictionary of parameters (name to value).

        Returns
            The path and parameters components of an API URL.
        """
        # On Python 2 a (byte-)str target must be UTF-8 encoded before quoting.
        # (Was ``type(target) == str``; isinstance is the idiomatic check.)
        if sys.version_info < (3,) and isinstance(target, str):
            target = target.encode("utf8")

        target_path = urllib.parse.quote(target)

        params = params or {}
        params = params.copy()  # don't mutate the caller's dict

        if self.locale:
            params['locale'] = self.locale

        if params:
            query_string = params_to_urlencoded(params)
            return "/%s%s?%s" % (BaseSession.API_VERSION, target_path, query_string)
        else:
            return "/%s%s" % (BaseSession.API_VERSION, target_path)

    def build_url(self, host, target, params=None):
        """Build a full API URL: scheme and hostname prepended to the path
        returned from :meth:`build_path`.

        Parameters
            target
                A target url (e.g. '/files') to build upon.
            params
                Optional dictionary of parameters (name to value).

        Returns
            The full API URL.
        """
        return "https://%s%s" % (host, self.build_path(target, params))
class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper for apps that can't provide a redirect URI
    (such as the command-line example apps).

    Example::

        from dropbox.client import DropboxOAuth2FlowNoRedirect, DropboxClient
        from dropbox import rest as dbrest

        auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)

        authorize_url = auth_flow.start()
        print "1. Go to: " + authorize_url
        print "2. Click \\"Allow\\" (you might have to log in first)."
        print "3. Copy the authorization code."
        auth_code = raw_input("Enter the authorization code here: ").strip()

        try:
            access_token, user_id = auth_flow.finish(auth_code)
        except dbrest.ErrorResponse, e:
            print('Error: %s' % (e,))
            return

        c = DropboxClient(access_token)
    """

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=None):
        """
        Construct an instance.

        Parameters
            consumer_key
                Your API app's "app key".
            consumer_secret
                Your API app's "app secret".
            locale
                The locale of the user of your application, e.g. "en" or
                "en_US". Some API calls return localized data and error
                messages; this tells the server which locale to use. The
                server default is "en_US".
            rest_client
                Optional :class:`dropbox.rest.RESTClient`-like object to use
                for making requests.
        """
        super(DropboxOAuth2FlowNoRedirect, self).__init__(
            consumer_key,
            consumer_secret,
            locale,
            RESTClient if rest_client is None else rest_client)

    def start(self):
        """
        Start the OAuth 2 authorization process.

        Returns
            The URL of a page on Dropbox's website that lets the user
            "approve" your app, granting it access to the user's Dropbox
            account. Tell the user to visit this URL and approve your app.
        """
        return self._get_authorize_url(None, None)

    def finish(self, code):
        """
        Exchange the authorization code shown to the user for an access
        token. After approving your app the user is presented with an
        "authorization code"; have them copy/paste it into your app and pass
        it here.

        Parameters
            code
                The authorization code shown to the user when they approved
                your app.

        Returns
            A pair of ``(access_token, user_id)``. ``access_token`` is a
            string usable with DropboxClient; ``user_id`` is the Dropbox
            user ID (string) of the user that approved your app.

        Raises
            The same exceptions as :meth:`DropboxOAuth2Flow.finish()`.
        """
        return self._finish(code, None)
class DropboxOAuth2Flow(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper. Use this for web apps.

    OAuth 2 has a two-step authorization process. The first step is having the user authorize
    your app. The second involves getting an OAuth 2 access token from Dropbox.

    Example::

        from dropbox.client import DropboxOAuth2Flow, DropboxClient

        def get_dropbox_auth_flow(web_app_session):
            redirect_uri = "https://my-web-server.org/dropbox-auth-finish"
            return DropboxOAuth2Flow(APP_KEY, APP_SECRET, redirect_uri,
                                     web_app_session, "dropbox-auth-csrf-token")

        # URL handler for /dropbox-auth-start
        def dropbox_auth_start(web_app_session, request):
            authorize_url = get_dropbox_auth_flow(web_app_session).start()
            redirect_to(authorize_url)

        # URL handler for /dropbox-auth-finish
        def dropbox_auth_finish(web_app_session, request):
            try:
                access_token, user_id, url_state = \\
                    get_dropbox_auth_flow(web_app_session).finish(request.query_params)
            except DropboxOAuth2Flow.BadRequestException as e:
                http_status(400)
            except DropboxOAuth2Flow.BadStateException as e:
                # Start the auth flow again.
                redirect_to("/dropbox-auth-start")
            except DropboxOAuth2Flow.CsrfException as e:
                http_status(403)
            except DropboxOAuth2Flow.NotApprovedException as e:
                flash('Not approved?  Why not?')
                return redirect_to("/home")
            except DropboxOAuth2Flow.ProviderException as e:
                logger.log("Auth error: %s" % (e,))
                http_status(403)
    """

    def __init__(self, consumer_key, consumer_secret, redirect_uri, session,
                 csrf_token_session_key, locale=None, rest_client=None):
        """
        Construct an instance.

        Parameters
            consumer_key
                Your API app's "app key".
            consumer_secret
                Your API app's "app secret".
            redirect_uri
                The URI that the Dropbox server will redirect the user to after the user
                finishes authorizing your app. This URI must be HTTPS-based and pre-registered
                with the Dropbox servers, though localhost URIs are allowed without
                pre-registration and can be either HTTP or HTTPS.
            session
                A dict-like object that represents the current user's web session (will be
                used to save the CSRF token).
            csrf_token_session_key
                The key to use when storing the CSRF token in the session (for
                example: "dropbox-auth-csrf-token").
            locale
                The locale of the user of your application. For example "en" or "en_US".
                Some API calls return localized data and error messages; this setting
                tells the server which locale to use. By default, the server uses "en_US".
            rest_client
                Optional :class:`dropbox.rest.RESTClient`-like object to use for making
                requests.
        """
        if rest_client is None: rest_client = RESTClient
        super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret, locale, rest_client)
        self.redirect_uri = redirect_uri
        self.session = session
        self.csrf_token_session_key = csrf_token_session_key

    def start(self, url_state=None):
        """
        Starts the OAuth 2 authorization process.

        This function builds an "authorization URL". You should redirect your user's browser to
        this URL, which will give them an opportunity to grant your app access to their Dropbox
        account. When the user completes this process, they will be automatically redirected to
        the ``redirect_uri`` you passed in to the constructor.

        This function will also save a CSRF token to ``session[csrf_token_session_key]`` (as
        provided to the constructor). This CSRF token will be checked on :meth:`finish()` to
        prevent request forgery.

        Parameters
            url_state
                Any data that you would like to keep in the URL through the
                authorization process. This exact value will be returned to you by
                :meth:`finish()`.

        Returns
            The URL for a page on Dropbox's website. This page will let the user "approve"
            your app, which gives your app permission to access the user's Dropbox account.
            Tell the user to visit this URL and approve your app.
        """
        # urlsafe_b64encode returns bytes on Python 3; decode so the token can
        # be concatenated with the str url_state and compared against the
        # (str) 'state' query parameter in finish().
        csrf_token = base64.urlsafe_b64encode(os.urandom(16)).decode('ascii')
        state = csrf_token
        if url_state is not None:
            state += "|" + url_state
        self.session[self.csrf_token_session_key] = csrf_token

        return self._get_authorize_url(self.redirect_uri, state)

    def finish(self, query_params):
        """
        Call this after the user has visited the authorize URL (see :meth:`start()`), approved
        your app and was redirected to your redirect URI.

        Parameters
            query_params
                The query parameters on the GET request to your redirect URI.

        Returns
            A tuple of ``(access_token, user_id, url_state)``. ``access_token`` can be used to
            construct a :class:`DropboxClient`. ``user_id`` is the Dropbox user ID (string) of
            the user that just approved your app. ``url_state`` is the value you originally
            passed in to :meth:`start()`.

        Raises
            :class:`BadRequestException`
                If the redirect URL was missing parameters or if the given parameters were
                not valid.
            :class:`BadStateException`
                If there's no CSRF token in the session.
            :class:`CsrfException`
                If the ``'state'`` query parameter doesn't contain the CSRF token from the
                user's session.
            :class:`NotApprovedException`
                If the user chose not to approve your app.
            :class:`ProviderException`
                If Dropbox redirected to your redirect URI with some unexpected error
                identifier and error message.
        """
        csrf_token_from_session = self.session[self.csrf_token_session_key]

        # Check well-formedness of request.
        state = query_params.get('state')
        if state is None:
            raise self.BadRequestException("Missing query parameter 'state'.")

        error = query_params.get('error')
        error_description = query_params.get('error_description')
        code = query_params.get('code')

        if error is not None and code is not None:
            raise self.BadRequestException("Query parameters 'code' and 'error' are both set; "
                                           " only one must be set.")
        if error is None and code is None:
            raise self.BadRequestException("Neither query parameter 'code' or 'error' is set.")

        # Check CSRF token.
        if csrf_token_from_session is None:
            # Was ``self.BadStateError`` — an attribute that doesn't exist on
            # this class; the defined exception is BadStateException.
            raise self.BadStateException("Missing CSRF token in session.")
        if len(csrf_token_from_session) <= 20:
            raise AssertionError("CSRF token unexpectedly short: %r" % (csrf_token_from_session,))

        split_pos = state.find('|')
        if split_pos < 0:
            given_csrf_token = state
            url_state = None
        else:
            given_csrf_token = state[0:split_pos]
            url_state = state[split_pos+1:]

        if not _safe_equals(csrf_token_from_session, given_csrf_token):
            raise self.CsrfException("expected %r, got %r" % (csrf_token_from_session,
                                                              given_csrf_token))

        del self.session[self.csrf_token_session_key]

        # Check for error identifier.
        if error is not None:
            if error == 'access_denied':
                # The user clicked "Deny".
                if error_description is None:
                    raise self.NotApprovedException("No additional description from Dropbox")
                else:
                    raise self.NotApprovedException("Additional description from Dropbox: " +
                                                    error_description)
            else:
                # All other errors.
                full_message = error
                if error_description is not None:
                    full_message += ": " + error_description
                # Was ``self.ProviderError`` — the defined exception is
                # ProviderException.
                raise self.ProviderException(full_message)

        # If everything went ok, make the network call to get an access token.
        access_token, user_id = self._finish(code, self.redirect_uri)
        return access_token, user_id, url_state

    class BadRequestException(Exception):
        """
        Thrown if the redirect URL was missing parameters or if the
        given parameters were not valid.

        The recommended action is to show an HTTP 400 error page.
        """
        pass

    class BadStateException(Exception):
        """
        Thrown if all the parameters are correct, but there's no CSRF token in the session.
        This probably means that the session expired.

        The recommended action is to redirect the user's browser to try the approval process
        again.
        """
        pass

    class CsrfException(Exception):
        """
        Thrown if the given 'state' parameter doesn't contain the CSRF
        token from the user's session.

        This is blocked to prevent CSRF attacks.
        The recommended action is to respond with an HTTP 403 error page.
        """
        pass

    class NotApprovedException(Exception):
        """
        The user chose not to approve your app.
        """
        pass

    class ProviderException(Exception):
        """
        Dropbox redirected to your redirect URI with some unexpected error identifier and
        error message.

        The recommended action is to log the error, tell the user something went wrong, and
        let them try again.
        """
        pass
def _safe_equals(a, b):
if len(a) != len(b): return False
res = 0
for ca, cb in zip(a, b):
res |= ord(ca) ^ ord(cb)
return res == 0
# Accepts one or more token characters (letters, digits, -_~/.+) followed by
# optional '=' padding; used to validate OAuth 2 access tokens passed to
# DropboxClient.
_OAUTH2_ACCESS_TOKEN_PATTERN = re.compile(r'\A[-_~/A-Za-z0-9\.\+]+=*\Z')
# From the "Bearer" token spec, RFC 6750.
# NOTE(review): unresolved merge-conflict markers (">>>>>>> b875702..." / "=======")
# removed here. This file contains two divergent copies of the module separated
# by this point — reconcile them before release.
import base64
import re
import os
import sys
import urllib.request, urllib.parse, urllib.error
PY3 = sys.version_info[0] == 3
if PY3:
from io import StringIO
str = str
else:
from io import StringIO
try:
import json
except ImportError:
import simplejson as json
from .rest import ErrorResponse, RESTClient, params_to_urlencoded
from .session import BaseSession, DropboxSession, DropboxOAuth2Session
def format_path(path):
    """Normalize a path for use with the Dropbox API.

    Collapses runs of adjacent slashes into single slashes, then ensures a
    leading slash and no trailing slash.

    Parameters
        path
            The path to normalize. Falsy values are returned unchanged.

    Returns
        The normalized path; the empty string for the root ``'/'``.
    """
    if not path:
        return path

    path = re.sub(r'/+', '/', path)

    if path == '/':
        # The root maps to the empty string. (The original Python 2 code
        # returned u'' vs '' here; both branches were identical after the
        # Python 3 conversion, so the conditional was dropped.)
        return ''
    else:
        return '/' + path.strip('/')
class DropboxClient(object):
"""
This class lets you make Dropbox API calls. You'll need to obtain an
OAuth 2 access token first. You can get an access token using either
:class:`DropboxOAuth2Flow` or :class:`DropboxOAuth2FlowNoRedirect`.
All of the API call methods can raise a :class:`dropbox.rest.ErrorResponse` exception if
the server returns a non-200 or invalid HTTP response. Note that a 401
return status at any point indicates that the access token you're using
is no longer valid and the user must be put through the OAuth 2
authorization flow again.
"""
def __init__(self, oauth2_access_token, locale=None, rest_client=None):
    """Construct a ``DropboxClient`` instance.

    Parameters
        oauth2_access_token
            An OAuth 2 access token (string). For backwards compatibility
            this may also be a DropboxSession object (see
            :meth:`create_oauth2_access_token()`).
        locale
            The locale of the user of your application, e.g. "en" or
            "en_US". Some API calls return localized data and error
            messages; this tells the server which locale to use. The server
            default is "en_US". Only meaningful with an OAuth 2 token.
        rest_client
            Optional :class:`dropbox.rest.RESTClient`-like object to use for
            making requests.

    Raises
        ValueError
            If the token string is malformed, if a locale is combined with
            an OAuth 1 session, or if the token is neither a string nor a
            DropboxSession.
    """
    rest_client = RESTClient if rest_client is None else rest_client

    if isinstance(oauth2_access_token, str):
        # Reject tokens that don't look like RFC 6750 bearer tokens.
        if not _OAUTH2_ACCESS_TOKEN_PATTERN.match(oauth2_access_token):
            raise ValueError("invalid format for oauth2_access_token: %r"
                             % (oauth2_access_token,))
        self.session = DropboxOAuth2Session(oauth2_access_token, locale)
    elif isinstance(oauth2_access_token, DropboxSession):
        # Backwards compatibility with OAuth 1: the session carries its own
        # locale, so a separate locale argument is not allowed here.
        if locale is not None:
            raise ValueError("The 'locale' parameter to DropboxClient is only useful "
                             "when also passing in an OAuth 2 access token")
        self.session = oauth2_access_token
    else:
        raise ValueError("'oauth2_access_token' must either be a string or a DropboxSession")

    self.rest_client = rest_client
def request(self, target, params=None, method='POST',
            content_server=False, notification_server=False):
    """
    Build the url, headers, and params for a Dropbox API request.

    An internal method, exposed for making API calls not implemented in this
    library and for debugging requests.

    Parameters
        target
            The target URL with leading slash (e.g. '/files').
        params
            A dictionary of parameters to add to the request.
        method
            An HTTP method (e.g. 'GET' or 'POST').
        content_server
            Whether the request goes to the API content server, for example
            to fetch the contents of a file rather than its metadata.
        notification_server
            Whether the request goes to the API notification server, for
            example for longpolling.

    Returns
        A tuple of ``(url, params, headers)`` that should be used to make
        the request. OAuth will be added as needed within these fields.
    """
    assert method in ['GET','POST', 'PUT'], "Only 'GET', 'POST', and 'PUT' are allowed."
    assert not (content_server and notification_server), \
        "Cannot construct request simultaneously for content and notification servers."

    if params is None:
        params = {}

    # Pick the host: at most one of the two server flags may be set.
    host = (self.session.API_CONTENT_HOST if content_server
            else self.session.API_NOTIFICATION_HOST if notification_server
            else self.session.API_HOST)

    base = self.session.build_url(host, target)
    headers, params = self.session.build_access_headers(method, base, params)

    # GET/PUT carry parameters in the query string; POST sends them in the body.
    if method in ('GET', 'PUT'):
        url = self.session.build_url(host, target, params)
    else:
        url = self.session.build_url(host, target)

    return url, params, headers
def account_info(self):
    """Retrieve information about the user's account.

    Returns
        A dictionary containing account information. For a detailed
        description of the fields, visit:
        https://www.dropbox.com/developers/core/docs#account-info
    """
    url, _, headers = self.request("/account/info", method='GET')
    return self.rest_client.GET(url, headers)
def disable_access_token(self):
    """
    Disable the access token that this ``DropboxClient`` is using.

    If this call succeeds, further API calls using this object will fail.
    """
    url, body, headers = self.request("/disable_access_token", method='POST')
    return self.rest_client.POST(url, body, headers)
def create_oauth2_access_token(self):
    """
    Create an OAuth 2 access token equivalent to this client's OAuth 1 token.

    Only valid when this ``DropboxClient`` was created with an OAuth 1
    access token; use it to upgrade your app's existing tokens from OAuth 1
    to OAuth 2.

    Example::

        from dropbox.client import DropboxClient
        from dropbox.session import DropboxSession
        session = DropboxSession(APP_KEY, APP_SECRET)
        access_key, access_secret = '123abc', 'xyz456'  # Previously obtained OAuth 1 credentials
        session.set_token(access_key, access_secret)
        client = DropboxClient(session)
        token = client.create_oauth2_access_token()
        # Optionally, create a new client using the new token
        new_client = DropboxClient(token)

    Raises
        ValueError
            If this client is not configured with an OAuth 1 access token.
    """
    if not isinstance(self.session, DropboxSession):
        raise ValueError("This call requires a DropboxClient that is configured with an "
                         "OAuth 1 access token.")
    url, body, headers = self.request("/oauth2/token_from_oauth1", method='POST')
    response = self.rest_client.POST(url, body, headers)
    return response['access_token']
def get_chunked_uploader(self, file_obj, length):
"""Creates a :class:`ChunkedUploader` to upload the given file-like object.
Parameters
file_obj
The file-like object which is the source of the data
being uploaded.
length
The number of bytes to upload.
The expected use of this function is as follows::
bigFile = open("data.txt", 'rb')
uploader = myclient.get_chunked_uploader(bigFile, size)
print "uploading: ", size
while uploader.offset < size:
try:
upload = uploader.upload_chunked()
except rest.ErrorResponse, e:
# perform error handling and retry logic
uploader.finish('/bigFile.txt')
The SDK leaves the error handling and retry logic to the developer
to implement, as the exact requirements will depend on the application
involved.
"""
return ChunkedUploader(self, file_obj, length)
def upload_chunk(self, file_obj, length=None, offset=0, upload_id=None):
"""Uploads a single chunk of data from a string or file-like object. The majority of users
should use the :class:`ChunkedUploader` object, which provides a simpler interface to the
chunked_upload API endpoint.
Parameters
file_obj
The source of the chunk to upload; a file-like object or a string.
length
This argument is ignored but still present for backward compatibility reasons.
offset
The byte offset to which this source data corresponds in the original file.
upload_id
The upload identifier for which this chunk should be uploaded,
returned by a previous call, or None to start a new upload.
Returns
A dictionary containing the keys:
upload_id
A string used to identify the upload for subsequent calls to :meth:`upload_chunk()`
and :meth:`commit_chunked_upload()`.
offset
The offset at which the next upload should be applied.
expires
The time after which this partial upload is invalid.
"""
params = dict()
if upload_id:
params['upload_id'] = upload_id
params['offset'] = offset
url, ignored_params, headers = self.request("/chunked_upload", params,
method='PUT', content_server=True)
try:
reply = self.rest_client.PUT(url, file_obj, headers)
return reply['offset'], reply['upload_id']
except ErrorResponse as e:
raise e
def commit_chunked_upload(self, full_path, upload_id, overwrite=False, parent_rev=None):
"""Commit the previously uploaded chunks for the given path.
Parameters
full_path
The full path to which the chunks are uploaded, *including the file name*.
If the destination folder does not yet exist, it will be created.
upload_id
The chunked upload identifier, previously returned from upload_chunk.
overwrite
Whether to overwrite an existing file at the given path. (Default ``False``.)
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
parent_rev
Optional rev field from the 'parent' of this upload.
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most recent parent_rev,
and it will never be overwritten if you send a less recent one.
Returns
A dictionary containing the metadata of the newly committed file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#commit-chunked-upload
"""
params = {
'upload_id': upload_id,
'overwrite': overwrite,
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request("/commit_chunked_upload/%s" % full_path,
params, content_server=True)
return self.rest_client.POST(url, params, headers)
def put_file(self, full_path, file_obj, overwrite=False, parent_rev=None):
"""Upload a file.
A typical use case would be as follows::
f = open('working-draft.txt', 'rb')
response = client.put_file('/magnum-opus.txt', f)
print "uploaded:", response
which would return the metadata of the uploaded file, similar to::
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
Parameters
full_path
The full path to upload the file to, *including the file name*.
If the destination folder does not yet exist, it will be created.
file_obj
A file-like object to upload. If you would like, you can pass a string as file_obj.
overwrite
Whether to overwrite an existing file at the given path. (Default ``False``.)
If overwrite is False and a file already exists there, Dropbox
will rename the upload to make sure it doesn't overwrite anything.
You need to check the metadata returned for the new name.
This field should only be True if your intent is to potentially
clobber changes to a file that you don't know about.
parent_rev
Optional rev field from the 'parent' of this upload.
If your intent is to update the file at the given path, you should
pass the parent_rev parameter set to the rev value from the most recent
metadata you have of the existing file at that path. If the server
has a more recent version of the file at the specified path, it will
automatically rename your uploaded file, spinning off a conflict.
Using this parameter effectively causes the overwrite parameter to be ignored.
The file will always be overwritten if you send the most recent parent_rev,
and it will never be overwritten if you send a less recent one.
Returns
A dictionary containing the metadata of the newly uploaded file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#files-put
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 503: User over quota.
"""
path = "/files_put/%s%s" % (self.session.root, format_path(full_path))
params = {
'overwrite': bool(overwrite),
}
if parent_rev is not None:
params['parent_rev'] = parent_rev
url, params, headers = self.request(path, params, method='PUT', content_server=True)
return self.rest_client.PUT(url, file_obj, headers)
def get_file(self, from_path, rev=None, start=None, length=None):
"""Download a file.
Example::
out = open('magnum-opus.txt', 'wb')
with client.get_file('/magnum-opus.txt') as f:
out.write(f.read())
which would download the file ``magnum-opus.txt`` and write the contents into
the file ``magnum-opus.txt`` on the local filesystem.
Parameters
from_path
The path to the file to be downloaded.
rev
Optional previous rev value of the file to be downloaded.
start
Optional byte value from which to start downloading.
length
Optional length in bytes for partially downloading the file. If ``length`` is
specified but ``start`` is not, then the last ``length`` bytes will be downloaded.
Returns
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
path = "/files/%s%s" % (self.session.root, format_path(from_path))
params = {}
if rev is not None:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET', content_server=True)
if start is not None:
if length:
headers['Range'] = 'bytes=%s-%s' % (start, start + length - 1)
else:
headers['Range'] = 'bytes=%s-' % start
elif length is not None:
headers['Range'] = 'bytes=-%s' % length
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def get_file_and_metadata(self, from_path, rev=None):
"""Download a file alongwith its metadata.
Acts as a thin wrapper around get_file() (see :meth:`get_file()` comments for
more details)
A typical usage looks like this::
out = open('magnum-opus.txt', 'wb')
f, metadata = client.get_file_and_metadata('/magnum-opus.txt')
with f:
out.write(f.read())
Parameters
from_path
The path to the file to be downloaded.
rev
Optional previous rev value of the file to be downloaded.
Returns
A pair of ``(response, metadata)``:
response
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
metadata
A dictionary containing the metadata of the file (see
https://www.dropbox.com/developers/core/docs#metadata for details).
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given path, or the file that was there was deleted.
- 200: Request was okay but response was malformed in some way.
"""
file_res = self.get_file(from_path, rev)
metadata = DropboxClient.__parse_metadata_as_dict(file_res)
return file_res, metadata
@staticmethod
def __parse_metadata_as_dict(dropbox_raw_response):
# Parses file metadata from a raw dropbox HTTP response, raising a
# dropbox.rest.ErrorResponse if parsing fails.
metadata = None
for header, header_val in dropbox_raw_response.getheaders().items():
if header.lower() == 'x-dropbox-metadata':
try:
metadata = json.loads(header_val)
except ValueError:
raise ErrorResponse(dropbox_raw_response)
if not metadata: raise ErrorResponse(dropbox_raw_response)
return metadata
def delta(self, cursor=None, path_prefix=None, include_media_info=False):
"""A way of letting you keep up with changes to files and folders in a
user's Dropbox. You can periodically call delta() to get a list of "delta
entries", which are instructions on how to update your local state to
match the server's state.
Parameters
cursor
On the first call, omit this argument (or pass in ``None``). On
subsequent calls, pass in the ``cursor`` string returned by the previous
call.
path_prefix
If provided, results will be limited to files and folders
whose paths are equal to or under ``path_prefix``. The ``path_prefix`` is
fixed for a given cursor. Whatever ``path_prefix`` you use on the first
``delta()`` must also be passed in on subsequent calls that use the returned
cursor.
include_media_info
If True, delta will return additional media info for photos and videos
(the time a photo was taken, the GPS coordinates of a photo, etc.). There
is a delay between when a file is uploaded to Dropbox and when this
information is available; delta will only include a file in the changelist
once its media info is ready. The value you use on the first ``delta()`` must
also be passed in on subsequent calls that use the returned cursor.
Returns
A dict with four keys:
entries
A list of "delta entries" (described below).
reset
If ``True``, you should your local state to be an empty folder
before processing the list of delta entries. This is only ``True`` only
in rare situations.
cursor
A string that is used to keep track of your current state.
On the next call to delta(), pass in this value to return entries
that were recorded since the cursor was returned.
has_more
If ``True``, then there are more entries available; you can
call delta() again immediately to retrieve those entries. If ``False``,
then wait at least 5 minutes (preferably longer) before checking again.
Delta Entries: Each entry is a 2-item list of one of following forms:
- [*path*, *metadata*]: Indicates that there is a file/folder at the given
path. You should add the entry to your local path. (The *metadata*
value is the same as what would be returned by the ``metadata()`` call.)
- If the new entry includes parent folders that don't yet exist in your
local state, create those parent folders in your local state. You
will eventually get entries for those parent folders.
- If the new entry is a file, replace whatever your local state has at
*path* with the new entry.
- If the new entry is a folder, check what your local state has at
*path*. If it's a file, replace it with the new entry. If it's a
folder, apply the new *metadata* to the folder, but do not modify
the folder's children.
- [*path*, ``None``]: Indicates that there is no file/folder at the *path* on
Dropbox. To update your local state to match, delete whatever is at *path*,
including any children (you will sometimes also get "delete" delta entries
for the children, but this is not guaranteed). If your local state doesn't
have anything at *path*, ignore this entry.
Remember: Dropbox treats file names in a case-insensitive but case-preserving
way. To facilitate this, the *path* strings above are lower-cased versions of
the actual path. The *metadata* dicts have the original, case-preserved path.
"""
path = "/delta"
params = {'include_media_info': include_media_info}
if cursor is not None:
params['cursor'] = cursor
if path_prefix is not None:
params['path_prefix'] = path_prefix
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def longpoll_delta(self, cursor, timeout=None):
"""A long-poll endpoint to wait for changes on an account. In conjunction with
:meth:`delta()`, this call gives you a low-latency way to monitor an account for
file changes.
Note that this call goes to ``api-notify.dropbox.com`` instead of ``api.dropbox.com``.
Unlike most other API endpoints, this call does not require OAuth authentication.
The passed-in cursor can only be acquired via an authenticated call to :meth:`delta()`.
Parameters
cursor
A delta cursor as returned from a call to :meth:`delta()`. Note that a cursor
returned from a call to :meth:`delta()` with ``include_media_info=True`` is
incompatible with ``longpoll_delta()`` and an error will be returned.
timeout
An optional integer indicating a timeout, in seconds. The default value is
30 seconds, which is also the minimum allowed value. The maximum is 480
seconds. The request will block for at most this length of time, plus up
to 90 seconds of random jitter added to avoid the thundering herd problem.
Care should be taken when using this parameter, as some network
infrastructure does not support long timeouts.
Returns
The connection will block until there are changes available or a timeout occurs.
The response will be a dictionary that looks like the following example::
{"changes": false, "backoff": 60}
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#longpoll-delta
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (generally due to an invalid parameter; check e.error for details).
"""
path = "/longpoll_delta"
params = {'cursor': cursor}
if timeout is not None:
params['timeout'] = timeout
url, params, headers = self.request(path, params, method='GET', notification_server=True)
return self.rest_client.GET(url, headers)
def create_copy_ref(self, from_path):
"""Creates and returns a copy ref for a specific file. The copy ref can be
used to instantly copy that file to the Dropbox of another account.
Parameters
path
The path to the file for a copy ref to be created on.
Returns
A dictionary that looks like the following example::
{"expires": "Fri, 31 Jan 2042 21:01:05 +0000", "copy_ref": "z1X6ATl6aWtzOGq0c3g5Ng"}
"""
path = "/copy_ref/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {}, method='GET')
return self.rest_client.GET(url, headers)
def add_copy_ref(self, copy_ref, to_path):
"""Adds the file referenced by the copy ref to the specified path
Parameters
copy_ref
A copy ref string that was returned from a create_copy_ref call.
The copy_ref can be created from any other Dropbox account, or from the same account.
path
The path to where the file will be created.
Returns
A dictionary containing the metadata of the new copy of the file.
"""
path = "/fileops/copy"
params = {'from_copy_ref': copy_ref,
'to_path': format_path(to_path),
'root': self.session.root}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def file_copy(self, from_path, to_path):
"""Copy a file or folder to a new location.
Parameters
from_path
The path to the file or folder to be copied.
to_path
The destination path of the file or folder to be copied.
This parameter should include the destination filename (e.g.
from_path: '/test.txt', to_path: '/dir/test.txt'). If there's
already a file at the to_path it will raise an ErrorResponse.
Returns
A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#fileops-copy
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 403: An invalid copy operation was attempted
(e.g. there is already a file at the given destination,
or trying to copy a shared folder).
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root,
'from_path': format_path(from_path),
'to_path': format_path(to_path),
}
url, params, headers = self.request("/fileops/copy", params)
return self.rest_client.POST(url, params, headers)
def file_create_folder(self, path):
"""Create a folder.
Parameters
path
The path of the new folder.
Returns
A dictionary containing the metadata of the newly created folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#fileops-create-folder
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 403: A folder at that path already exists.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/create_folder", params)
return self.rest_client.POST(url, params, headers)
def file_delete(self, path):
"""Delete a file or folder.
Parameters
path
The path of the file or folder.
Returns
A dictionary containing the metadata of the just deleted file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#fileops-delete
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given path.
"""
params = {'root': self.session.root, 'path': format_path(path)}
url, params, headers = self.request("/fileops/delete", params)
return self.rest_client.POST(url, params, headers)
def file_move(self, from_path, to_path):
"""Move a file or folder to a new location.
Parameters
from_path
The path to the file or folder to be moved.
to_path
The destination path of the file or folder to be moved.
This parameter should include the destination filename (e.g. if
``from_path`` is ``'/test.txt'``, ``to_path`` might be
``'/dir/test.txt'``). If there's already a file at the
``to_path`` it will raise an ErrorResponse.
Returns
A dictionary containing the metadata of the new copy of the file or folder.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#fileops-move
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 403: An invalid move operation was attempted
(e.g. there is already a file at the given destination,
or moving a shared folder into a shared folder).
- 404: No file was found at given from_path.
- 503: User over storage quota.
"""
params = {'root': self.session.root,
'from_path': format_path(from_path),
'to_path': format_path(to_path)}
url, params, headers = self.request("/fileops/move", params)
return self.rest_client.POST(url, params, headers)
def metadata(self, path, list=True, file_limit=25000, hash=None,
rev=None, include_deleted=False, include_media_info=False):
"""Retrieve metadata for a file or folder.
A typical use would be::
folder_metadata = client.metadata('/')
print "metadata:", folder_metadata
which would return the metadata of the root folder. This
will look something like::
{
'bytes': 0,
'contents': [
{
'bytes': 0,
'icon': 'folder',
'is_dir': True,
'modified': 'Thu, 25 Aug 2011 00:03:15 +0000',
'path': '/Sample Folder',
'rev': '803beb471',
'revision': 8,
'root': 'dropbox',
'size': '0 bytes',
'thumb_exists': False
},
{
'bytes': 77,
'icon': 'page_white_text',
'is_dir': False,
'mime_type': 'text/plain',
'modified': 'Wed, 20 Jul 2011 22:04:50 +0000',
'path': '/magnum-opus.txt',
'rev': '362e2029684fe',
'revision': 221922,
'root': 'dropbox',
'size': '77 bytes',
'thumb_exists': False
}
],
'hash': 'efdac89c4da886a9cece1927e6c22977',
'icon': 'folder',
'is_dir': True,
'path': '/',
'root': 'app_folder',
'size': '0 bytes',
'thumb_exists': False
}
In this example, the root folder contains two things: ``Sample Folder``,
which is a folder, and ``/magnum-opus.txt``, which is a text file 77 bytes long
Parameters
path
The path to the file or folder.
list
Whether to list all contained files (only applies when
path refers to a folder).
file_limit
The maximum number of file entries to return within
a folder. If the number of files in the folder exceeds this
limit, an exception is raised. The server will return at max
25,000 files within a folder.
hash
Every folder listing has a hash parameter attached that
can then be passed back into this function later to save on
bandwidth. Rather than returning an unchanged folder's contents,
the server will instead return a 304.
rev
Optional revision of the file to retrieve the metadata for.
This parameter only applies for files. If omitted, you'll receive
the most recent revision metadata.
include_deleted
When listing contained files, include files that have been deleted.
include_media_info
If True, includes additional media info for photos and videos if
available (the time a photo was taken, the GPS coordinates of a photo,
etc.).
Returns
A dictionary containing the metadata of the file or folder
(and contained files if appropriate).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#metadata
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 304: Current folder hash matches hash parameters, so contents are unchanged.
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at given path.
- 406: Too many file entries to return.
"""
path = "/metadata/%s%s" % (self.session.root, format_path(path))
params = {'file_limit': file_limit,
'list': 'true',
'include_deleted': include_deleted,
'include_media_info': include_media_info,
}
if not list:
params['list'] = 'false'
if hash is not None:
params['hash'] = hash
if rev:
params['rev'] = rev
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def thumbnail(self, from_path, size='m', format='JPEG'):
"""Download a thumbnail for an image.
Parameters
from_path
The path to the file to be thumbnailed.
size
A string specifying the desired thumbnail size. Currently
supported sizes: ``"xs"`` (32x32), ``"s"`` (64x64), ``"m"`` (128x128),
``"l``" (640x480), ``"xl"`` (1024x768).
Check https://www.dropbox.com/developers/core/docs#thumbnails for
more details.
format
The image format the server should use for the returned
thumbnail data. Either ``"JPEG"`` or ``"PNG"``.
Returns
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given from_path,
or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
"""
assert format in ['JPEG', 'PNG'], \
"expected a thumbnail format of 'JPEG' or 'PNG', got %s" % format
path = "/thumbnails/%s%s" % (self.session.root, format_path(from_path))
url, params, headers = self.request(path, {'size': size, 'format': format},
method='GET', content_server=True)
return self.rest_client.request("GET", url, headers=headers, raw_response=True)
def thumbnail_and_metadata(self, from_path, size='m', format='JPEG'):
"""Download a thumbnail for an image alongwith its metadata.
Acts as a thin wrapper around thumbnail() (see :meth:`thumbnail()` comments for
more details)
Parameters
from_path
The path to the file to be thumbnailed.
size
A string specifying the desired thumbnail size. See :meth:`thumbnail()`
for details.
format
The image format the server should use for the returned
thumbnail data. Either ``"JPEG"`` or ``"PNG"``.
Returns
A pair of ``(response, metadata)``:
response
A :class:`dropbox.rest.RESTResponse` that is the HTTP response for
the API request. It is a file-like object that can be read from. You
must call ``close()`` when you're done.
metadata
A dictionary containing the metadata of the file whose thumbnail
was downloaded (see https://www.dropbox.com/developers/core/docs#metadata
for details).
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No file was found at the given from_path,
or files of that type cannot be thumbnailed.
- 415: Image is invalid and cannot be thumbnailed.
- 200: Request was okay but response was malformed in some way.
"""
thumbnail_res = self.thumbnail(from_path, size, format)
metadata = DropboxClient.__parse_metadata_as_dict(thumbnail_res)
return thumbnail_res, metadata
def search(self, path, query, file_limit=1000, include_deleted=False):
"""Search folder for filenames matching query.
Parameters
path
The folder to search within.
query
The query to search on (minimum 3 characters).
file_limit
The maximum number of file entries to return within a folder.
The server will return at max 1,000 files.
include_deleted
Whether to include deleted files in search results.
Returns
A list of the metadata of all matching files (up to
file_limit entries). For a detailed description of what
this call returns, visit:
https://www.dropbox.com/developers/core/docs#search
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
"""
path = "/search/%s%s" % (self.session.root, format_path(path))
params = {
'query': query,
'file_limit': file_limit,
'include_deleted': include_deleted,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def revisions(self, path, rev_limit=1000):
"""Retrieve revisions of a file.
Parameters
path
The file to fetch revisions for. Note that revisions
are not available for folders.
rev_limit
The maximum number of file entries to return within
a folder. The server will return at max 1,000 revisions.
Returns
A list of the metadata of all matching files (up to rev_limit entries).
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#revisions
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: No revisions were found at the given path.
"""
path = "/revisions/%s%s" % (self.session.root, format_path(path))
params = {
'rev_limit': rev_limit,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
def restore(self, path, rev):
"""Restore a file to a previous revision.
Parameters
path
The file to restore. Note that folders can't be restored.
rev
A previous rev value of the file to be restored to.
Returns
A dictionary containing the metadata of the newly restored file.
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#restore
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: Unable to find the file at the given revision.
"""
path = "/restore/%s%s" % (self.session.root, format_path(path))
params = {
'rev': rev,
}
url, params, headers = self.request(path, params)
return self.rest_client.POST(url, params, headers)
def media(self, path):
"""Get a temporary unauthenticated URL for a media file.
All of Dropbox's API methods require OAuth, which may cause problems in
situations where an application expects to be able to hit a URL multiple times
(for example, a media player seeking around a video file). This method
creates a time-limited URL that can be accessed without any authentication,
and returns that to you, along with an expiration time.
Parameters
path
The file to return a URL for. Folders are not supported.
Returns
A dictionary that looks like the following example::
{'url': 'https://dl.dropboxusercontent.com/1/view/abcdefghijk/example',
'expires': 'Thu, 16 Sep 2011 01:01:25 +0000'}
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#media
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: Unable to find the file at the given path.
"""
path = "/media/%s%s" % (self.session.root, format_path(path))
url, params, headers = self.request(path, method='GET')
return self.rest_client.GET(url, headers)
def share(self, path, short_url=True):
"""Create a shareable link to a file or folder.
Shareable links created on Dropbox are time-limited, but don't require any
authentication, so they can be given out freely. The time limit should allow
at least a day of shareability, though users have the ability to disable
a link from their account if they like.
Parameters
path
The file or folder to share.
Returns
A dictionary that looks like the following example::
{'url': u'https://db.tt/c0mFuu1Y', 'expires': 'Tue, 01 Jan 2030 00:00:00 +0000'}
For a detailed description of what this call returns, visit:
https://www.dropbox.com/developers/core/docs#shares
Raises
A :class:`dropbox.rest.ErrorResponse` with an HTTP status of:
- 400: Bad request (may be due to many things; check e.error for details).
- 404: Unable to find the file at the given path.
"""
path = "/shares/%s%s" % (self.session.root, format_path(path))
params = {
'short_url': short_url,
}
url, params, headers = self.request(path, params, method='GET')
return self.rest_client.GET(url, headers)
class ChunkedUploader(object):
    """Contains the logic around a chunked upload, which uploads a
    large file to Dropbox via the /chunked_upload endpoint.
    """

    def __init__(self, client, file_obj, length):
        self.client = client
        self.offset = 0          # number of bytes the server has acknowledged
        self.upload_id = None    # server-assigned upload id (set after first chunk)
        self.last_block = None   # chunk read from file_obj but not yet confirmed
        self.file_obj = file_obj
        self.target_length = length

    def upload_chunked(self, chunk_size = 4 * 1024 * 1024):
        """Uploads data from this ChunkedUploader's file_obj in chunks, until
        an error occurs. Throws an exception when an error occurs, and can
        be called again to resume the upload.

        Parameters
          chunk_size
            The number of bytes to put in each chunk. (Default 4 MB.)
        """
        while self.offset < self.target_length:
            next_chunk_size = min(chunk_size, self.target_length - self.offset)
            # `is None`, not `== None`: identity check is the correct idiom and
            # avoids surprises if a block type overrides __eq__.
            if self.last_block is None:
                self.last_block = self.file_obj.read(next_chunk_size)

            try:
                (self.offset, self.upload_id) = self.client.upload_chunk(
                    StringIO(self.last_block), next_chunk_size, self.offset, self.upload_id)
                self.last_block = None
            except ErrorResponse as e:
                # Handle the case where the server tells us our offset is wrong.
                must_reraise = True
                if e.status == 400:
                    reply = e.body
                    if "offset" in reply and reply['offset'] != 0 and reply['offset'] > self.offset:
                        # Resync to the server's offset and retry from there.
                        self.last_block = None
                        self.offset = reply['offset']
                        must_reraise = False
                if must_reraise:
                    raise

    def finish(self, path, overwrite=False, parent_rev=None):
        """Commits the bytes uploaded by this ChunkedUploader to a file
        in the user's Dropbox.

        Parameters
          path
            The full path of the file in the Dropbox.
          overwrite
            Whether to overwrite an existing file at the given path. (Default ``False``.)
            If overwrite is False and a file already exists there, Dropbox
            will rename the upload to make sure it doesn't overwrite anything.
            You need to check the metadata returned for the new name.
            This field should only be True if your intent is to potentially
            clobber changes to a file that you don't know about.
          parent_rev
            Optional rev field from the 'parent' of this upload.
            If your intent is to update the file at the given path, you should
            pass the parent_rev parameter set to the rev value from the most recent
            metadata you have of the existing file at that path. If the server
            has a more recent version of the file at the specified path, it will
            automatically rename your uploaded file, spinning off a conflict.
            Using this parameter effectively causes the overwrite parameter to be ignored.
            The file will always be overwritten if you send the most recent parent_rev,
            and it will never be overwritten if you send a less recent one.
        """
        path = "/commit_chunked_upload/%s%s" % (self.client.session.root, format_path(path))

        params = dict(
            overwrite = bool(overwrite),
            upload_id = self.upload_id
        )

        if parent_rev is not None:
            params['parent_rev'] = parent_rev

        url, params, headers = self.client.request(path, params, content_server=True)
        return self.client.rest_client.POST(url, params, headers)
# Allow access of ChunkedUploader via DropboxClient for backwards compatibility.
# Older callers referenced client.ChunkedUploader instead of the module-level class.
DropboxClient.ChunkedUploader = ChunkedUploader
class DropboxOAuth2FlowBase(object):
    """Shared plumbing for the OAuth 2 authorization helpers.

    Subclasses expose the user-facing ``start()``/``finish()`` pair; this
    base class knows how to build authorize URLs and redeem an authorization
    code for an access token.
    """

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=RESTClient):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.locale = locale
        self.rest_client = rest_client

    def _get_authorize_url(self, redirect_uri, state):
        # Build the /oauth2/authorize URL; redirect_uri and state are optional.
        query = dict(response_type='code',
                     client_id=self.consumer_key)
        if redirect_uri is not None:
            query['redirect_uri'] = redirect_uri
        if state is not None:
            query['state'] = state

        return self.build_url(BaseSession.WEB_HOST, '/oauth2/authorize', query)

    def _finish(self, code, redirect_uri):
        # Exchange an authorization code for (access_token, user_id).
        token_url = self.build_url(BaseSession.API_HOST, '/oauth2/token')
        query = {
            'grant_type': 'authorization_code',
            'code': code,
            'client_id': self.consumer_key,
            'client_secret': self.consumer_secret,
        }
        if self.locale is not None:
            query['locale'] = self.locale
        if redirect_uri is not None:
            query['redirect_uri'] = redirect_uri

        response = self.rest_client.POST(token_url, params=query)
        return response["access_token"], response["uid"]

    def build_path(self, target, params=None):
        """Build the path component for an API URL.

        This method urlencodes the parameters, adds them
        to the end of the target url, and puts a marker for the API
        version in front.

        Parameters
          target
            A target url (e.g. '/files') to build upon.
          params
            Optional dictionary of parameters (name to value).

        Returns
          The path and parameters components of an API URL.
        """
        # Python 2 byte strings must be encoded before quoting.
        if sys.version_info < (3,) and type(target) == str:
            target = target.encode("utf8")

        quoted_target = urllib.parse.quote(target)

        # Copy so the caller's dict is never mutated.
        query = dict(params) if params else {}
        if self.locale:
            query['locale'] = self.locale

        prefix = "/%s%s" % (BaseSession.API_VERSION, quoted_target)
        if not query:
            return prefix
        return "%s?%s" % (prefix, params_to_urlencoded(query))

    def build_url(self, host, target, params=None):
        """Build an API URL.

        Adds scheme and hostname to the path returned from build_path.

        Parameters
          target
            A target url (e.g. '/files') to build upon.
          params
            Optional dictionary of parameters (name to value).

        Returns
          The full API URL.
        """
        return "https://%s%s" % (host, self.build_path(target, params))
class DropboxOAuth2FlowNoRedirect(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper for apps that can't provide a redirect URI
    (such as the command-line example apps).

    Example::

        from dropbox.client import DropboxOAuth2FlowNoRedirect, DropboxClient
        from dropbox import rest as dbrest

        auth_flow = DropboxOAuth2FlowNoRedirect(APP_KEY, APP_SECRET)

        authorize_url = auth_flow.start()
        print("1. Go to: " + authorize_url)
        print("2. Click \\"Allow\\" (you might have to log in first).")
        print("3. Copy the authorization code.")
        auth_code = raw_input("Enter the authorization code here: ").strip()

        try:
            access_token, user_id = auth_flow.finish(auth_code)
        except dbrest.ErrorResponse as e:
            print('Error: %s' % (e,))
            return

        c = DropboxClient(access_token)
    """

    def __init__(self, consumer_key, consumer_secret, locale=None, rest_client=None):
        """
        Construct an instance.

        Parameters
          consumer_key
            Your API app's "app key".
          consumer_secret
            Your API app's "app secret".
          locale
            The locale of the user of your application, e.g. "en" or "en_US".
            Some API calls return localized data and error messages; this
            setting tells the server which locale to use. By default, the
            server uses "en_US".
          rest_client
            Optional :class:`dropbox.rest.RESTClient`-like object to use for
            making requests; defaults to the standard RESTClient.
        """
        actual_rest_client = RESTClient if rest_client is None else rest_client
        super(DropboxOAuth2FlowNoRedirect, self).__init__(
            consumer_key, consumer_secret, locale, actual_rest_client)

    def start(self):
        """
        Starts the OAuth 2 authorization process.

        Returns
          The URL for a page on Dropbox's website. This page will let the user
          "approve" your app, which gives your app permission to access the
          user's Dropbox account. Tell the user to visit this URL and approve
          your app.
        """
        # No redirect URI and no CSRF state: the user copies the code manually.
        return self._get_authorize_url(None, None)

    def finish(self, code):
        """
        If the user approves your app, they will be presented with an
        "authorization code". Have the user copy/paste that code into your app
        and then call this method to get an access token.

        Parameters
          code
            The authorization code shown to the user when they approved your app.

        Returns
          A pair of ``(access_token, user_id)``. ``access_token`` is a string
          that can be passed to DropboxClient. ``user_id`` is the Dropbox user
          ID (string) of the user that just approved your app.

        Raises
          The same exceptions as :meth:`DropboxOAuth2Flow.finish()`.
        """
        return self._finish(code, None)
class DropboxOAuth2Flow(DropboxOAuth2FlowBase):
    """
    OAuth 2 authorization helper. Use this for web apps.

    OAuth 2 has a two-step authorization process. The first step is having the user authorize
    your app. The second involves getting an OAuth 2 access token from Dropbox.

    Example::

        from dropbox.client import DropboxOAuth2Flow, DropboxClient

        def get_dropbox_auth_flow(web_app_session):
            redirect_uri = "https://my-web-server.org/dropbox-auth-finish"
            return DropboxOAuth2Flow(APP_KEY, APP_SECRET, redirect_uri,
                                     web_app_session, "dropbox-auth-csrf-token")

        # URL handler for /dropbox-auth-start
        def dropbox_auth_start(web_app_session, request):
            authorize_url = get_dropbox_auth_flow(web_app_session).start()
            redirect_to(authorize_url)

        # URL handler for /dropbox-auth-finish
        def dropbox_auth_finish(web_app_session, request):
            try:
                access_token, user_id, url_state = \\
                    get_dropbox_auth_flow(web_app_session).finish(request.query_params)
            except DropboxOAuth2Flow.BadRequestException as e:
                http_status(400)
            except DropboxOAuth2Flow.BadStateException as e:
                # Start the auth flow again.
                redirect_to("/dropbox-auth-start")
            except DropboxOAuth2Flow.CsrfException as e:
                http_status(403)
            except DropboxOAuth2Flow.NotApprovedException as e:
                flash('Not approved?  Why not?')
                return redirect_to("/home")
            except DropboxOAuth2Flow.ProviderException as e:
                logger.log("Auth error: %s" % (e,))
                http_status(403)
    """

    def __init__(self, consumer_key, consumer_secret, redirect_uri, session,
                 csrf_token_session_key, locale=None, rest_client=None):
        """
        Construct an instance.

        Parameters
          consumer_key
            Your API app's "app key".
          consumer_secret
            Your API app's "app secret".
          redirect_uri
            The URI that the Dropbox server will redirect the user to after the user
            finishes authorizing your app. This URI must be HTTPS-based and pre-registered with
            the Dropbox servers, though localhost URIs are allowed without pre-registration and can
            be either HTTP or HTTPS.
          session
            A dict-like object that represents the current user's web session (will be
            used to save the CSRF token).
          csrf_token_session_key
            The key to use when storing the CSRF token in the session (for
            example: "dropbox-auth-csrf-token").
          locale
            The locale of the user of your application. For example "en" or "en_US".
            Some API calls return localized data and error messages; this setting
            tells the server which locale to use. By default, the server uses "en_US".
          rest_client
            Optional :class:`dropbox.rest.RESTClient`-like object to use for making
            requests.
        """
        if rest_client is None: rest_client = RESTClient
        super(DropboxOAuth2Flow, self).__init__(consumer_key, consumer_secret, locale, rest_client)
        self.redirect_uri = redirect_uri
        self.session = session
        self.csrf_token_session_key = csrf_token_session_key

    def start(self, url_state=None):
        """
        Starts the OAuth 2 authorization process.

        This function builds an "authorization URL". You should redirect your user's browser to
        this URL, which will give them an opportunity to grant your app access to their Dropbox
        account. When the user completes this process, they will be automatically redirected to
        the ``redirect_uri`` you passed in to the constructor.

        This function will also save a CSRF token to ``session[csrf_token_session_key]`` (as
        provided to the constructor). This CSRF token will be checked on :meth:`finish()` to
        prevent request forgery.

        Parameters
          url_state
            Any data that you would like to keep in the URL through the
            authorization process. This exact value will be returned to you by :meth:`finish()`.

        Returns
          The URL for a page on Dropbox's website. This page will let the user "approve"
          your app, which gives your app permission to access the user's Dropbox account.
          Tell the user to visit this URL and approve your app.
        """
        # urlsafe_b64encode returns bytes under Python 3; decode so the token
        # can be stored in the session and concatenated with url_state (str).
        csrf_token = base64.urlsafe_b64encode(os.urandom(16)).decode('ascii')
        state = csrf_token
        if url_state is not None:
            state += "|" + url_state
        self.session[self.csrf_token_session_key] = csrf_token

        return self._get_authorize_url(self.redirect_uri, state)

    def finish(self, query_params):
        """
        Call this after the user has visited the authorize URL (see :meth:`start()`), approved your
        app and was redirected to your redirect URI.

        Parameters
          query_params
            The query parameters on the GET request to your redirect URI.

        Returns
          A tuple of ``(access_token, user_id, url_state)``. ``access_token`` can be used to
          construct a :class:`DropboxClient`. ``user_id`` is the Dropbox user ID (string) of the
          user that just approved your app. ``url_state`` is the value you originally passed in to
          :meth:`start()`.

        Raises
          :class:`BadRequestException`
            If the redirect URL was missing parameters or if the given parameters were not valid.
          :class:`BadStateException`
            If there's no CSRF token in the session.
          :class:`CsrfException`
            If the ``'state'`` query parameter doesn't contain the CSRF token from the user's
            session.
          :class:`NotApprovedException`
            If the user chose not to approve your app.
          :class:`ProviderException`
            If Dropbox redirected to your redirect URI with some unexpected error identifier
            and error message.
        """
        csrf_token_from_session = self.session[self.csrf_token_session_key]

        # Check well-formedness of request.
        state = query_params.get('state')
        if state is None:
            raise self.BadRequestException("Missing query parameter 'state'.")

        error = query_params.get('error')
        error_description = query_params.get('error_description')
        code = query_params.get('code')
        if error is not None and code is not None:
            raise self.BadRequestException("Query parameters 'code' and 'error' are both set; "
                                           " only one must be set.")
        if error is None and code is None:
            raise self.BadRequestException("Neither query parameter 'code' or 'error' is set.")

        # Check CSRF token
        if csrf_token_from_session is None:
            # Fixed: previously raised self.BadStateError, which does not exist
            # (it would surface as AttributeError); the defined class is
            # BadStateException.
            raise self.BadStateException("Missing CSRF token in session.")
        if len(csrf_token_from_session) <= 20:
            raise AssertionError("CSRF token unexpectedly short: %r" % (csrf_token_from_session,))

        # The state is "<csrf_token>" or "<csrf_token>|<url_state>".
        split_pos = state.find('|')
        if split_pos < 0:
            given_csrf_token = state
            url_state = None
        else:
            given_csrf_token = state[0:split_pos]
            url_state = state[split_pos+1:]

        if not _safe_equals(csrf_token_from_session, given_csrf_token):
            raise self.CsrfException("expected %r, got %r" % (csrf_token_from_session,
                                                              given_csrf_token))
        # One-shot token: remove it so a replayed redirect cannot reuse it.
        del self.session[self.csrf_token_session_key]

        # Check for error identifier
        if error is not None:
            if error == 'access_denied':
                # The user clicked "Deny"
                if error_description is None:
                    raise self.NotApprovedException("No additional description from Dropbox")
                else:
                    raise self.NotApprovedException("Additional description from Dropbox: " +
                                                    error_description)
            else:
                # All other errors
                full_message = error
                if error_description is not None:
                    full_message += ": " + error_description
                # Fixed: previously raised self.ProviderError, which does not
                # exist; the defined class is ProviderException.
                raise self.ProviderException(full_message)

        # If everything went ok, make the network call to get an access token.
        access_token, user_id = self._finish(code, self.redirect_uri)
        return access_token, user_id, url_state

    class BadRequestException(Exception):
        """
        Thrown if the redirect URL was missing parameters or if the
        given parameters were not valid.

        The recommended action is to show an HTTP 400 error page.
        """
        pass

    class BadStateException(Exception):
        """
        Thrown if all the parameters are correct, but there's no CSRF token in the session. This
        probably means that the session expired.

        The recommended action is to redirect the user's browser to try the approval process again.
        """
        pass

    class CsrfException(Exception):
        """
        Thrown if the given 'state' parameter doesn't contain the CSRF
        token from the user's session.

        This is blocked to prevent CSRF attacks.

        The recommended action is to respond with an HTTP 403 error page.
        """
        pass

    class NotApprovedException(Exception):
        """
        The user chose not to approve your app.
        """
        pass

    class ProviderException(Exception):
        """
        Dropbox redirected to your redirect URI with some unexpected error identifier and error
        message.

        The recommended action is to log the error, tell the user something went wrong, and let
        them try again.
        """
        pass
def _safe_equals(a, b):
if len(a) != len(b): return False
res = 0
for ca, cb in zip(a, b):
res |= ord(ca) ^ ord(cb)
return res == 0
# Syntactic check for an OAuth 2 bearer token: one or more token characters
# followed by optional '=' padding, anchored at both ends of the string.
_OAUTH2_ACCESS_TOKEN_PATTERN = re.compile(r'\A[-_~/A-Za-z0-9\.\+]+=*\Z')
# From the "Bearer" token spec, RFC 6750.
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
|
ArcherSys/ArcherSys
|
archersys/Lib/site-packages/dropbox/client.py
|
Python
|
mit
| 202,937
|
[
"VisIt"
] |
aebe852c6f83f4d8cde7897ea51ac9981062c9e87e201dd15cb453194559b606
|
# Example: build a one-molecule mastic System for benzamidine (BEN) from a PDB file.
import os.path as osp
from mastic.system import System, SystemType, AssociationType, Association

from rdkit import Chem
from mastic.interfaces.rdkit import RDKitMoleculeWrapper

# Input structure location (user-specific path).
trypsin_dir = osp.expanduser("~/Dropbox/lab/trypsin")
ben_pdb_path = osp.join(trypsin_dir, "BEN_Hs.pdb")

# Load benzamidine with RDKit; keep hydrogens and skip sanitization
# (the PDB may not satisfy RDKit's valence checks).
BEN_rdkit = Chem.MolFromPDBFile(ben_pdb_path, removeHs=False, sanitize=False)

BEN_rdkit_wrapper = RDKitMoleculeWrapper(BEN_rdkit, mol_name="BEN")

print("making molecule type for benzamidine")
BENType = BEN_rdkit_wrapper.make_molecule_type(find_features=True)

# Coordinates come from the first (index 0) RDKit conformer.
BEN_coords = BEN_rdkit_wrapper.get_conformer_coords(0)

member_coords = [BEN_coords]
member_types = [BENType]

# A system type with a single member molecule...
system_attrs = {'name' : 'benzamidine-system'}
BenzamidineSystemType = SystemType("BenzamidineSystemType",
                                   member_types=member_types,
                                   **system_attrs)

# ...and a placeholder association covering member 0 only.
MockAssocType = AssociationType('MockAssocType', system_type=BenzamidineSystemType,
                                selection_map={0 : None},
                                selection_types=[None],
                                name='uuuhhhh')

BenzamidineSystemType.add_association_type(MockAssocType)

# Instantiate the system from the coordinates and pull out its parts.
bensys = BenzamidineSystemType.to_system(member_coords)
molecule = bensys.molecules[0]
association = bensys.associations[0]
|
salotz/mast
|
prototypes/system_example.py
|
Python
|
mit
| 1,341
|
[
"RDKit"
] |
463992978c6a657d3ff22418442d911ed4e8443e021cdac08993a37db2912e84
|
#!/usr/bin/env python
import os, argparse, tempfile, operator, pickle
from collections import defaultdict
import pybedtools, pysam
def rev_comp(str):
    """
    Given a DNA string, returns the reverse complement

    >>> rev_comp("AATTGGCC")
    'GGCCAATT'
    """
    complement = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}
    return "".join(complement[base] for base in reversed(str))
def get_kmers(file_path):
    """
    Counts the number of occurrences of each sequence in a FASTA file
    and returns a dictionary with the format {sequence:count}, e.g. {'AATTCC':5}

    Note: Ignores anything with Ns in, and treats all DNA as uppercase (i.e. not repeatmasked)

    Fixes over the previous version: blank lines are no longer counted as
    empty-string k-mers, and the N-filter runs after uppercasing so that
    lowercase 'n' bases are excluded as well.
    """
    kmers = defaultdict(int)
    with open(file_path, 'r') as fasta:
        for line in fasta:
            if line.startswith(">"):
                continue  # FASTA header line
            seq = line.strip().upper()
            if not seq or 'N' in seq:
                continue  # skip blank lines and ambiguous bases
            kmers[seq] += 1
    return kmers
def generate_6mer_bed(bam_file, gdict):
    """
    Loads data from a BAM file, and prints the 6-mers from the 5' ends of the reads
    as a BED file to a temp file, returns the file path of this temp file

    Parameters: bam_file is a path to a sorted, indexed BAM file; gdict maps
    chromosome name -> chromosome length (see genome_dic).
    """
    # mode="w" is required: NamedTemporaryFile defaults to binary ("w+b"),
    # and print() writes str, which raises TypeError on a binary handle.
    outfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
    samfile = pysam.AlignmentFile(bam_file, "rb")
    for read in samfile:
        # Ignore unmapped reads
        if read.is_unmapped:
            continue
        chrom = samfile.getrname(read.reference_id)
        # Membership test directly on the dict (O(1)) instead of
        # materializing list(gdict.keys()) for every read.
        if chrom not in gdict:
            continue
        # Determine which end of the read is the 5' end
        if read.is_reverse:
            strand = "-"
            startbp, endbp = read.reference_end - 3, read.reference_end + 3
        else:
            strand = "+"
            startbp, endbp = read.reference_start - 3, read.reference_start + 3
        # Only keep windows fully inside the chromosome.
        if startbp > 0 and endbp < gdict[chrom]:
            print("\t".join(str(f) for f in (chrom, startbp, endbp, 0, 0, strand)), file=outfile)
    outfile.close()
    return outfile.name
def genome_dic(g_file):
    """
    Make a dictionary of chromosome sizes from a .chrom.sizes file
    e.g. {chrom_name:chrom_size}
    """
    sizes = {}
    with open(g_file) as handle:
        for line in handle:
            fields = line.split()
            sizes[fields[0]] = int(fields[1])
    return sizes
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Calculates the 6-mer 5\' insertion bias for a NGS dataset')
    parser.add_argument("regions", help="BED file of the regions you want to exclude from calculating the bias. This is usually the DHSs.")
    parser.add_argument("reads", help="The sorted, indexed BAM file containing the DNase-seq data")
    parser.add_argument("genome_sequence", help="The sorted, indexed FASTA file containing the genome sequence")
    parser.add_argument("genome_size", help="The .chrom.sizes file containing chromosome sizes generated using something like \"mysql --user=genome --host=genome-mysql.cse.ucsc.edu -A -e \"select chrom, size from hg19.chromInfo\" > hg19.chrom.sizes\"")
    parser.add_argument("output", help="output file prefix to write the observed/expected ratios to (will append .txt and .pickle)")
    args = parser.parse_args()

    test_bam = args.reads
    test_bed = args.regions
    genome_sequence = args.genome_sequence
    genome = args.genome_size
    outfile = args.output

    # First, pull all the 6mers surrounding 5' ends
    print("Determining transposition sites (roughly 60s per 1E6 reads)...")
    bed_file_for_6mers = generate_6mer_bed(test_bam, genome_dic(genome))
    all_6mers = pybedtools.BedTool(bed_file_for_6mers)

    # Can't guarantee peaks are sorted, so sort them
    peaks = pybedtools.BedTool(test_bed)
    peaks = peaks.sort()

    # Get the transposition sites outside of the peaks only
    print("Filtering for those outside peaks")
    bg_6mers = all_6mers.intersect(peaks, v=True)

    print("Generating shuffled background")
    shuf_bg_6mers = bg_6mers.shuffle(g=genome, noOverlapping=True, excl=peaks.fn)

    print("Generating FASTA file for 6mers...")
    genome_fasta = pybedtools.BedTool(genome_sequence)
    observed_cuts = bg_6mers.sequence(fi=genome_fasta)
    print("Generating FASTA file for shuffled 6mers...")
    shuffled_cuts = shuf_bg_6mers.sequence(fi=genome_fasta)

    print("Getting 6mers for observed...")
    observed = get_kmers(observed_cuts.seqfn)
    print("Getting 6mers for shuffled...")
    expected = get_kmers(shuffled_cuts.seqfn)

    print("Calculating...")
    # NOTE(review): raises ZeroDivisionError if a 6-mer observed in the real
    # data never appears in the shuffled background (expected is a defaultdict
    # returning 0) -- confirm this cannot happen, or add a pseudocount.
    enriched = {i: observed[i] / float(expected[i]) for i in list(observed.keys())}

    print("Dumping bias txt file...")
    with open(outfile + ".txt", 'w') as ofile:
        for i in sorted(list(enriched.items()), key=operator.itemgetter(1)):
            print("\t".join(map(str, i)), file=ofile)

    print("Writing bias pickle file...")
    totalsum = float(sum(enriched.values()))
    whatdic = {key: {'forward': val / totalsum, 'reverse': enriched[rev_comp(key)] / totalsum} for key, val in enriched.items()}
    # Fixed: pickle streams are binary, so the file must be opened "wb";
    # opening with "w" makes pickle.dump raise TypeError under Python 3.
    with open(outfile + ".pickle", "wb") as bias_file:
        pickle.dump(whatdic, bias_file)

    os.remove(bed_file_for_6mers)
|
jpiper/pyDNase
|
pyDNase/scripts/dnase_bias_estimator.py
|
Python
|
mit
| 4,662
|
[
"pysam"
] |
77f7d12ff3332794c6170121914566e4a5de511f014019ed0bcf9080475f3f74
|
#(c) 2012 Massachusetts Institute of Technology. All Rights Reserved
# Code written by: Maksim Imakaev (imakaev@mit.edu)
#TODO:(MIU) Write tests for this module!
"""
Binned data - analysis of HiC, binned to resolution.
Concepts
--------
class Binned Data allows low-level manipulation of multiple HiC datasets,
binned to the same resolution from the same genome.
When working with multiple datasets, all the filters will be synchronized,
so only bins present in all datasets will be considered for the analysis.
Removal of bins from one dataset will remove them from the others.
E.g. removing 1% of bins with lowest # of count might remove more than 1% of
total bins, when working with 2 or more datasets.
Class has significant knowledge about filters that have been applied.
If an essential filter was not applied, it will throw an exception;
if advised filter is not applied, it will throw a warning.
However, it does not guarantee dependencies, and you have to think yourself.
Most of the methods have an optional "force" argument that will
ignore dependencies.
We provide example scripts that show ideal protocols for certain types of
the analysis, but they don't cover the realm of all possible manipulations
that can be performed with this class.
Input data
----------
method :py:func:`SimpleLoad <binnedData.simpleLoad>` may be used to load
the data. It automatically checks for possible genome length mismatch.
This method works best with h5dict files, created by fragmentHiC.
In this case you just need to supply the filename.
It can also accept any dictionary-like object with the following keys,
where all but "heatmap" is optional.
* ["heatmap"] : all-by-all heatmap
* ["singles"] : vector of SS reads, optional
* ["frags"] : number of rsites per bin, optional
* ["resolution"] : resolution
All information about the genome, including GC content and restriction sites,
can be obtained from the Genome class.
Genomic tracks can be loaded using an automated parser that accepts bigWig
files and fixed step wiggle files.
See documentation for :py:func:`experimentalBinnedData.loadWigFile` that
describes exactly how the data is averaged and parsed.
Variables
---------
self.dataDict - dictionary with heatmaps; keys are provided when loading
the data.
self.singlesDict - dictionary with SS read vectors. Keys are the same.
self.fragsDict - dictionary with fragment density data
self.trackDict - dictionary with genomic tracks, such as GC content.
Custom tracks should be added here.
self.biasDict - dictionary with biases as calculated by
iterative correction (incomplete)
self.PCDict - dictionary with principal components of each datasets.
Keys as in dataDict
self.EigEict - dictionary with eigenvectors for each dataset.
Keys as in datadict.
Hierarchy of filters
--------------------
This hierarchy attempts to connect all logical dependencies between
filters into one diagram.
This includes both biological dependencies and programming dependencies.
As a result, it's incomplete and might be not 100% accurate.
Generally filters from the next group should be applied after filters
from previous groups, if any.
Examples of the logic are below:
* First, apply filters that don't depend on counts,
i.e. remove diagonal and low-coverage bins.
* Second, remove regions with poor coverage;
do this before chaining heatmaps with other filters.
* Fake translocations before truncating trans, as translocations are very
high-count regions, and truncTrans will truncate them, not actual trans reads
* Faking reads currently requires zeros to be removed.
This will be changed later
* Fake cis counts after truncating trans, so that they don't get faked with
extremely high-count outliers in a trans-map
* Perform iterative correction after all the filters are applied
* Perform PCA after IC of trans data, and with zeros removed
1. Remove Diagonal, removeBySequencedCount
2. RemovePoorRegions, RemoveStandalone (these two filters are not transitive)
3. fakeTranslocations
4. truncTrans
5. fakeCis
6. iterative correction (does not require removeZeros)
7. removeZeros
8. PCA (Requires removeZeros)
9. RestoreZeros
Besides that, filter dependencies are:
* Faking reads requires: removeZeros
* PCA requires: removeZeros, fakeCis
* IC with SS requires: no previous iterative corrections, no removed cis reads
* IC recommends removal of poor regions
Other filter dependencies, including advised but not required filters, will be
issued as warnings during runtime of a program.
-------------------------------------------------------------------------------
API documentation
-----------------
"""
import os
from mirnylib import numutils
import warnings
from mirnylib.numutils import PCA, EIG, correct, \
ultracorrectSymmetricWithVector, isInteger, \
observedOverExpected, ultracorrect, adaptiveSmoothing, \
removeDiagonals, fillDiagonal
from mirnylib.genome import Genome
import numpy as np
from math import exp
from mirnylib.h5dict import h5dict
from scipy.stats.stats import spearmanr
from mirnylib.numutils import fakeCisImpl
class binnedData(object):
"""Base class to work with binned data, the most documented and
robust part of the code. Further classes for other analysis
are inherited from this class.
"""
def __init__(self, resolution, genome, readChrms=["#", "X"]):
    """
    self.__init__ - initializes an empty dataset.

    This method sets up a Genome object and resolution.

    Genome object specifies genome version and inclusion/exclusion
    of sex chromosomes.

    Parameters
    ----------
    resolution : int
        Resolution of all datasets
    genome : genome Folder or Genome object
    readChrms : list of str
        Chromosome selectors forwarded to Genome when *genome* is a path.
    """
    # Accept either a path to a genome folder or a ready Genome object.
    if type(genome) == str:
        self.genome = Genome(genomePath=genome, readChrms=readChrms)
    else:
        self.genome = genome

    assert hasattr(self.genome, "chrmCount")
    # NOTE(review): if resolution is None and the attribute was never set,
    # the setResolution call below raises AttributeError -- confirm callers
    # always pass an int here.
    if resolution is not None:
        self.resolution = resolution
    self.chromosomes = self.genome.chrmLens
    # setResolution must run before _initChromosomes: the bin-level
    # mappings loaded there depend on it.
    self.genome.setResolution(self.resolution)
    self._initChromosomes()

    # Per-dataset storage; keys are the names given to simpleLoad().
    self.dataDict = {}        # heatmaps
    self.biasDict = {}        # iterative-correction biases
    self.trackDict = {}       # genomic tracks (e.g. GC content)
    self.singlesDict = {}     # single-sided (SS) read vectors
    self.fragsDict = {}       # fragment density per bin
    self.PCDict = {}          # principal components per dataset
    self.EigDict = {}         # eigenvectors per dataset
    self.eigEigenvalueDict = {}
    self.PCAEigenvalueDict = {}
    # Groups of per-bin dictionaries that filters keep synchronized.
    self.dicts = [self.trackDict, self.biasDict, self.singlesDict,
                  self.fragsDict]
    self.eigDicts = [self.PCDict, self.EigDict]

    self._loadGC()
    # Records which filters were applied, for dependency checks.
    self.appliedOperations = {}
def _initChromosomes(self):
    "internal: loads mappings from the genome class based on resolution"
    # Genome-wide bin bookkeeping, all expressed in bin units.
    self.chromosomeStarts = self.genome.chrmStartsBinCont
    self.centromerePositions = self.genome.cntrMidsBinCont
    self.chromosomeEnds = self.genome.chrmEndsBinCont
    self.trackLength = self.genome.numBins

    self.chromosomeCount = self.genome.chrmCount
    self.chromosomeIndex = self.genome.chrmIdxBinCont
    self.positionIndex = self.genome.posBinCont
    # Arm index: 2 * chromosome + (0 for bins before the centromere
    # midpoint, 1 for bins after it).
    self.armIndex = self.chromosomeIndex * 2 + \
        np.array(self.positionIndex > self.genome.cntrMids
                 [self.chromosomeIndex], int)
def _giveMask(self):
    """Returns a boolean mask of all bins with non-zero read counts
    (intersection over all loaded datasets); also stored as self.mask.
    """
    # next(iter(...)) works on both Python 2 and 3 (py3 dict views are not
    # indexable); np.bool_ replaces np.bool, which was removed in NumPy 1.24.
    firstData = next(iter(self.dataDict.values()))
    self.mask = np.ones(len(firstData), np.bool_)
    for data in self.dataDict.values():
        datasum = np.sum(data, axis=0)
        datamask = datasum > 0
        # Keep only bins that are non-empty in every dataset.
        self.mask *= datamask
    return self.mask
def _giveMask2D(self):
    """Returns outer product of _giveMask with itself,
    i.e. bins with possibly non-zero counts"""
    self._giveMask()
    column = self.mask[:, None]
    row = self.mask[None, :]
    self.mask2D = column * row
    return self.mask2D
def _loadGC(self):
    "loads GC content at given resolution"
    # genome.GCBin is per-chromosome; concatenate into one genome-wide track.
    self.trackDict["GC"] = np.concatenate(self.genome.GCBin)
def _checkItertiveCorrectionError(self):
    """internal method for checking if iterative correction
    might be bad to apply

    Warns when the sparsest column has too few counts, and raises when
    iterative correction would be numerically dangerous.
    """
    for value in self.dataDict.values():
        if isInteger(value) == True:
            # Integer data: column sums are raw read counts.
            s = np.sum(value, axis=0)
            sums = np.sort(s[s != 0])
            if sums[0] < 100:
                # Relative IC error scales roughly as 1/sqrt(counts).
                error = int(100. / np.sqrt(sums[0]))
                message1 = "Lowest 5 sums of an array rows are: " + \
                    str(sums[:5])
                warnings.warn("\n%s\nIterative correction will lead to \
about %d %% relative error for certain columns" %
                              (message1, error))
                if sums[0] < 5:
                    raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
        else:
            # Floating-point data: use the number of non-zero entries
            # per column as a proxy for coverage instead.
            s = np.sum(value > 0, axis=0)
            sums = np.sort(s[s != 0])
            if sums[0] < min(100, len(value) / 2):
                error = int(100. / np.sqrt(sums[0]))
                print "Got floating-point array for correction. Rows with \
5 least entrees are:", sums[:5]
                warnings.warn("\nIterative correction might lead to about\
 %d %% relative error for certain columns" % error)
                if sums[0] < 4:
                    raise StandardError("Iterative correction is \
very dangerous. Use force=true to override.")
def _checkAppliedOperations(self, neededKeys=[],
                            advicedKeys=[],
                            excludedKeys=[]):
    "Internal method to check if all needed operations were applied"
    # NOTE: the mutable default arguments are shared between calls; they
    # are only read here, so this is safe, but do not mutate them.
    if (True in [i in self.appliedOperations for i in excludedKeys]):
        # A prohibited filter has already been applied -- hard error.
        print "Operations that are not allowed:", excludedKeys
        print "applied operations: ", self.appliedOperations
        print "use 'force = True' to override this message"
        raise StandardError("Prohibited filter was applied")

    if (False in [i in self.appliedOperations for i in neededKeys]):
        # At least one required filter is missing -- hard error.
        print "needed operations:", neededKeys
        print "applied operations:", self.appliedOperations
        print "use 'force = True' to override this message"
        raise StandardError("Critical filter not applied")

    if (False in [i in self.appliedOperations for i in advicedKeys]):
        # Advised (but not required) filters missing -- warn only.
        print "Adviced operations:", advicedKeys
        print "Applied operations:", self.appliedOperations
        warnings.warn("\nNot all adviced filters applied")
def _recoverOriginalReads(self, key):
    """Attempts to recover original read counts from the data

    If data is integer, returns data.
    If not, attepts to revert iterative correction
    and return original copy.

    This method does not modify the dataset!
    """
    data = self.dataDict[key]
    if "Corrected" not in self.appliedOperations:
        # No correction applied: the heatmap should still hold raw counts.
        if not isInteger(data):
            warnings.warn("Data was not corrected, but is not integer")
            return None
        return data

    if key not in self.biasDict:
        warnings.warn("Correction was applied, "
                      "but bias information is missing!")
        return None

    # Undo iterative correction by multiplying the biases back in
    # (rows first, then columns).
    bias = self.biasDict[key]
    recovered = data * bias[:, None]
    recovered *= bias[None, :]
    if not isInteger(recovered):
        warnings.warn("Attempted recovery of reads, but "
                      "data is not integer")
        return None
    return recovered
def simpleLoad(self, in_data, name, chromosomeOrder=None):
"""Loads data from h5dict file or dict-like object
Parameters
----------
in_data : str or dict-like
h5dict filename or dictionary-like object with input data,
stored under the key "heatmap", and a vector of SS reads,
stored under the key "singles".
name : str
Key under which to store dataset in self.dataDict
chromosomeOrder : None or list
If file to load is a byChromosome map, use this to define chromosome order
"""
if type(in_data) == str:
path = os.path.abspath(os.path.expanduser(in_data))
if os.path.exists(path) == False:
raise IOError("HDF5 dict do not exist, %s" % path)
alldata = h5dict(path, mode="r")
else:
alldata = in_data
if type(alldata) == h5dict:
if ("0 0" in alldata.keys()) and ("heatmap" not in alldata.keys()):
if chromosomeOrder != None:
chromosomes = chromosomeOrder
else:
chromosomes = xrange(self.chromosomeCount)
datas = []
for i in chromosomes:
datas.append(np.concatenate([alldata["{0} {1}".format(i, j)] for j in chromosomes], axis=1))
newdata = {"heatmap": np.concatenate(datas)}
for i in alldata.keys():
newdata[i] = alldata[i]
alldata = newdata
self.dataDict[name] = np.asarray(alldata["heatmap"], dtype=np.double)
try:
self.singlesDict[name] = alldata["singles"]
except:
print "No SS reads found"
try:
if len(alldata["frags"]) == self.genome.numBins:
self.fragsDict[name] = alldata["frags"]
else:
print "Different bin number in frag dict"
except:
pass
if "resolution" in alldata:
if self.resolution != alldata["resolution"]:
print "resolution mismatch!!!"
print "--------------> Bye <-------------"
raise StandardError("Resolution mismatch! ")
if self.genome.numBins != len(alldata["heatmap"]):
print "Genome length mismatch!!!"
print "source genome", len(alldata["heatmap"])
print "our genome", self.genome.numBins
print "Check for readChrms parameter when you identify the genome"
raise StandardError("Genome size mismatch! ")
    def export(self, name, outFilename, byChromosome=False, **kwargs):
        """
        Exports current heatmaps and SS files to an h5dict.

        Parameters
        ----------
        name : str
            Key for the dataset to export
        outFilename : str
            Where to export
        byChromosome : bool or "cis" or "all"
            save by chromosome heatmaps.
            Ignore SS reads.
            True means "all"

        Raises
        ------
        ValueError
            If the legacy ``out_filename`` kwarg is passed, or if ``name``
            is not a loaded dataset.
        """
        # Reject the legacy keyword spelling explicitly.
        if "out_filename" in kwargs.keys():
            raise ValueError("out_filename replaced with outFilename!")
        if name not in self.dataDict:
            raise ValueError("No data {name}".format(name=name))
        toexport = {}
        if byChromosome is False:
            # Whole-genome export: one heatmap plus optional SS/frag data.
            toexport["heatmap"] = self.dataDict[name]
            if name in self.singlesDict:
                toexport["singles"] = self.singlesDict[name]
            if name in self.fragsDict:
                toexport["frags"] = self.fragsDict[name]
        else:
            # Per-chromosome export: one block per (i, j) chromosome pair,
            # stored under "i j" keys; "cis" keeps only diagonal blocks.
            hm = self.dataDict[name]
            for i in xrange(self.genome.chrmCount):
                for j in xrange(self.genome.chrmCount):
                    if (byChromosome == "cis") and (i != j):
                        continue
                    st1 = self.chromosomeStarts[i]
                    end1 = self.chromosomeEnds[i]
                    st2 = self.chromosomeStarts[j]
                    end2 = self.chromosomeEnds[j]
                    toexport["{0} {1}".format(i, j)] = hm[st1:end1,
                                                          st2:end2]
        # Metadata shared by both export flavours.
        toexport["resolution"] = self.resolution
        toexport["genome"] = self.genome.folderName
        toexport["binNumber"] = len(self.chromosomeIndex)
        toexport["genomeIdxToLabel"] = self.genome.idx2label
        toexport["chromosomeStarts"] = self.chromosomeStarts
        toexport["chromosomeIndex"] = self.chromosomeIndex
        toexport["positionIndex"] = self.positionIndex
        myh5dict = h5dict(outFilename, mode="w")
        myh5dict.update(toexport)
def removeDiagonal(self, m=1):
"""Removes all bins on a diagonal, and bins that are up to m away
from the diagonal, including m.
By default, removes all bins touching the diagonal.
Parameters
----------
m : int, optional
Number of bins to remove
"""
for i in self.dataDict.keys():
self.dataDict[i] = np.asarray(
self.dataDict[i], dtype=np.double, order="C")
removeDiagonals(self.dataDict[i], m)
self.appliedOperations["RemovedDiagonal"] = True
self.removedDiagonalValue = m
    def removeStandalone(self, offset=3):
        """removes standalone groups of bins
        (groups of less-than-offset bins)

        Parameters
        ----------
        offset : int
            Maximum length of group of bins to be removed
        """
        # Runs of consecutive alive bins: +1 marks a run start, -1 its end.
        diffs = np.diff(np.array(np.r_[False, self._giveMask(), False], int))
        begins = np.nonzero(diffs == 1)[0]
        ends = np.nonzero(diffs == -1)[0]
        # Keep only runs short enough to count as "standalone".
        beginsmask = (ends - begins) <= offset
        newbegins = begins[beginsmask]
        newends = ends[beginsmask]
        print "removing %d standalone bins" % np.sum(newends - newbegins)
        mask = self._giveMask()
        for i in xrange(len(newbegins)):
            mask[newbegins[i]:newends[i]] = False
        mask2D = mask[:, None] * mask[None, :]
        # Zero the rows and columns of every removed bin in each heatmap.
        antimask = np.nonzero(mask2D.flat == False)[0]
        for i in self.dataDict.values():
            i.flat[antimask] = 0
        self.appliedOperations["RemovedStandalone"] = True
def removeBySequencedCount(self, sequencedFraction=0.5):
"""
Removes bins that have less than sequencedFraction*resolution
sequenced counts.
This filters bins by percent of sequenced counts,
and also removes the last bin if it's very short.
.. note:: this is not equivalent to mapability
Parameters
----------
sequencedFraction: float, optional, 0<x<1
Fraction of the bin that needs to be sequenced in order
to keep the bin
"""
self._checkAppliedOperations(excludedKeys="RemovedZeros")
binCutoff = int(self.resolution * sequencedFraction)
sequenced = np.concatenate(self.genome.mappedBasesBin)
mask = sequenced < binCutoff
nzmask = np.zeros(
len(mask), bool) # mask of regions with non-zero counts
for i in self.dataDict.values():
sumData = np.sum(i[mask], axis=1) > 0
nzmask[mask] = nzmask[mask] + sumData
i[mask, :] = 0
i[:, mask] = 0
print "Removing %d bins with <%lf %% coverage by sequenced reads" % \
((nzmask > 0).sum(), 100 * sequencedFraction)
self.appliedOperations["RemovedUnsequenced"] = True
pass
    def removePoorRegions(self, names=None, cutoff=2, coverage=False, trans=False):
        """Removes "cutoff" percent of bins with least counts

        Parameters
        ----------
        names : list of str
            List of datasets to perform the filter. All by default.
        cutoff : int, 0<cutoff<100
            Percent of lowest-counts bins to be removed
        coverage : bool, optional
            If True, rank bins by number of non-zero contacts rather
            than by their total count sum.
        trans : bool, optional
            If True, rank bins using trans (inter-chromosomal) counts only.
        """
        statmask = np.zeros(len(self.dataDict.values()[0]), np.bool)
        mask = np.ones(len(self.dataDict.values()[0]), np.bool)
        if names is None:
            names = self.dataDict.keys()
        for i in names:
            data = self.dataDict[i]
            if trans:
                # Work on a copy with cis contacts zeroed so that ranking
                # is based on trans counts only.
                data = data.copy()
                data[self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]] = 0
            datasum = np.sum(data, axis=0)
            datamask = datasum > 0  # bins that have any counts at all
            mask *= datamask
            if coverage == False:
                countsum = np.sum(data, axis=0)
            elif coverage == True:
                countsum = np.sum(data > 0, axis=0)
            else:
                raise ValueError("coverage is true or false!")
            # Percentile is taken over non-empty bins only.
            newmask = countsum >= np.percentile(countsum[datamask], cutoff)
            mask *= newmask
            # Bins that had counts but fell below the percentile cutoff.
            statmask[(newmask == False) * (datamask == True)] = True
        print "removed {0} poor bins".format(statmask.sum())
        inds = np.nonzero(mask == False)
        # Zero both rows and columns of every rejected bin in all datasets.
        for i in self.dataDict.values():
            i[inds, :] = 0
            i[:, inds] = 0
        self.appliedOperations["RemovedPoor"] = True
def truncTrans(self, high=0.0005):
"""Truncates trans contacts to remove blowouts
Parameters
----------
high : float, 0<high<1, optional
Fraction of top trans interactions to be removed
"""
for i in self.dataDict.keys():
data = self.dataDict[i]
transmask = self.chromosomeIndex[:,
None] != self.chromosomeIndex[None, :]
lim = np.percentile(data[transmask], 100. * (1 - high))
print "dataset %s truncated at %lf" % (i, lim)
tdata = data[transmask]
tdata[tdata > lim] = lim
self.dataDict[i][transmask] = tdata
self.appliedOperations["TruncedTrans"] = True
def removeCis(self):
"sets to zero all cis contacts"
mask = self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]
for i in self.dataDict.keys():
self.dataDict[i][mask] = 0
self.appliedOperations["RemovedCis"] = True
print("All cis counts set to zero")
    def fakeCisOnce(self, mask="CisCounts", silent=False):
        """Used to fake cis counts or any other region
        with random trans counts.

        If extra mask is supplied, it is used instead of cis counts.

        This method draws fake contact once.
        Use fakeCis() for iterative self-consistent faking of cis.

        Parameters
        ----------
        mask : NxN boolean array or "CisCounts"
            Mask of elements to be faked.
            If set to "CisCounts", cis counts will be faked
            When mask is used, cis elements are NOT faked.
        silent : bool
            Do not print anything
        """
        #TODO (MIU): check this method!
        if silent == False:
            print("All cis counts are substituted with matching trans count")
        for key in self.dataDict.keys():
            data = np.asarray(self.dataDict[key], order="C", dtype=float)
            # Mask convention for fakeCisImpl (presumably): 1 = element to
            # be faked, 2 = element excluded as a donor -- TODO confirm.
            if mask == "CisCounts":
                _mask = np.array(self.chromosomeIndex[:, None] ==
                                 self.chromosomeIndex[None, :], int, order="C")
            else:
                assert mask.shape == self.dataDict.values()[0].shape
                _mask = np.array(mask, dtype=int, order="C")
                # Cis elements must never serve as donors for fake counts.
                _mask[self.chromosomeIndex[:, None] ==
                      self.chromosomeIndex[None, :]] = 2
            # Bins with (near-)zero total counts are excluded entirely.
            s = np.abs(np.sum(data, axis=0)) <= 1e-10
            _mask[:, s] = 2
            _mask[s, :] = 2
            _mask = np.asarray(_mask, dtype=np.int64)
            fakeCisImpl(data, _mask)  # C helper; modifies data in place
            self.dataDict[key] = data
            self.appliedOperations["RemovedCis"] = True
            self.appliedOperations["FakedCis"] = True
def fakeCis(self, force=False, mask="CisCounts"):
"""This method fakes cis contacts in an interative way
It is done to achieve faking cis contacts that is
independent of normalization of the data.
Parameters
----------
Force : bool (optional)
Set this to avoid checks for iterative correction
mask : see fakeCisOnce
"""
self.removeCis()
self.iterativeCorrectWithoutSS(force=force)
self.fakeCisOnce(silent=True, mask=mask)
self.iterativeCorrectWithoutSS(force=force)
self.fakeCisOnce(silent=True, mask=mask)
self.iterativeCorrectWithoutSS(force=force)
print("All cis counts are substituted with faked counts")
print("Data is iteratively corrected as a part of faking cis counts")
def fakeTranslocations(self, translocationRegions):
"""
This method fakes reads corresponding to a translocation.
Parameters
----------
translocationRegions: list of tuples
List of tuples (chr1,start1,end1,chr2,start2,end2),
masking a high-count region around visible translocation.
If end1/end2 is None, it is treated as length of chromosome.
So, use (chr1,0,None,chr2,0,None) to remove inter-chromosomal
interaction entirely.
"""
self._checkAppliedOperations(excludedKeys="RemovedZeros")
mask = np.zeros((self.genome.numBins, self.genome.numBins), int)
resolution = self.genome.resolution
for i in translocationRegions:
st1 = self.genome.chrmStartsBinCont[i[0]]
st2 = self.genome.chrmStartsBinCont[i[3]]
beg1 = st1 + i[1] / resolution
if i[2] is not None:
end1 = st1 + i[2] / resolution + 1
else:
end1 = self.genome.chrmEndsBinCont[i[0]]
beg2 = st2 + i[4] / resolution
if i[5] is not None:
end2 = st2 + i[5] / resolution + 1
else:
end2 = self.genome.chrmEndsBinCont[i[3]]
mask[beg1:end1, beg2:end2] = 1
mask[beg2:end2, beg1:end1] = 1
self.fakeCisOnce(mask)
def correct(self, names=None):
"""performs single correction without SS
Parameters
----------
names : list of str or None
Keys of datasets to be corrected. If none, all are corrected.
"""
self.iterativeCorrectWithoutSS(names, M=1)
    def iterativeCorrectWithoutSS(self, names=None, M=None, force=False,
                                  tolerance=1e-5):
        """performs iterative correction without SS

        Parameters
        ----------
        names : list of str or None, optional
            Keys of datasets to be corrected. By default, all are corrected.
        M : int, optional
            Number of iterations to perform.
        force : bool, optional
            Ignore warnings and pre-requisite filters
        tolerance : float, optional
            Convergence criterion passed to the correction engine.
        """
        if force == False:
            self._checkItertiveCorrectionError()
            self._checkAppliedOperations(advicedKeys=[
                "RemovedDiagonal", "RemovedPoor"])
        if names is None:
            names = self.dataDict.keys()
        for i in names:
            data, dummy, bias = ultracorrectSymmetricWithVector(
                self.dataDict[i], M=M, tolerance=tolerance)
            self.dataDict[i] = data
            # Biases are stored so raw reads can be recovered later.
            self.biasDict[i] = bias
            if i in self.singlesDict:
                # Rescale SS reads by the same per-bin bias.
                self.singlesDict[i] = self.singlesDict[i] / bias.astype(float)
        self.appliedOperations["Corrected"] = True
    def adaptiveSmoothing(self, smoothness, useOriginalReads="try",
                          names=None, rawReadDict=None):
        """
        Performs adaptive smoothing of Hi-C datasets.

        Adaptive smoothing attempts to smooth low-count, "sparce" part
        of a Hi-C matrix, while keeping the contrast in a high-count
        "diagonal" part of the matrix.

        It does it by blurring each bin pair value into a gaussian, which
        should encoumpass at least **smoothness** raw reads. However, only
        half of reads from each bin pair is counted into this gaussian, while
        full reads from neighboring bin pairs are counted.

        To summarize:
        If a bin pair contains #>2*smoothness reads, it is kept intact.
        If a bin pair contains #<2*smoothness reads, reads around bin pair
        are counted, and a bin pair is smoothed to a circle (gaussian),
        containing smoothness - (#/2) reads.
        A standalone read in a sparce part of a matrix is smoothed to a
        circle (gaussian) that encoumpasses smoothness reads.

        .. note::
            This algorithm can smooth any heatmap, e.g. corrected one.
            However, ideally it needs to know raw reads to correctly leverage
            the contribution from different bins.
            By default, it attempts to recover raw reads. However, it
            can do so only after single iterative correction.
            If used after fakeCis method, it won't use raw reads, unless
            provided externally.

        .. warning::
            Note that if you provide raw reads externally, you would need
            to make a copy of dataDict prior to filtering the data,
            not just a reference to it. Like

            >>>for i in keys: dataCopy[i] = self.dataDict[i].copy()

        Parameters
        ----------
        smoothness : float, positive. Often >1.
            Parameter of smoothness as described above
        useOriginalReads : bool or "try"
            If True, requires to recover original reads for smoothness
            If False, treats heatmap data as reads
            If "try", attempts to recover original reads;
            otherwise proceeds with heatmap data.
        names : list of str, optional
            Datasets to smooth; all by default.
        rawReadDict : dict
            A copy of self.dataDict with raw reads
        """
        if names is None:
            names = self.dataDict.keys()
        mask2D = self._giveMask2D()
        #If diagonal was removed, we should remember about it!
        if hasattr(self, "removedDiagonalValue"):
            removeDiagonals(mask2D, self.removedDiagonalValue)
        for name in names:
            data = self.dataDict[name]
            if useOriginalReads is not False:
                if rawReadDict is not None:
                    #raw reads provided externally
                    reads = rawReadDict[name]
                else:
                    #recovering raw reads
                    reads = self._recoverOriginalReads(name)
                    if reads is None:
                        #failed to recover reads
                        if useOriginalReads == True:
                            raise RuntimeError("Cannot recover original reads!")
            else:
                #raw reads were not requested
                reads = None
            if reads is None:
                reads = data  # Feed this to adaptive smoothing
            smoothed = np.zeros_like(data, dtype=float)
            N = self.chromosomeCount
            # Smooth each chromosome-pair block separately so the gaussian
            # never leaks across a chromosome boundary.
            for i in xrange(N):
                for j in xrange(N):
                    st1 = self.chromosomeStarts[i]
                    st2 = self.chromosomeStarts[j]
                    end1 = self.chromosomeEnds[i]
                    end2 = self.chromosomeEnds[j]
                    cur = data[st1:end1, st2:end2]
                    curReads = reads[st1:end1, st2:end2]
                    curMask = mask2D[st1:end1, st2:end2]
                    s = adaptiveSmoothing(matrix=cur,
                                          cutoff=smoothness,
                                          alpha=0.5,
                                          mask=curMask,
                                          originalCounts=curReads)
                    smoothed[st1:end1, st2:end2] = s
            self.dataDict[name] = smoothed
            self.appliedOperations["Smoothed"] = True
def removeChromosome(self, chromNum):
"""removes certain chromosome from all tracks and heatmaps,
setting all values to zero
Parameters
----------
chromNum : int
Number of chromosome to be removed
"""
beg = self.genome.chrmStartsBinCont[chromNum]
end = self.genome.chrmEndsBinCont[chromNum]
for i in self.dataDict.values():
i[beg:end] = 0
i[:, beg:end] = 0
for mydict in self.dicts:
for value in mydict.values():
value[beg:end] = 0
for mydict in self.eigDicts:
for value in mydict.values():
value[beg:end] = 0
    def removeZeros(self, zerosMask=None):
        """removes bins with zero counts

        keeps chromosome starts, ends, etc. consistent

        Parameters
        ----------
        zerosMask : length N array or None, optional
            If provided, this method removes a defined set of bins
            By default, it removes bins with zero # counts.
        """
        if zerosMask is not None:
            s = zerosMask
        else:
            # A bin survives only if non-empty in the overall mask AND
            # in every single dataset.
            s = np.sum(self._giveMask2D(), axis=0) > 0
            for i in self.dataDict.values():
                s *= (np.sum(i, axis=0) > 0)
        # indices[i] = new index of old bin i once zero bins are dropped.
        indices = np.zeros(len(s), int)
        count = 0
        for i in xrange(len(indices)):
            if s[i] == True:
                indices[i] = count
                count += 1
            else:
                indices[i] = count
        indices = np.r_[indices, indices[-1] + 1]
        N = len(self.positionIndex)
        for i in self.dataDict.keys():
            a = self.dataDict[i]
            if len(a) != N:
                raise ValueError("Wrong dimensions of data %i: \
                %d instead of %d" % (i, len(a), N))
            b = a[:, s]
            c = b[s, :]
            self.dataDict[i] = c
        for mydict in self.dicts:
            for key in mydict.keys():
                if len(mydict[key]) != N:
                    raise ValueError("Wrong dimensions of data {0}: {1} instead of {2}".format(key, len(mydict[key]), N))
                mydict[key] = mydict[key][s]
        for mydict in self.eigDicts:
            for key in mydict.keys():
                mydict[key] = mydict[key][:, s]
                # NOTE(review): this length check runs AFTER slicing, so it
                # compares the already-reduced width to N -- looks off;
                # confirm intended order.
                if len(mydict[key][0]) != N:
                    raise ValueError("Wrong dimensions of data %i: \
                %d instead of %d" % (key, len(mydict[key][0]), N))
        # Remap all per-bin bookkeeping onto the new, compacted indexing.
        self.chromosomeIndex = self.chromosomeIndex[s]
        self.positionIndex = self.positionIndex[s]
        self.armIndex = self.armIndex[s]
        self.chromosomeEnds = indices[self.chromosomeEnds]
        self.chromosomeStarts = indices[self.chromosomeStarts]
        self.centromerePositions = indices[self.centromerePositions]
        self.removeZerosMask = s
        if self.appliedOperations.get("RemovedZeros", False) == True:
            warnings.warn("\nYou're removing zeros twice. \
            You can't restore zeros now!")
        self.appliedOperations["RemovedZeros"] = True
        self.genome.setResolution(-1)
        return s
    def restoreZeros(self, value=np.NAN):
        """Restores zeros that were removed by removeZeros command.

        .. warning:: You can restore zeros only if you used removeZeros once.

        Parameters
        ----------
        value : number-like, optional.
            Value to fill in missing regions. By default, NAN.
        """
        if not hasattr(self, "removeZerosMask"):
            raise StandardError("Zeros have not been removed!")
        s = self.removeZerosMask
        N = len(s)
        for i in self.dataDict.keys():
            a = self.dataDict[i]
            # zeros * value fills the array with `value` (0 * NAN == NAN).
            self.dataDict[i] = np.zeros((N, N), dtype=a.dtype) * value
            tmp = np.zeros((N, len(a)), dtype=a.dtype) * value
            tmp[s, :] = a
            self.dataDict[i][:, s] = tmp
        for mydict in self.dicts:
            for key in mydict.keys():
                a = mydict[key]
                mydict[key] = np.zeros(N, dtype=a.dtype) * value
                mydict[key][s] = a
        for mydict in self.eigDicts:
            #print mydict
            for key in mydict.keys():
                a = mydict[key]
                # Eigenvector arrays are (numPCs, N): pad along axis 1.
                mydict[key] = np.zeros((len(a), N), dtype=a.dtype) * value
                mydict[key][:, s] = a
        self.genome.setResolution(self.resolution)
        self._initChromosomes()
        self.appliedOperations["RemovedZeros"] = False
def doPCA(self, force=False):
"""performs PCA on the data
creates dictionary self.PCADict with results
Last column of PC matrix is first PC, second to last - second, etc.
Returns
-------
Dictionary of principal component matrices for different datasets
"""
neededKeys = ["RemovedZeros", "Corrected", "FakedCis"]
advicedKeys = ["TruncedTrans", "RemovedPoor"]
if force == False:
self._checkAppliedOperations(neededKeys, advicedKeys)
for i in self.dataDict.keys():
currentPCA, eigenvalues = PCA(self.dataDict[i])
self.PCAEigenvalueDict[i] = eigenvalues
for j in xrange(len(currentPCA)):
if spearmanr(currentPCA[j], self.trackDict["GC"])[0] < 0:
currentPCA[j] = -currentPCA[j]
self.PCDict[i] = currentPCA
return self.PCDict
def doEig(self, numPCs=3, force=False):
"""performs eigenvector expansion on the data
creates dictionary self.EigDict with results
Last row of the eigenvector matrix is the largest eigenvector, etc.
Returns
-------
Dictionary of eigenvector matrices for different datasets
"""
neededKeys = ["RemovedZeros", "Corrected", "FakedCis"]
advicedKeys = ["TruncedTrans", "RemovedPoor"]
if force == False:
self._checkAppliedOperations(neededKeys, advicedKeys)
for i in self.dataDict.keys():
currentEIG, eigenvalues = EIG(self.dataDict[i], numPCs=numPCs)
self.eigEigenvalueDict[i] = eigenvalues
for j in xrange(len(currentEIG)):
if spearmanr(currentEIG[j], self.trackDict["GC"])[0] < 0:
currentEIG[j] = -currentEIG[j]
self.EigDict[i] = currentEIG
return self.EigDict
    def doCisPCADomains(
            self, numPCs=3, swapFirstTwoPCs=False, useArms=True,
            corrFunction=lambda x, y: spearmanr(x, y)[0],
            domainFunction="default"):
        """Calculates A-B compartments based on cis data.
        All PCs are oriented to have positive correlation with GC.
        Writes the main result (PCs) in the self.PCADict dictionary.
        Additionally, returns correlation coefficients with GC; by chromosome.

        Parameters
        ----------
        numPCs : int, optional
            Number of PCs to compute
        swapFirstTwoPCs : bool, by default False
            Swap first and second PC if second has higher correlation with GC
        useArms : bool, by default True
            Use individual arms, not chromosomes
        corr function : function, default: spearmanr
            Function to compute correlation with GC.
            Accepts two arrays, returns correlation
        domain function : function, optional
            Function to calculate principal components of a square matrix.
            Accepts: N by N matrix
            returns: numPCs by N matrix
            Default does iterative correction, then observed over expected.
            Then IC
            Then calculates correlation matrix.
            Then calculates PCA of correlation matrix.
            other options: metaphasePaper (like in Naumova, Science 2013)

        .. note:: Main output of this function is written to self.PCADict

        Returns
        -------
        corrdict,lengthdict
            Dictionaries with keys for each dataset.
            Values of corrdict contains an M x numPCs array with correlation
            coefficient for each chromosome (or arm) with non-zero length.
            Values of lengthdict contain lengthds of chromosomes/arms.
            These dictionaries can be used to calculate average correlation
            coefficient by chromosome (or by arm).
        """
        corr = corrFunction
        if (type(domainFunction) == str):
            # A string selects one of the named built-in domain functions;
            # the name is replaced by an actual closure below.
            domainFunction = domainFunction.lower()
            if domainFunction in ["metaphasepaper", "default", "lieberman",
                                  "erez", "geoff", "lieberman+", "erez+"]:
                fname = domainFunction
                def domainFunction(chrom):
                    #orig = chrom.copy()
                    M = len(chrom.flat)
                    toclip = 100 * min(0.999, (M - 10.) / M)
                    removeDiagonals(chrom, 1)
                    chrom = ultracorrect(chrom)
                    chrom = observedOverExpected(chrom)
                    chrom = np.clip(chrom, -1e10, np.percentile(chrom, toclip))
                    for i in [-1, 0, 1]:
                        fillDiagonal(chrom, 1, i)
                    if fname in ["default", "lieberman+", "erez+"]:
                        #upgrade of (Lieberman 2009)
                        # does IC, then OoE, then IC, then corrcoef, then PCA
                        chrom = ultracorrect(chrom)
                        chrom = np.corrcoef(chrom)
                        PCs = PCA(chrom, numPCs)[0]
                        return PCs
                    elif fname in ["lieberman", "erez"]:
                        #slight upgrade of (Lieberman 2009)
                        # does IC, then OoE, then corrcoef, then PCA
                        chrom = np.corrcoef(chrom)
                        PCs = PCA(chrom, numPCs)[0]
                        return PCs
                    elif fname in ["metaphasepaper", "geoff"]:
                        chrom = ultracorrect(chrom)
                        PCs = EIG(chrom, numPCs)[0]
                        return PCs
                    else:
                        # NOTE(review): unreachable -- fname was already
                        # checked against this same list above; a bare raise
                        # here would fail with no active exception anyway.
                        raise
            if domainFunction in ["lieberman-", "erez-"]:
                #simplest function presented in (Lieberman 2009)
                #Closest to (Lieberman 2009) that we could do
                def domainFunction(chrom):
                    removeDiagonals(chrom, 1)
                    chrom = observedOverExpected(chrom)
                    chrom = np.corrcoef(chrom)
                    PCs = PCA(chrom, numPCs)[0]
                    return PCs
        corrdict, lengthdict = {}, {}
        #dict of per-chromosome correlation coefficients
        for key in self.dataDict.keys():
            corrdict[key] = []
            lengthdict[key] = []
            dataset = self.dataDict[key]
            N = len(dataset)
            PCArray = np.zeros((3, N))
            for chrom in xrange(len(self.chromosomeStarts)):
                if useArms == False:
                    begs = (self.chromosomeStarts[chrom],)
                    ends = (self.chromosomeEnds[chrom],)
                else:
                    # Treat the two arms around the centromere separately.
                    begs = (self.chromosomeStarts[chrom],
                            self.centromerePositions[chrom])
                    ends = (self.centromerePositions[chrom],
                            self.chromosomeEnds[chrom])
                # map(None, ...) is the Python 2 zip-longest idiom.
                for end, beg in map(None, ends, begs):
                    if end - beg < 5:
                        continue
                    # NOTE: `chrom` is reused here as the submatrix,
                    # shadowing the loop index above.
                    chrom = dataset[beg:end, beg:end]
                    GC = self.trackDict["GC"][beg:end]
                    PCs = domainFunction(chrom)
                    # Orient every PC positively with GC content.
                    for PC in PCs:
                        if corr(PC, GC) < 0:
                            PC *= -1
                    if swapFirstTwoPCs == True:
                        if corr(PCs[0], GC) < corr(PCs[1], GC):
                            p0, p1 = PCs[0].copy(), PCs[1].copy()
                            PCs[0], PCs[1] = p1, p0
                    corrdict[key].append(tuple([corr(i, GC) for i in PCs]))
                    lengthdict[key].append(end - beg)
                    PCArray[:, beg:end] = PCs
            self.PCDict[key] = PCArray
        return corrdict, lengthdict
def cisToTrans(self, mode="All", filename="GM-all"):
"""
Calculates cis-to-trans ratio.
"All" - treating SS as trans reads
"Dummy" - fake SS reads proportional to cis reads with the same
total sum
"Matrix" - use heatmap only
"""
data = self.dataDict[filename]
cismap = self.chromosomeIndex[:, None] == self.chromosomeIndex[None, :]
cissums = np.sum(cismap * data, axis=0)
allsums = np.sum(data, axis=0)
if mode.lower() == "all":
cissums += self.singlesDict[filename]
allsums += self.singlesDict[filename]
elif mode.lower() == "dummy":
sm = np.mean(self.singlesDict[filename])
fakesm = cissums * sm / np.mean(cissums)
cissums += fakesm
allsums += fakesm
elif mode.lower() == "matrix":
pass
else:
raise
return cissums / allsums
class binnedDataAnalysis(binnedData):
    """
    Class containing experimental features and data analysis scripts
    """
    def plotScaling(self, name, label="BLA", color=None, plotUnit=1000000):
        "plots scaling of a heatmap,treating arms separately"
        import matplotlib.pyplot as plt
        data = self.dataDict[name]
        # Log-spaced genomic-distance bins, up to the longest arm.
        bins = numutils.logbins(
            2, self.genome.maxChrmArm / self.resolution, 1.17)
        s = np.sum(data, axis=0) > 0
        mask = s[:, None] * s[None, :]
        chroms = []
        masks = []
        # Collect each arm (before and after the centromere) separately.
        for i in xrange(self.chromosomeCount):
            beg = self.chromosomeStarts[i]
            end = self.centromerePositions[i]
            chroms.append(data[beg:end, beg:end])
            masks.append(mask[beg:end, beg:end])
            beg = self.centromerePositions[i]
            end = self.chromosomeEnds[i]
            chroms.append(data[beg:end, beg:end])
            masks.append(mask[beg:end, beg:end])
        observed = []
        expected = []
        # Observed counts vs. number of valid bin pairs per distance bin.
        for i in xrange(len(bins) - 1):
            low = bins[i]
            high = bins[i + 1]
            obs = 0
            exp = 0
            for j in xrange(len(chroms)):
                if low > len(chroms[j]):
                    continue
                high2 = min(high, len(chroms[j]))
                for k in xrange(low, high2):
                    obs += np.sum(np.diag(chroms[j], k))
                    exp += np.sum(np.diag(masks[j], k))
            observed.append(obs)
            expected.append(exp)
        observed = np.array(observed, float)
        expected = np.array(expected, float)
        values = observed / expected
        bins = np.array(bins, float)
        bins2 = 0.5 * (bins[:-1] + bins[1:])
        # Normalize so the curve integrates to one in plot units.
        norm = np.sum(values * (bins[1:] - bins[:-1]) * (
            self.resolution / float(plotUnit)))
        args = [self.resolution * bins2 / plotUnit, values / (1. * norm)]
        if color is not None:
            args.append(color)
        plt.plot(*args, label=label, linewidth=2)
    def averageTransMap(self, name, **kwargs):
        "plots and returns average inter-chromosomal inter-arm map"
        import matplotlib.pyplot as plt
        from mirnylib.plotting import removeBorder
        data = self.dataDict[name]
        avarms = np.zeros((80, 80))
        avmasks = np.zeros((80, 80))
        discardCutoff = 10
        # Average every inter-chromosomal arm-vs-arm block, oriented so
        # the centromere ends align, rescaled to an 80x80 map.
        for i in xrange(self.chromosomeCount):
            print i
            for j in xrange(self.chromosomeCount):
                for k in [-1, 1]:
                    for l in [-1, 1]:
                        if i == j:
                            continue
                        cenbeg1 = self.chromosomeStarts[i] + \
                            self.genome.cntrStarts[i] / self.resolution
                        cenbeg2 = self.chromosomeStarts[j] + \
                            self.genome.cntrStarts[j] / self.resolution
                        cenend1 = self.chromosomeStarts[i] + \
                            self.genome.cntrEnds[i] / self.resolution
                        cenend2 = self.chromosomeStarts[j] + \
                            self.genome.cntrEnds[j] / self.resolution
                        beg1 = self.chromosomeStarts[i]
                        beg2 = self.chromosomeStarts[j]
                        end1 = self.chromosomeEnds[i]
                        end2 = self.chromosomeEnds[j]
                        # k/l select the arm; slices run away from the
                        # centromere (negative step for the left arm).
                        if k == 1:
                            bx = cenbeg1
                            ex = beg1 - 1
                            dx = -1
                        else:
                            bx = cenend1
                            ex = end1
                            dx = 1
                        if l == 1:
                            by = cenbeg2
                            ey = beg2 - 1
                            dy = -1
                        else:
                            by = cenend2
                            ey = end2
                            dy = 1
                        if abs(bx - ex) < discardCutoff:
                            continue
                        if bx < 0:
                            bx = None
                        if ex < 0:
                            ex = None
                        if abs(by - ey) < discardCutoff:
                            continue
                        if by < 0:
                            by = None
                        if ey < 0:
                            ey = None
                        arms = data[bx:ex:dx, by:ey:dy]
                        assert max(arms.shape) <= self.genome.maxChrmArm / \
                            self.genome.resolution + 2
                        mx = np.sum(arms, axis=0)
                        my = np.sum(arms, axis=1)
                        maskx = mx == 0
                        masky = my == 0
                        mask = (maskx[None, :] + masky[:, None]) == False
                        maskf = np.array(mask, float)
                        mlenx = (np.abs(np.sum(mask, axis=0)) > 1e-20).sum()
                        mleny = (np.abs(np.sum(mask, axis=1)) > 1e-20).sum()
                        # Skip blocks with too few informative rows/columns.
                        if min(mlenx, mleny) < discardCutoff:
                            continue
                        add = numutils.zoomOut(arms, avarms.shape)
                        assert np.abs((arms.sum() - add.sum(
                        )) / arms.sum()) < 0.02
                        addmask = numutils.zoomOut(maskf, avarms.shape)
                        avarms += add
                        avmasks += addmask
        avarms /= np.mean(avarms)
        data = avarms / avmasks
        data /= np.mean(data)
        plt.imshow(np.log(numutils.trunc(
            data)), cmap="jet", interpolation="nearest", **kwargs)
        removeBorder()
        return np.log(numutils.trunc(data))
    def perArmCorrelation(self, data1, data2, doByArms=[]):
        """does inter-chromosomal spearman correlation
        of two vectors for each chromosomes separately.

        Averages over chromosomes with weight of chromosomal length

        For chromosomes in "doByArms" treats arms as separatre chromosomes

        returns average Spearman r correlation
        """
        cr = 0
        ln = 0
        for i in xrange(self.chromosomeCount):
            if i in doByArms:
                # Each arm contributes with weight equal to its length.
                beg = self.chromosomeStarts[i]
                end = self.centromerePositions[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
                    print spearmanr(data1[beg:end], data2[beg:end])[0]
                beg = self.centromerePositions[i]
                end = self.chromosomeEnds[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
                    print spearmanr(data1[beg:end], data2[beg:end])[0]
            else:
                beg = self.chromosomeStarts[i]
                end = self.chromosomeEnds[i]
                if end > beg:
                    cr += (abs(spearmanr(data1[beg:end], data2[beg:end]
                                         )[0])) * (end - beg)
                    ln += (end - beg)
        return cr / ln
    def divideOutAveragesPerChromosome(self):
        "divides each interchromosomal map by it's mean value"
        mask2D = self._giveMask2D()
        for chrom1 in xrange(self.chromosomeCount):
            for chrom2 in xrange(self.chromosomeCount):
                for i in self.dataDict.keys():
                    value = self.dataDict[i]
                    submatrix = value[self.chromosomeStarts[chrom1]:
                                      self.chromosomeEnds[chrom1],
                                      self.chromosomeStarts[chrom2]:
                                      self.chromosomeEnds[chrom2]]
                    masksum = np.sum(
                        mask2D[self.chromosomeStarts[chrom1]:
                               self.chromosomeEnds[chrom1],
                               self.chromosomeStarts[chrom2]:
                               self.chromosomeEnds[chrom2]])
                    valuesum = np.sum(submatrix)
                    # Mean over valid (masked-in) elements only.
                    mean = valuesum / masksum
                    submatrix /= mean
    def interchromosomalValues(self, filename="GM-all", returnAll=False):
        """returns average inter-chromosome-interaction values,
        ordered always the same way"""
        # Encode each (i, j) chromosome pair as a unique integer label.
        values = self.chromosomeIndex[:, None] + \
            self.chromosomeCount * self.chromosomeIndex[None, :]
        # Cis pairs are collapsed onto one sentinel label and excluded.
        values[self.chromosomeIndex[:, None] == self.chromosomeIndex[None,
               :]] = self.chromosomeCount * self.chromosomeCount - 1
        #mat_img(values)
        uv = np.sort(np.unique(values))[1:-1]
        probs = np.bincount(
            values.ravel(), weights=self.dataDict[filename].ravel())
        counts = np.bincount(values.ravel())
        if returnAll == False:
            return probs[uv] / counts[uv]
        else:
            probs[self.chromosomeCount * self.chromosomeCount - 1] = 0
            values = probs / counts
            values[counts == 0] = 0
            #mat_img(values.reshape((22,22)))
            return values.reshape((self.chromosomeCount, self.chromosomeCount))
class experimentalBinnedData(binnedData):
"Contains some poorly-implemented new features"
def projectOnEigenvalues(self, eigenvectors=[0]):
"""
Calculates projection of the data on a set of eigenvectors.
This is used to calculate heatmaps, reconstructed from eigenvectors.
Parameters
----------
eigenvectors : list of non-negative ints, optional
Zero-based indices of eigenvectors, to project onto
By default projects on the first eigenvector
Returns
-------
Puts resulting data in dataDict under DATANAME_projected key
"""
for name in self.dataDict.keys():
if name not in self.EigDict:
raise RuntimeError("Calculate eigenvectors first!")
PCs = self.EigDict[name]
if max(eigenvectors) >= len(PCs):
raise RuntimeError("Not enough eigenvectors."
"Increase numPCs in doEig()")
PCs = PCs[eigenvectors]
eigenvalues = self.eigEigenvalueDict[name][eigenvectors]
proj = reduce(lambda x, y: x + y,
[PCs[i][:, None] * PCs[i][None, :] * \
eigenvalues[i] for i in xrange(len(PCs))])
mask = PCs[0] != 0
mask = mask[:, None] * mask[None, :] # maks of non-zero elements
data = self.dataDict[name]
datamean = np.mean(data[mask])
proj[mask] += datamean
self.dataDict[name + "_projected"] = proj
    def emulateCis(self):
        """if you want to have fun creating syntetic data,
        this emulates cis contacts. adjust cis/trans ratio in the C code"""
        # scipy.weave compiles the inline C below at run time (Python 2 only).
        from scipy import weave
        transmap = self.chromosomeIndex[:,
            None] == self.chromosomeIndex[None, :]
        # No-op reference so weave's variable scan sees the name.
        len(transmap)
        for i in self.dataDict.keys():
            data = self.dataDict[i] * 1.
            N = len(data)
            N
            # Inside cis blocks, scale counts as ~1/distance to mimic the
            # cis contact-probability decay.
            code = r"""
            #line 1427 "binnedData.py"
            using namespace std;
            for (int i = 0; i < N; i++)
            {
                for (int j = 0; j<N; j++)
                {
                    if (transmap[N * i + j] == 1)
                    {
                        data[N * i + j] = data[N * i +j] * 300 /(abs(i-j) + \
                        0.5);
                    }
                }
            }
            """
            support = """
            #include <math.h>
            """
            weave.inline(code, ['transmap', 'data', "N"],
                         extra_compile_args=['-march=native -malign-double'],
                         support_code=support)
            self.dataDict[i] = data
        self.removedCis = False
        self.fakedCis = False
def fakeMissing(self):
"""fakes megabases that have no reads. For cis reads fakes with cis
reads at the same distance. For trans fakes with random trans read
at the same diagonal.
"""
from scipy import weave
for i in self.dataDict.keys():
data = self.dataDict[i] * 1.
sm = np.sum(data, axis=0) > 0
mask = sm[:, None] * sm[None, :]
transmask = np.array(self.chromosomeIndex[:, None]
== self.chromosomeIndex[None, :], int)
#mat_img(transmask)
N = len(data)
N, transmask, mask # to remove warning
code = r"""
#line 1467 "binnedData.py"
using namespace std;
for (int i = 0; i < N; i++)
{
for (int j = i; j<N; j++)
{
if ((MASK2(i,j) == 0) )
{
for (int ss = 0; ss < 401; ss++)
{
int k = 0;
int s = rand() % (N - (j-i));
if ((mask[s * N + s + j - i] == 1) &&\
((transmask[s * N + s + j - i] ==\
transmask[i * N + j]) || (ss > 200)) )
{
data[i * N + j] = data[s * N + s + j - i];
data[j * N + i] = data[s * N + s + j - i];
break;
}
if (ss == 400) {printf("Cannot fake one point... \
skipping %d %d \n",i,j);}
}
}
}
}
"""
support = """
#include <math.h>
"""
for _ in xrange(5):
weave.inline(code, ['transmask', 'mask', 'data', "N"],
extra_compile_args=['-march=native'
' -malign-double -O3'],
support_code=support)
data = correct(data)
self.dataDict[i] = data
#mat_img(self.dataDict[i]>0)
def iterativeCorrectByTrans(self, names=None):
"""performs iterative correction by trans data only, corrects cis also
Parameters
----------
names : list of str or None, optional
Keys of datasets to be corrected. By default, all are corrected.
"""
self.appliedOperations["Corrected"] = True
if names is None:
names = self.dataDict.keys()
self.transmap = self.chromosomeIndex[:,
None] != self.chromosomeIndex[None, :]
#mat_img(self.transmap)
for i in names:
data = self.dataDict[i]
self.dataDict[i], self.biasDict[i] = \
numutils.ultracorrectSymmetricByMask(data, self.transmap, M=None)
try:
self.singlesDict[i] /= self.biasDict[i]
except:
print "bla"
def loadWigFile(self, filenames, label, control=None,
wigFileType="Auto", functionToAverage=np.log, internalResolution=1000):
byChromosome = self.genome.parseAnyWigFile(filenames=filenames,
control=control,
wigFileType=wigFileType,
functionToAverage=functionToAverage,
internalResolution=internalResolution)
self.trackDict[label] = np.concatenate(byChromosome)
def loadErezEigenvector1MB(self, erezFolder):
"Loads Erez chromatin domain eigenvector for HindIII"
if self.resolution != 1000000:
raise StandardError("Erez eigenvector is only at 1MB resolution")
if self.genome.folderName != "hg18":
raise StandardError("Erez eigenvector is for hg18 only!")
folder = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
"1000000bp.hm.eigenvector.tab")
folder2 = os.path.join(erezFolder, "GM-combined.ctgDATA1.ctgDATA1."
"1000000bp.hm.eigenvector2.tab")
eigenvector = np.zeros(self.genome.numBins, float)
for chrom in range(1, 24):
filename = folder.replace("DATA1", str(chrom))
if chrom in [4, 5]:
filename = folder2.replace("DATA1", str(chrom))
mydata = np.array([[float(j) for j in i.split(
)] for i in open(filename).readlines()])
eigenvector[self.genome.chrmStartsBinCont[chrom -
1] + np.array(mydata[:, 1], int)] = mydata[:, 2]
self.trackDict["Erez"] = eigenvector
def loadTanayDomains(self):
"domains, extracted from Tanay paper image"
if self.genome.folderName != "hg18":
raise StandardError("Tanay domains work only with hg18")
data = """0 - 17, 1 - 13.5, 2 - 6.5, 0 - 2, 2 - 2; x - 6.5, 0 - 6,\
1 - 13.5, 0 - 1.5, 1 - 14.5
1 - 8.5, 0 - 2.5, 1 - 14, 2 - 6; 0 - 1.5, 2 - 11.5, 1 - 35
1 - 14, 0-6, 2 - 11; 2 - 4.5, 1 - 5, 0 - 4, 1 -20.5, 0 - 2
0 - 3, 2 - 14; 2 - 5, 1 - 42
2 - 16; 2 - 7, 0 - 3, 1 - 18.5, 0 - 1, 1 - 13, 0 - 2.5
0 - 2, 1 - 6.5, 0 - 7.5, 2 - 4; 2 - 6, 1 - 31
0 - 2, 1 - 11, 2 - 7; 2 - 7.5, 1 - 5, 0 - 3, 1 - 19
2 - 9.5, 0 - 1, 2 - 5; 2 - 4, 1 - 27.5, 0 - 2.5
2 - 11.5, 0 - 2.5, x - 2.5; x - 5, 2 - 8, 0 - 3.5, 1 - 9, 0 - 6
2 - 13.5; 2 - 9, 0 - 3, 1 - 6, 0 - 3.5, 1 - 10.5
0 - 3.5, 2 - 15; 2 - 1, 0 - 7.5, 1 - 13, 0 - 1.5, 1 - 4
0 - 4, 2 - 8; 2 - 2, 0 - 5, 2 - 2.5, 1 - 13, 0 - 6.5, 1 - 3.5
x - 5.5; 2 - 8.5, 0 - 1, 2 - 7, 1 - 16
x - 5.5; 2 - 14.5, 0 - 6, 2 - 3, 1 - 2.5, 2 - 1, 0 - 3
x - 5.5; 2 - 6, 0 - 3.5, 2 - 1.5, 0 - 11.5, 2 - 5.5
0 - 11, 2 - 1; x - 2.5, 2 - 6.5, 0 - 3, 2 - 2, 0 - 3.5
0 - 4, 2 - 1.5, 0 - 1.5; 0 - 19
2 - 5; 2 - 20
0 - 9.5, x - 1.5; x - 1, 2 - 2, 0 - 8.5
0 - 2, 2 - 7; 0 - 8, 2 - 2, 0 - 1
x - 0.5; 2 - 8.5, 0 - 3
x - 4; 0 -12
x - 1.5, 1 - 13, 2 - 5.5; 2 - 2, 1 - 29"""
chroms = [i.split(";") for i in data.split("\n")]
result = []
for chrom in chroms:
result.append([])
cur = result[-1]
for arm in chrom:
for enrty in arm.split(","):
spentry = enrty.split("-")
if "x" in spentry[0]:
value = -1
else:
value = int(spentry[0])
cur += ([value] * int(2 * float(spentry[1])))
cur += [-1] * 2
#lenses = [len(i) for i in result]
domains = np.zeros(self.genome.numBins, int)
for i in xrange(self.genome.chrmCount):
for j in xrange((self.genome.chrmLens[i] / self.resolution)):
domains[self.genome.chrmStartsBinCont[i] + j] = \
result[i][(j * len(result[i]) / ((self.genome.chrmLens[i] /
self.resolution)))]
self.trackDict['TanayDomains'] = domains
|
bxlab/HiFive_Paper
|
Scripts/HiCLib/mirnylab-hiclib-460c3fbc0f72/src/hiclib/binnedData.py
|
Python
|
bsd-3-clause
| 64,389
|
[
"Gaussian"
] |
6c2c5e49ca350894a58e66149f788b5ccba90d78183be202d092d5c8c02fff74
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.orca.data.pandas.preprocessing import read_csv
from bigdl.orca.data.pandas.preprocessing import read_json
from bigdl.orca.data.pandas.preprocessing import read_parquet
|
intel-analytics/BigDL
|
python/orca/src/bigdl/orca/data/pandas/__init__.py
|
Python
|
apache-2.0
| 766
|
[
"ORCA"
] |
01726e1cb8f70987a116d40dc8c8ae9a67b305156b835c0e957e2c31d06a7d60
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy.sparse import hstack as sparse_hstack
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..exceptions import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
from ..utils.multiclass import check_classification_targets
# Public estimators exported by this module.
__all__ = ["RandomForestClassifier",
           "RandomForestRegressor",
           "ExtraTreesClassifier",
           "ExtraTreesRegressor",
           "RandomTreesEmbedding"]
# Exclusive upper bound when drawing per-tree integer seeds in
# BaseForest.fit (randint over the full signed 32-bit range).
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
    """Draw one bootstrap sample of row indices.

    Private helper for the _parallel_build_trees function: n_samples draws
    with replacement from ``[0, n_samples)``.
    """
    rng = check_random_state(random_state)
    return rng.randint(0, n_samples, n_samples)
def _generate_unsampled_indices(random_state, n_samples):
    """Return the out-of-bag row indices for one tree.

    Private helper for the forest._set_oob_score function. Re-draws the
    tree's exact bootstrap sample (same seed) and keeps the rows that were
    never drawn.
    """
    drawn = _generate_sample_indices(random_state, n_samples)
    draw_counts = bincount(drawn, minlength=n_samples)
    return np.arange(n_samples)[draw_counts == 0]
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
                          verbose=0, class_weight=None):
    """Fit one tree of the forest (executed inside a joblib worker)."""
    if verbose > 1:
        print("building tree %d of %d" % (tree_idx + 1, n_trees))
    if not forest.bootstrap:
        # No resampling: fit directly on the caller-supplied weights.
        tree.fit(X, y, sample_weight=sample_weight, check_input=False)
        return tree
    n_samples = X.shape[0]
    if sample_weight is None:
        curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
    else:
        curr_sample_weight = sample_weight.copy()
    # Emulate sampling-with-replacement by scaling each row's weight by the
    # number of times it appears in the bootstrap sample.
    indices = _generate_sample_indices(tree.random_state, n_samples)
    curr_sample_weight *= bincount(indices, minlength=n_samples)
    if class_weight == 'subsample':
        # Deprecated alias of 'balanced_subsample'; suppress its own warning.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            curr_sample_weight *= compute_sample_weight('auto', y, indices)
    elif class_weight == 'balanced_subsample':
        curr_sample_weight *= compute_sample_weight('balanced', y, indices)
    tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
    return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
    """Call ``obj.<methodname>(*args, **kwargs)`` and return the result.

    Workaround for Python 2 pickling: bound methods cannot be pickled, so
    joblib workers receive the object and the method name separately.
    """
    bound_method = getattr(obj, methodname)
    return bound_method(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
                                    _LearntSelectorMixin)):
    """Base class for forests of trees.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(BaseForest, self).__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params)
        # Parameters are stored untouched (scikit-learn convention:
        # validation is deferred to fit so get_params/set_params round-trip).
        self.bootstrap = bootstrap
        self.oob_score = oob_score
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
        self.warm_start = warm_start
        self.class_weight = class_weight
    def apply(self, X):
        """Apply trees in the forest to X, return leaf indices.
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples, n_estimators]
            For each datapoint x in X and for each tree in the forest,
            return the index of the leaf x ends up in.
        """
        X = self._validate_X_predict(X)
        # Threading backend: the Cython tree code releases the GIL.
        results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                           backend="threading")(
            delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
            for tree in self.estimators_)
        return np.array(results).T
    def decision_path(self, X):
        """Return the decision path in the forest
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        indicator : sparse csr array, shape = [n_samples, n_nodes]
            Return a node indicator matrix where non zero elements
            indicates that the samples goes through the nodes.
        n_nodes_ptr : array of size (n_estimators + 1, )
            The columns from indicator[n_nodes_ptr[i]:n_nodes_ptr[i+1]]
            gives the indicator value for the i-th estimator.
        """
        X = self._validate_X_predict(X)
        indicators = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                              backend="threading")(
            delayed(_parallel_helper)(tree, 'decision_path', X,
                                      check_input=False)
            for tree in self.estimators_)
        # Cumulative node counts let callers slice the stacked indicator
        # matrix back into per-tree column ranges.
        n_nodes = [0]
        n_nodes.extend([i.shape[1] for i in indicators])
        n_nodes_ptr = np.array(n_nodes).cumsum()
        return sparse_hstack(indicators).tocsr(), n_nodes_ptr
    def fit(self, X, y, sample_weight=None):
        """Build a forest of trees from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression).
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        Returns
        -------
        self : object
            Returns self.
        """
        # Validate or convert input data
        X = check_array(X, accept_sparse="csc", dtype=DTYPE)
        y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()
        # Remap output
        n_samples, self.n_features_ = X.shape
        y = np.atleast_1d(y)
        if y.ndim == 2 and y.shape[1] == 1:
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        y, expanded_class_weight = self._validate_y_class_weight(y)
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        if expanded_class_weight is not None:
            # Class weights fold into per-sample weights (multiplicatively
            # when the caller also supplied sample_weight).
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Check parameters
        self._validate_estimator()
        if not self.bootstrap and self.oob_score:
            raise ValueError("Out of bag estimation only available"
                             " if bootstrap=True")
        random_state = check_random_state(self.random_state)
        if not self.warm_start:
            # Free allocated memory, if any
            self.estimators_ = []
        n_more_estimators = self.n_estimators - len(self.estimators_)
        if n_more_estimators < 0:
            raise ValueError('n_estimators=%d must be larger or equal to '
                             'len(estimators_)=%d when warm_start==True'
                             % (self.n_estimators, len(self.estimators_)))
        elif n_more_estimators == 0:
            warn("Warm-start fitting without increasing n_estimators does not "
                 "fit new trees.")
        else:
            if self.warm_start and len(self.estimators_) > 0:
                # We draw from the random state to get the random state we
                # would have got if we hadn't used a warm_start.
                random_state.randint(MAX_INT, size=len(self.estimators_))
            trees = []
            for i in range(n_more_estimators):
                # Each tree gets its own integer seed drawn from the shared
                # RandomState, keeping fits reproducible yet independent.
                tree = self._make_estimator(append=False)
                tree.set_params(random_state=random_state.randint(MAX_INT))
                trees.append(tree)
            # Parallel loop: we use the threading backend as the Cython code
            # for fitting the trees is internally releasing the Python GIL
            # making threading always more efficient than multiprocessing in
            # that case.
            trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
                             backend="threading")(
                delayed(_parallel_build_trees)(
                    t, self, X, y, sample_weight, i, len(trees),
                    verbose=self.verbose, class_weight=self.class_weight)
                for i, t in enumerate(trees))
            # Collect newly grown trees
            self.estimators_.extend(trees)
        if self.oob_score:
            self._set_oob_score(X, y)
        # Decapsulate classes_ attributes
        if hasattr(self, "classes_") and self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    @abstractmethod
    def _set_oob_score(self, X, y):
        """Calculate out of bag predictions and score."""
    def _validate_y_class_weight(self, y):
        # Default implementation (regressors); classifiers override this to
        # encode labels and expand class weights.
        return y, None
    def _validate_X_predict(self, X):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        # Delegate validation to the first tree; all trees share the same
        # input contract.
        return self.estimators_[0]._validate_X_predict(X, check_input=True)
    @property
    def feature_importances_(self):
        """Return the feature importances (the higher, the more important the
           feature).
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before `feature_importances_`.")
        # Forest importance = mean of the per-tree importances.
        all_importances = Parallel(n_jobs=self.n_jobs,
                                   backend="threading")(
            delayed(getattr)(tree, 'feature_importances_')
            for tree in self.estimators_)
        return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
                                          ClassifierMixin)):
    """Base class for forest of trees-based classifiers.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        super(ForestClassifier, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
    def _set_oob_score(self, X, y):
        """Compute out-of-bag score"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_classes_ = self.n_classes_
        n_samples = y.shape[0]
        oob_decision_function = []
        oob_score = 0.0
        predictions = []
        # One (n_samples, n_classes) probability accumulator per output.
        for k in range(self.n_outputs_):
            predictions.append(np.zeros((n_samples, n_classes_[k])))
        for estimator in self.estimators_:
            # Each tree votes only on the samples it did NOT see (OOB rows).
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict_proba(X[unsampled_indices, :],
                                                  check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = [p_estimator]
            for k in range(self.n_outputs_):
                predictions[k][unsampled_indices, :] += p_estimator[k]
        for k in range(self.n_outputs_):
            if (predictions[k].sum(axis=1) == 0).any():
                # Rows with no OOB votes yield 0/0 = NaN below.
                warn("Some inputs do not have OOB scores. "
                     "This probably means too few trees were used "
                     "to compute any reliable oob estimates.")
            decision = (predictions[k] /
                        predictions[k].sum(axis=1)[:, np.newaxis])
            oob_decision_function.append(decision)
            oob_score += np.mean(y[:, k] ==
                                 np.argmax(predictions[k], axis=1), axis=0)
        if self.n_outputs_ == 1:
            self.oob_decision_function_ = oob_decision_function[0]
        else:
            self.oob_decision_function_ = oob_decision_function
        # Accuracy averaged over outputs.
        self.oob_score_ = oob_score / self.n_outputs_
    def _validate_y_class_weight(self, y):
        """Encode labels to contiguous integer codes per output and expand
        class weights into per-sample weights.

        Returns (encoded_y, expanded_class_weight_or_None); sets
        ``classes_`` and ``n_classes_``.
        """
        check_classification_targets(y)
        y = np.copy(y)
        expanded_class_weight = None
        if self.class_weight is not None:
            # Keep the original labels: compute_sample_weight needs them,
            # not the integer-encoded version.
            y_original = np.copy(y)
        self.classes_ = []
        self.n_classes_ = []
        # BUGFIX: was dtype=np.int, an alias of the builtin int that is
        # deprecated since NumPy 1.20 and removed in NumPy 1.24; np.intp is
        # the platform index dtype that np.unique(return_inverse=True)
        # produces anyway.
        y_store_unique_indices = np.zeros(y.shape, dtype=np.intp)
        for k in range(self.n_outputs_):
            classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
            self.classes_.append(classes_k)
            self.n_classes_.append(classes_k.shape[0])
        y = y_store_unique_indices
        if self.class_weight is not None:
            # 'auto'/'subsample' are deprecated spellings kept for
            # backward compatibility (with a DeprecationWarning below).
            valid_presets = ('auto', 'balanced', 'subsample', 'balanced_subsample')
            if isinstance(self.class_weight, six.string_types):
                if self.class_weight not in valid_presets:
                    raise ValueError('Valid presets for class_weight include '
                                     '"balanced" and "balanced_subsample". Given "%s".'
                                     % self.class_weight)
                if self.class_weight == "subsample":
                    warn("class_weight='subsample' is deprecated in 0.17 and"
                         "will be removed in 0.19. It was replaced by "
                         "class_weight='balanced_subsample' using the balanced"
                         "strategy.", DeprecationWarning)
                if self.warm_start:
                    warn('class_weight presets "balanced" or "balanced_subsample" are '
                         'not recommended for warm_start if the fitted data '
                         'differs from the full dataset. In order to use '
                         '"balanced" weights, use compute_class_weight("balanced", '
                         'classes, y). In place of y you can use a large '
                         'enough sample of the full training set target to '
                         'properly estimate the class frequency '
                         'distributions. Pass the resulting weights as the '
                         'class_weight parameter.')
            # The *_subsample presets are applied per bootstrap sample in
            # _parallel_build_trees instead — unless bootstrap is off, in
            # which case they degrade to their whole-dataset equivalents.
            if (self.class_weight not in ['subsample', 'balanced_subsample'] or
                    not self.bootstrap):
                if self.class_weight == 'subsample':
                    class_weight = 'auto'
                elif self.class_weight == "balanced_subsample":
                    class_weight = "balanced"
                else:
                    class_weight = self.class_weight
                with warnings.catch_warnings():
                    if class_weight == "auto":
                        warnings.simplefilter('ignore', DeprecationWarning)
                    expanded_class_weight = compute_sample_weight(class_weight,
                                                                  y_original)
        return y, expanded_class_weight
    def predict(self, X):
        """Predict class for X.
        The predicted class of an input sample is a vote by the trees in
        the forest, weighted by their probability estimates. That is,
        the predicted class is the one with highest mean probability
        estimate across the trees.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            # Decode each output's argmax back to its original labels.
            n_samples = proba[0].shape[0]
            predictions = np.zeros((n_samples, self.n_outputs_))
            for k in range(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
                                                                    axis=1),
                                                          axis=0)
            return predictions
    def predict_proba(self, X):
        """Predict class probabilities for X.
        The predicted class probabilities of an input sample is computed as
        the mean predicted class probabilities of the trees in the forest. The
        class probability of a single tree is the fraction of samples of the same
        class in a leaf.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict_proba', X,
                                      check_input=False)
            for e in self.estimators_)
        # Reduce: accumulate into the first tree's output in place, then
        # normalize by the number of trees.
        proba = all_proba[0]
        if self.n_outputs_ == 1:
            for j in range(1, len(all_proba)):
                proba += all_proba[j]
            proba /= len(self.estimators_)
        else:
            for j in range(1, len(all_proba)):
                for k in range(self.n_outputs_):
                    proba[k] += all_proba[j][k]
            for k in range(self.n_outputs_):
                proba[k] /= self.n_estimators
        return proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample is computed as
        the log of the mean predicted class probabilities of the trees in the
        forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
    """Base class for forest of trees-based regressors.
    Warning: This class should not be used directly. Use derived classes
    instead.
    """
    @abstractmethod
    def __init__(self,
                 base_estimator,
                 n_estimators=10,
                 estimator_params=tuple(),
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Regressors take no class_weight; everything else is forwarded.
        super(ForestRegressor, self).__init__(
            base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)
    def predict(self, X):
        """Predict regression target for X.
        The predicted regression target of an input sample is computed as the
        mean predicted regression targets of the trees in the forest.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted values.
        """
        # Check data
        X = self._validate_X_predict(X)
        # Assign chunk of trees to jobs
        n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
        # Parallel loop
        all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                             backend="threading")(
            delayed(_parallel_helper)(e, 'predict', X, check_input=False)
            for e in self.estimators_)
        # Reduce: forest prediction is the mean of the per-tree predictions.
        y_hat = sum(all_y_hat) / len(self.estimators_)
        return y_hat
    def _set_oob_score(self, X, y):
        """Compute out-of-bag scores"""
        X = check_array(X, dtype=DTYPE, accept_sparse='csr')
        n_samples = y.shape[0]
        # Running sum of OOB predictions and, per sample, the number of
        # trees that contributed one.
        predictions = np.zeros((n_samples, self.n_outputs_))
        n_predictions = np.zeros((n_samples, self.n_outputs_))
        for estimator in self.estimators_:
            # Each tree predicts only the samples it did NOT train on.
            unsampled_indices = _generate_unsampled_indices(
                estimator.random_state, n_samples)
            p_estimator = estimator.predict(
                X[unsampled_indices, :], check_input=False)
            if self.n_outputs_ == 1:
                p_estimator = p_estimator[:, np.newaxis]
            predictions[unsampled_indices, :] += p_estimator
            n_predictions[unsampled_indices, :] += 1
        if (n_predictions == 0).any():
            warn("Some inputs do not have OOB scores. "
                 "This probably means too few trees were used "
                 "to compute any reliable oob estimates.")
            # Avoid 0/0; rows with no votes keep prediction 0.
            n_predictions[n_predictions == 0] = 1
        predictions /= n_predictions
        self.oob_prediction_ = predictions
        if self.n_outputs_ == 1:
            self.oob_prediction_ = \
                self.oob_prediction_.reshape((n_samples, ))
        # OOB score = R^2 averaged over outputs.
        self.oob_score_ = 0.0
        for k in range(self.n_outputs_):
            self.oob_score_ += r2_score(y[:, k],
                                        predictions[:, k])
        self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
    """A random forest classifier.
    A random forest is a meta estimator that fits a number of decision tree
    classifiers on various sub-samples of the dataset and use averaging to
    improve the predictive accuracy and control over-fitting.
    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).
    Read more in the :ref:`User Guide <forest>`.
    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
        Note: this parameter is tree-specific.
    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:
        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.
    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:
        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.
    oob_score : bool
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.
    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.
    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.
    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        The "balanced_subsample" mode is the same as "balanced" except that
        weights are computed based on the bootstrap sample for every tree
        grown.
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).
    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).
    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.
    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.
    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
    See also
    --------
    DecisionTreeClassifier, ExtraTreesClassifier
    """
    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # estimator_params names the attributes of *this* object that are
        # copied onto every cloned DecisionTreeClassifier by
        # BaseEnsemble._make_estimator — hence the matching self.* below.
        super(RandomForestClassifier, self).__init__(
            base_estimator=DecisionTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)
        # Tree-level hyper-parameters, stored so estimator_params can
        # forward them to each sub-tree.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
    """A random forest regressor.

    A random forest is a meta estimator that fits a number of decision
    tree regressors on various sub-samples of the dataset and use averaging
    to improve the predictive accuracy and control over-fitting.

    The sub-sample size is always the same as the original
    input sample size but the samples are drawn with replacement if
    `bootstrap=True` (default).

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    bootstrap : boolean, optional (default=True)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
        the R^2 on unseen data.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.

    See also
    --------
    DecisionTreeRegressor, ExtraTreesRegressor
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=True,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Ensemble-level settings go to the ForestRegressor base class;
        # ``estimator_params`` names the attributes below that are copied
        # onto every DecisionTreeRegressor sub-estimator at fit time.
        super(RandomForestRegressor, self).__init__(
            base_estimator=DecisionTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Per-tree hyperparameters, stored verbatim so that
        # ``get_params``/``set_params`` (and estimator cloning) work.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
    """An extra-trees classifier.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate
        the generalization accuracy.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        The "balanced_subsample" mode is the same as "balanced" except that weights are
        computed based on the bootstrap sample for every tree grown.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    Attributes
    ----------
    estimators_ : list of DecisionTreeClassifier
        The collection of fitted sub-estimators.

    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem), or a list of arrays of
        class labels (multi-output problem).

    n_classes_ : int or list
        The number of classes (single output problem), or a list containing the
        number of classes for each output (multi-output problem).

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_decision_function_ : array of shape = [n_samples, n_classes]
        Decision function computed with out-of-bag estimate on the training
        set. If n_estimators is small it might be possible that a data point
        was never left out during the bootstrap. In this case,
        `oob_decision_function_` might contain NaN.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
    RandomForestClassifier : Ensemble Classifier based on trees with optimal
        splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="gini",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False,
                 class_weight=None):
        # Ensemble-level settings go to the ForestClassifier base class;
        # ``estimator_params`` names the attributes below that are copied
        # onto every ExtraTreeClassifier sub-estimator at fit time.
        super(ExtraTreesClassifier, self).__init__(
            base_estimator=ExtraTreeClassifier(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start,
            class_weight=class_weight)

        # Per-tree hyperparameters, stored verbatim so that
        # ``get_params``/``set_params`` (and estimator cloning) work.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
    """An extra-trees regressor.

    This class implements a meta estimator that fits a number of
    randomized decision trees (a.k.a. extra-trees) on various sub-samples
    of the dataset and use averaging to improve the predictive accuracy
    and control over-fitting.

    Read more in the :ref:`User Guide <forest>`.

    Parameters
    ----------
    n_estimators : integer, optional (default=10)
        The number of trees in the forest.

    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error.

    max_features : int, float, string or None, optional (default="auto")
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    max_depth : integer or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    bootstrap : boolean, optional (default=False)
        Whether bootstrap samples are used when building trees.

    oob_score : bool, optional (default=False)
        Whether to use out-of-bag samples to estimate the R^2 on unseen data.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of DecisionTreeRegressor
        The collection of fitted sub-estimators.

    feature_importances_ : array of shape = [n_features]
        The feature importances (the higher, the more important the feature).

    n_features_ : int
        The number of features when ``fit`` is performed.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    oob_score_ : float
        Score of the training dataset obtained using an out-of-bag estimate.

    oob_prediction_ : array of shape = [n_samples]
        Prediction computed with out-of-bag estimate on the training set.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    See also
    --------
    sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
    RandomForestRegressor: Ensemble regressor using trees with optimal splits.
    """

    def __init__(self,
                 n_estimators=10,
                 criterion="mse",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 max_leaf_nodes=None,
                 bootstrap=False,
                 oob_score=False,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # Ensemble-level settings go to the ForestRegressor base class;
        # ``estimator_params`` names the attributes below that are copied
        # onto every ExtraTreeRegressor sub-estimator at fit time.
        super(ExtraTreesRegressor, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=bootstrap,
            oob_score=oob_score,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # Per-tree hyperparameters, stored verbatim so that
        # ``get_params``/``set_params`` (and estimator cloning) work.
        self.criterion = criterion
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
    """An ensemble of totally random trees.

    An unsupervised transformation of a dataset to a high-dimensional
    sparse representation. A datapoint is coded according to which leaf of
    each tree it is sorted into. Using a one-hot encoding of the leaves,
    this leads to a binary coding with as many ones as there are trees in
    the forest.

    The dimensionality of the resulting representation is
    ``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
    the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.

    Read more in the :ref:`User Guide <random_trees_embedding>`.

    Parameters
    ----------
    n_estimators : int
        Number of trees in the forest.

    max_depth : int
        The maximum depth of each tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.

    min_samples_split : int, float, optional (default=2)
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a percentage and
          `ceil(min_samples_split * n_samples)` is the minimum
          number of samples for each split.

    min_samples_leaf : int, float, optional (default=1)
        The minimum number of samples required to be at a leaf node:

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a percentage and
          `ceil(min_samples_leaf * n_samples)` is the minimum
          number of samples for each node.

    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.

    max_leaf_nodes : int or None, optional (default=None)
        Grow trees with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.

    sparse_output : bool, optional (default=True)
        Whether or not to return a sparse CSR matrix, as default behavior,
        or to return a dense array compatible with dense pipeline operators.

    n_jobs : integer, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    verbose : int, optional (default=0)
        Controls the verbosity of the tree building process.

    warm_start : bool, optional (default=False)
        When set to ``True``, reuse the solution of the previous call to fit
        and add more estimators to the ensemble, otherwise, just fit a whole
        new forest.

    Attributes
    ----------
    estimators_ : list of ExtraTreeRegressor
        The collection of fitted sub-estimators.

    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    .. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
           visual codebooks using randomized clustering forests"
           NIPS 2007
    """

    def __init__(self,
                 n_estimators=10,
                 max_depth=5,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_leaf_nodes=None,
                 sparse_output=True,
                 n_jobs=1,
                 random_state=None,
                 verbose=0,
                 warm_start=False):
        # The embedding never bootstraps and has no meaningful OOB score
        # (see _set_oob_score below), so both are fixed to False.
        super(RandomTreesEmbedding, self).__init__(
            base_estimator=ExtraTreeRegressor(),
            n_estimators=n_estimators,
            estimator_params=("criterion", "max_depth", "min_samples_split",
                              "min_samples_leaf", "min_weight_fraction_leaf",
                              "max_features", "max_leaf_nodes",
                              "random_state"),
            bootstrap=False,
            oob_score=False,
            n_jobs=n_jobs,
            random_state=random_state,
            verbose=verbose,
            warm_start=warm_start)

        # "Totally random" trees: the regression criterion is fixed and a
        # single candidate feature is drawn per split, so the structure is
        # independent of any target (fit_transform supplies random targets).
        self.criterion = 'mse'
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = 1
        self.max_leaf_nodes = max_leaf_nodes
        self.sparse_output = sparse_output

    def _set_oob_score(self, X, y):
        # There is no supervised target, so an out-of-bag score is undefined.
        raise NotImplementedError("OOB score not supported by tree embedding")

    def fit(self, X, y=None, sample_weight=None):
        """Fit estimator.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            The input samples. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csc_matrix`` for maximum efficiency.

        y : Ignored. Accepted for pipeline/API compatibility only;
            ``fit_transform`` replaces it with random targets.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights forwarded to the underlying forest fit.

        Returns
        -------
        self : object
            Returns self.
        """
        self.fit_transform(X, y, sample_weight=sample_weight)
        return self

    def fit_transform(self, X, y=None, sample_weight=None):
        """Fit estimator and transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data used to build forests. Use ``dtype=np.float32`` for
            maximum efficiency.

        y : Ignored. Overwritten below with uniform random targets.

        sample_weight : array-like, shape = [n_samples] or None
            Sample weights forwarded to the underlying forest fit.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # ensure_2d=False because there are actually unit test checking we fail
        # for 1d.
        X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
        if issparse(X):
            # Pre-sort indices to avoid that each individual tree of the
            # ensemble sorts the indices.
            X.sort_indices()

        rnd = check_random_state(self.random_state)
        # Fit the trees on uniformly random targets: splits cannot exploit
        # any real structure, which is what makes the trees "totally random".
        y = rnd.uniform(size=X.shape[0])
        super(RandomTreesEmbedding, self).fit(X, y,
                                              sample_weight=sample_weight)

        # One-hot encode leaf indices from ``apply`` to get the embedding.
        self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
        return self.one_hot_encoder_.fit_transform(self.apply(X))

    def transform(self, X):
        """Transform dataset.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
            Input data to be transformed. Use ``dtype=np.float32`` for maximum
            efficiency. Sparse matrices are also supported, use sparse
            ``csr_matrix`` for maximum efficiency.

        Returns
        -------
        X_transformed : sparse matrix, shape=(n_samples, n_out)
            Transformed dataset.
        """
        # Reuses the encoder fitted in fit_transform; raises AttributeError
        # if called before fit.
        return self.one_hot_encoder_.transform(self.apply(X))
|
kashif/scikit-learn
|
sklearn/ensemble/forest.py
|
Python
|
bsd-3-clause
| 64,796
|
[
"Brian"
] |
fb80c4c7414f1f4034854b3abdf695ef25b0193f44fb08a230602df33f3b7de9
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.