Dataset schema (one record per code sample):

| column | type | range |
|---|---|---|
| text | string | 12 to 1.05M chars |
| repo_name | string | 5 to 86 chars |
| path | string | 4 to 191 chars |
| language | string | 1 distinct class |
| license | string | 15 distinct classes |
| size | int32 | 12 to 1.05M |
| keyword | list | 1 to 23 items |
| text_hash | string | 64 chars |
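
As a quick orientation, here is a minimal sketch of loading and filtering a dataset with this schema, assuming the Hugging Face `datasets` library; the dataset id below is a placeholder, not the real source:

```python
from datasets import load_dataset

# Placeholder dataset id, assumed for illustration only.
ds = load_dataset("some-org/code-keyword-corpus", split="train")

# Keep only records whose keyword list mentions "Mayavi".
mayavi_rows = ds.filter(lambda row: "Mayavi" in row["keyword"])
print(mayavi_rows[0]["repo_name"], mayavi_rows[0]["path"])
```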
```python
# proxy module
from __future__ import absolute_import
from mayavi.tools.server import *
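
# Note: proxy modules like this one keep legacy "enthought.*" imports working
# after the code moved to the top-level "mayavi" package; the wildcard import
# simply re-exports every public name from the real module.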
```

| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
| enthought/etsproxy | enthought/mayavi/tools/server.py | Python | bsd-3-clause | 88 | ["Mayavi"] | df62320f38346eafcee413b3482b2f0a69a27e2cb19ffae3f230388e8ddd75f6 |

```python
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from os import path
from commoncode.testcase import FileBasedTesting
from commoncode.ignore import is_ignored
from scancode.cli import resource_paths
class TestIgnoreFiles(FileBasedTesting):
test_data_dir = path.join(path.dirname(__file__), 'data')
def test_ignore_glob_path(self):
test = (
'common/src/test/sample.txt',
{'*/src/test/*': 'test ignore'},
{}
)
assert is_ignored(*test)
def test_ignore_single_path(self):
test = (
'common/src/test/sample.txt',
{'src/test/sample.txt': 'test ignore'},
{}
)
assert is_ignored(*test)
def test_ignore_single_file(self):
test = (
'common/src/test/sample.txt',
{'sample.txt': 'test ignore'},
{}
)
assert is_ignored(*test)
def test_ignore_glob_file(self):
test = (
'common/src/test/sample.txt',
{'*.txt': 'test ignore'},
{}
)
assert is_ignored(*test)
def test_resource_paths_with_single_file(self):
test_dir = self.extract_test_tar('ignore/user.tgz')
expected = [
'user',
'user/ignore.doc',
'user/src',
'user/src/ignore.doc',
'user/src/test',
'user/src/test/sample.txt'
]
test = [rel_path for abs_path, rel_path in resource_paths(test_dir,{'sample.doc': 'test ignore'})]
assert expected == sorted(test)
def test_resource_paths_with_multiple_files(self):
test_dir = self.extract_test_tar('ignore/user.tgz')
expected = [
'user',
'user/src',
'user/src/test',
'user/src/test/sample.doc',
'user/src/test/sample.txt'
]
test = [rel_path for abs_path, rel_path in resource_paths(test_dir,{'ignore.doc': 'test ignore'})]
assert expected == sorted(test)
def test_resource_paths_with_glob_file(self):
test_dir = self.extract_test_tar('ignore/user.tgz')
expected = [
'user',
'user/src',
'user/src/test',
'user/src/test/sample.txt'
]
test = [rel_path for abs_path, rel_path in resource_paths(test_dir,{'*.doc': 'test ignore'})]
assert expected == sorted(test)
def test_resource_paths_with_glob_path(self):
test_dir = self.extract_test_tar('ignore/user.tgz')
expected = [
'user',
'user/ignore.doc',
'user/src',
'user/src/ignore.doc'
]
test = [rel_path for abs_path, rel_path in resource_paths(test_dir,{'*/src/test': 'test ignore'})]
assert expected == sorted(test)
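

# For intuition only: a minimal sketch of glob-style matching consistent with the
# tests above. The real commoncode.ignore.is_ignored may differ; the helper below
# is hypothetical, not ScanCode's implementation.
import fnmatch

def _is_ignored_sketch(path, ignores, unignores):
    # A pattern may match the full path, a path suffix, or the bare file name.
    return any(
        fnmatch.fnmatch(path, pattern)
        or fnmatch.fnmatch(path, '*/' + pattern)
        or fnmatch.fnmatch(path.rsplit('/', 1)[-1], pattern)
        for pattern in ignores
    )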
```

| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
| yashdsaraf/scancode-toolkit | tests/scancode/test_ignore_files.py | Python | apache-2.0 | 4,127 | ["VisIt"] | d83e1a794ef6fcac98c48cd97e0608c400cfc6d57651b76db9e26b7567793341 |

```python
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkStripper(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkStripper(), 'Processing.',
('vtkPolyData',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
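
# The mixin does the heavy lifting here: it instantiates vtk.vtkStripper, exposes
# one vtkPolyData input port and one vtkPolyData output port, uses 'Processing.'
# as the progress text, and replaces the module docstring with the wrapped VTK
# class documentation (replaceDoc=True).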
```

| repo_name | path | language | license | size | keyword | text_hash |
|---|---|---|---|---|---|---|
| nagyistoce/devide | modules/vtk_basic/vtkStripper.py | Python | bsd-3-clause | 477 | ["VTK"] | 1fc9962225c5426517b25ffe356e17fd3b8862508bf230ca14095a7e8412e3f3 |

```python
import sys
import os
import os.path
import hashlib
import re
import fnmatch
import time
import shutil
import errno
import tempfile
import codecs
import cPickle as pickle
import stat
from stat import ST_MTIME
# We import only JumpScale, as j.system.fs is used before JumpScale is fully initialized. Thus the q cannot be imported yet
from JumpScale import j
import JumpScale.baselib.codetools #requirement for parsePath
from text import Text
toStr = Text.toStr
#from JumpScale.core.decorators import deprecated
# We do not use the j.system.platformtype here nor do we import the PlatformType as this would
# lead to circular imports and raise an exception
if not sys.platform.startswith('win'):
try:
import fcntl
except ImportError:
pass
_LOCKDICTIONARY = dict()
class LockException(Exception):
def __init__(self, message='Failed to get lock', innerException=None):
if innerException:
message += '\nProblem caused by:\n%s' % innerException
Exception.__init__(self, message)
self.innerException = innerException
class LockTimeoutException(LockException):
def __init__(self, message='Lock request timed out', innerException=None):
LockException.__init__(self, message, innerException)
class Exceptions:
LockException = LockException
LockTimeoutException = LockTimeoutException
def cleanupString(string, replacewith="_", regex="([^A-Za-z0-9])"):
'''Remove all non-numeric or alphanumeric characters'''
# Please don't use the logging system here. The logging system
# needs this method, using the logging system here would
# introduce a circular dependency. Be careful not to call other
# functions that use the logging system.
return re.sub(regex, replacewith, string)
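# e.g. cleanupString("my lock:name") returns "my_lock_name"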
def lock(lockname, locktimeout=60, reentry=False):
'''Take a system-wide interprocess exclusive lock. Default timeout is 60 seconds'''
j.logger.log('Lock with name: %s'% lockname,6)
try:
result = lock_(lockname, locktimeout, reentry)
except Exception, e:
raise LockException(innerException=e)
else:
if not result:
raise LockTimeoutException(message="Cannot acquire lock [%s]" % (lockname))
else:
return result
def lock_(lockname, locktimeout=60, reentry=False):
'''Take a system-wide interprocess exclusive lock.
Works similar to j.system.fs.lock but uses return values to denote lock
success instead of raising fatal errors.
This refactoring was mainly done to make the lock implementation easier
to unit-test.
'''
    #TODO This no longer uses fcntl on Unix, why?
LOCKPATH = os.path.join(j.dirs.tmpDir, 'locks')
lockfile = os.path.join(LOCKPATH, cleanupString(lockname))
if reentry:
_LOCKDICTIONARY[lockname] = _LOCKDICTIONARY.setdefault(lockname, 0) + 1
if not islocked(lockname, reentry=reentry):
if not j.system.fs.exists(LOCKPATH):
j.system.fs.createDir(LOCKPATH)
j.system.fs.writeFile(lockfile, str(os.getpid()))
return True
else:
locked = False
for i in xrange(locktimeout + 1):
locked = islocked(lockname, reentry)
if not locked:
break
else:
time.sleep(1)
if not locked:
return lock_(lockname, locktimeout, reentry)
else:
return False
def islocked(lockname, reentry=False):
'''Check if a system-wide interprocess exclusive lock is set'''
isLocked = True
LOCKPATH = os.path.join(j.dirs.tmpDir, 'locks')
lockfile = os.path.join(LOCKPATH, cleanupString(lockname))
try:
# read the pid from the lockfile
if j.system.fs.exists(lockfile):
pid = open(lockfile,'rb').read()
else:
return False
except (OSError, IOError), e:
# failed to read the lockfile
if e.errno != errno.ENOENT: # exception is not 'file or directory not found' -> file probably locked
raise
else:
# open succeeded without exceptions, continue
# check if a process with pid is still running
if pid and pid.isdigit():
pid = int(pid)
if reentry and pid == os.getpid():
return False
if j.system.fs.exists(lockfile) and (not pid or not j.system.process.isPidAlive(pid)):
#cleanup system, pid not active, remove the lockfile
j.system.fs.remove(lockfile)
isLocked = False
return isLocked
def unlock(lockname):
"""Unlock system-wide interprocess lock"""
j.logger.log('UnLock with name: %s'% lockname,6)
try:
unlock_(lockname)
except Exception, msg:
raise RuntimeError("Cannot unlock [%s] with ERROR: %s" % (lockname, str(msg)))
def unlock_(lockname):
'''Unlock system-wide interprocess lock
Works similar to j.system.fs.unlock but uses return values to denote unlock
success instead of raising fatal errors.
This refactoring was mainly done to make the lock implementation easier
to unit-test.
'''
LOCKPATH = os.path.join(j.dirs.tmpDir, 'locks')
lockfile = os.path.join(LOCKPATH, cleanupString(lockname))
if lockname in _LOCKDICTIONARY:
_LOCKDICTIONARY[lockname] -= 1
if _LOCKDICTIONARY[lockname] > 0:
return
# read the pid from the lockfile
if j.system.fs.exists(lockfile):
try:
pid = open(lockfile,'rb').read()
except:
return
if int(pid) != os.getpid():
j.errorconditionhandler.raiseWarning("Lock %r not owned by this process" %lockname)
return
j.system.fs.remove(lockfile)
# else:
# j.console.echo("Lock %r not found"%lockname)
class FileLock(object):
'''Context manager for file-based locks
Context managers were introduced in Python 2.5, see the documentation on the
'with' statement for more information:
* http://www.python.org/dev/peps/pep-0343/
* http://pyref.infogami.com/with
@see: L{lock}
@see: L{unlock}
'''
def __init__(self, lock_name, reentry=False):
self.lock_name = lock_name
self.reentry = reentry
def __enter__(self):
lock(self.lock_name, reentry=self.reentry)
def __exit__(self, *exc_info):
unlock(self.lock_name)
def __call__(self, func):
def wrapper(*args, **kwargs):
lock(self.lock_name, reentry=self.reentry)
try:
return func(*args, **kwargs)
finally:
unlock(self.lock_name)
return wrapper
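
# Usage sketch (assumes an initialized JumpScale environment, i.e. j.dirs.tmpDir
# and j.logger are available):
#
#   with FileLock('mytask'):
#       pass  # critical section; the lock is released on exit
#
#   @FileLock('mytask')
#   def critical():
#       pass  # the lock is held for the duration of each call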
class SystemFS:
exceptions = Exceptions
def __init__(self):
self.logenable=True
self.loglevel=5
def log(self,msg,level=5,category=""):
# print msg
if level<self.loglevel+1 and self.logenable:
j.logger.log(msg,category="system.fs.%s"%category,level=level)
def copyFile(self, fileFrom, to ,createDirIfNeeded=False,skipProtectedDirs=False,overwriteFile=True):
"""Copy file
Copies the file from C{fileFrom} to the file or directory C{to}.
If C{to} is a directory, a file with the same basename as C{fileFrom} is
created (or overwritten) in the directory specified.
Permission bits are copied.
@param fileFrom: Source file path name
@type fileFrom: string
@param to: Destination file or folder path name
@type to: string
"""
if ((fileFrom is None) or (to is None)):
raise TypeError("No parameters given to system.fs.copyFile from %s, to %s" % (fileFrom, to))
if j.system.fs.isFile(fileFrom):
# Create target folder first, otherwise copy fails
if createDirIfNeeded:
target_folder = os.path.dirname(to)
self.createDir(target_folder)
if overwriteFile==False:
if self.exists(to):
return
            if skipProtectedDirs and j.dirs.checkInProtectedDir(to):
                raise RuntimeError("did not copyFile from:%s to:%s because in protected dir"%(fileFrom,to))
try:
shutil.copy(fileFrom, to)
self.log("Copied file from %s to %s" % (fileFrom,to),6)
except Exception,e:
raise RuntimeError("Could not copy file from %s to %s, error %s" % (fileFrom,to,e))
else:
raise RuntimeError("Can not copy file, file: %s does not exist in system.fs.copyFile" % ( fileFrom ) )
def moveFile(self, source, destin):
"""Move a File from source path to destination path
@param source: string (Source file path)
@param destination: string (Destination path the file should be moved to )
"""
self.log('Move file from %s to %s'% (source, destin),6)
if ((source is None) or (destin is None)):
raise TypeError("Not enough parameters given to system.fs.moveFile: move from %s, to %s" % (source, destin))
if not j.system.fs.isFile(source):
raise RuntimeError("The specified source path in system.fs.moveFile does not exist or is no file: %s" % source)
try:
self.move(source, destin)
except Exception,e:
raise RuntimeError("File could not be moved...in system.fs.moveFile: from %s to %s , Error %s" % (source, destin,str(e)))
def renameFile(self, filePath, new_name):
"""
OBSOLETE
"""
self.log("WARNING: renameFIle should not be used")
return self.move(filePath,new_name)
def removeIrrelevantFiles(self,path,followSymlinks=True):
ext=["pyc","bak"]
for path in self.listFilesInDir(path,recursive=True,followSymlinks=followSymlinks):
if self.getFileExtension(path) in ext:
self.remove(path)
def remove(self, path):
"""Remove a File
@param path: string (File path required to be removed
"""
self.log('Remove file with path: %s'%path,6)
        if path is None:
            raise TypeError('Not enough parameters passed to system.fs.removeFile: %s'%path)
        if len(path)>0 and path[-1]==os.sep:
            path=path[:-1]
if os.path.islink(path):
os.unlink(path)
if self.exists(path):
try:
os.remove(path)
except:
raise RuntimeError("File with path: %s could not be removed\nDetails: %s"%(path, sys.exc_type))
self.log('Done removing file with path: %s'%path)
def createEmptyFile(self, filename):
"""Create an empty file
@param filename: string (file path name to be created)
"""
self.log('creating an empty file with name & path: %s'%filename,9)
if filename is None:
            raise TypeError('Not enough parameters passed to system.fs.createEmptyFile: %s'%filename)
try:
open(filename, "w").close()
self.log('Empty file %s has been successfully created'%filename)
except Exception:
raise RuntimeError("Failed to create an empty file with the specified filename: %s"%filename)
def createDir(self, newdir,skipProtectedDirs=False):
"""Create new Directory
@param newdir: string (Directory path/name)
if newdir was only given as a directory name, the new directory will be created on the default path,
if newdir was given as a complete path with the directory name, the new directory will be created in the specified path
"""
        if newdir == '' or newdir == None:
            raise TypeError('The newdir-parameter of system.fs.createDir() is None or an empty string.')
        if newdir.find("file://")<>-1:
            raise RuntimeError("Cannot use file notation here")
        self.log('Creating directory if not exists %s' % toStr(newdir), 8)
        if skipProtectedDirs and j.dirs.checkInProtectedDir(newdir):
            raise RuntimeError("did not create dir:%s because in protected dir"%newdir)
if self.isLink(newdir):
self.unlink(newdir)
if self.isDir(newdir):
self.log('Directory trying to create: [%s] already exists' % toStr(newdir), 8)
else:
head, tail = os.path.split(newdir)
if head and not j.system.fs.isDir(head):
self.createDir(head)
if tail:
try:
os.mkdir(newdir)
# print "mkdir:%s"%newdir
except OSError, e:
if e.errno != os.errno.EEXIST: #File exists
raise
self.log('Created the directory [%s]' % toStr(newdir), 8)
def copyDirTree(self, src, dst, keepsymlinks = False, eraseDestination = False, skipProtectedDirs=False, overwriteFiles=True,applyHrdOnDestPaths=None):
"""Recursively copy an entire directory tree rooted at src.
The dst directory may already exist; if not,
it will be created as well as missing parent directories
@param src: string (source of directory tree to be copied)
@param dst: string (path directory to be copied to...should not already exist)
@param keepsymlinks: bool (True keeps symlinks instead of copying the content of the file)
        @param eraseDestination: bool (Set to True if you want to erase destination first, be careful, this can erase directories)
@param overwriteFiles: if True will overwrite files, otherwise will not overwrite when destination exists
"""
if src.find("file://")<>-1 or dst.find("file://")<>-1:
raise RuntimeError("Cannot use file notation here")
self.log('Copy directory tree from %s to %s'% (src, dst),6)
if ((src is None) or (dst is None)):
raise TypeError('Not enough parameters passed in system.fs.copyDirTree to copy directory from %s to %s '% (src, dst))
if j.system.fs.isDir(src):
names = os.listdir(src)
if not j.system.fs.exists(dst):
self.createDir(dst,skipProtectedDirs=skipProtectedDirs)
errors = []
for name in names:
#is only for the name
if applyHrdOnDestPaths<>None:
name2=applyHrdOnDestPaths.applyOnContent(name)
else:
name2=name
srcname = j.system.fs.joinPaths(src, name)
dstname = j.system.fs.joinPaths(dst, name2)
if eraseDestination and self.exists( dstname ):
if self.isDir( dstname , False ) :
self.removeDirTree( dstname )
if self.isLink(dstname):
self.unlink( dstname )
if keepsymlinks and j.system.fs.isLink(srcname):
linkto = j.system.fs.readlink(srcname)
j.system.fs.symlink(linkto, dstname, overwriteFiles)
elif j.system.fs.isDir(srcname):
#print "1:%s %s"%(srcname,dstname)
j.system.fs.copyDirTree(srcname, dstname, keepsymlinks, eraseDestination,skipProtectedDirs=skipProtectedDirs,overwriteFiles=overwriteFiles,applyHrdOnDestPaths=applyHrdOnDestPaths )
else:
#print "2:%s %s"%(srcname,dstname)
self.copyFile(srcname, dstname ,createDirIfNeeded=False,skipProtectedDirs=skipProtectedDirs,overwriteFile=overwriteFiles)
else:
raise RuntimeError('Source path %s in system.fs.copyDirTree is not a directory'% src)
def removeDirTree(self, path, onlyLogWarningOnRemoveError=False):
"""Recursively delete a directory tree.
@param path: the path to be removed
"""
self.log('Removing directory tree with path: %s'%path,6)
        if path is None:
            raise ValueError('Path is None in system.fs.removeDirTree')
        if self.isLink(path):
            self.remove(path)
        if self.isFile(path):
            self.remove(path)
if(j.system.fs.exists(path)):
if(self.isDir(path)):
if onlyLogWarningOnRemoveError:
def errorHandler(shutilFunc, shutilPath, shutilExc_info):
self.log('WARNING: could not remove %s while recursively deleting %s' % (shutilPath, path), 2)
self.log('Trying to remove Directory tree with path: %s (warn on errors)'%path)
shutil.rmtree(path, onerror=errorHandler)
else:
self.log('Trying to remove Directory tree with path: %s' % path)
shutil.rmtree(path)
self.log('Directory tree with path: %s is successfully removed' % path)
else:
raise ValueError("Specified path: %s is not a Directory in system.fs.removeDirTree" % path)
def removeDir(self, path):
"""Remove a Directory
@param path: string (Directory path that should be removed)
"""
self.log('Removing the directory with path: %s'%path,6)
if path is None:
raise TypeError('Path is None in system.fs.removeDir')
if(j.system.fs.exists(path)):
if(j.system.fs.isDir(path)):
os.rmdir(path)
self.log('Directory with path: %s is successfully removed'%path)
else:
raise ValueError("Path: %s is not a Directory in system.fs.removeDir"% path)
else:
raise RuntimeError("Path: %s does not exist in system.fs.removeDir"% path)
def changeDir(self, path):
"""Changes Current Directory
@param path: string (Directory path to be changed to)
"""
self.log('Changing directory to: %s'%path,6)
if path is None:
raise TypeError('Path is not given in system.fs.changeDir')
if(j.system.fs.exists(path)):
if(j.system.fs.isDir(path)):
os.chdir(path)
newcurrentPath = os.getcwd()
self.log('Directory successfully changed to %s'%path)
return newcurrentPath
else:
raise ValueError("Path: %s in system.fs.changeDir is not a Directory"% path)
else:
raise RuntimeError("Path: %s in system.fs.changeDir does not exist"% path)
def moveDir(self, source, destin):
"""Move Directory from source to destination
@param source: string (Source path where the directory should be removed from)
@param destin: string (Destination path where the directory should be moved into)
"""
self.log('Moving directory from %s to %s'% (source, destin),6)
if ((source is None) or (destin is None)):
raise TypeError('Not enough passed parameters to moveDirectory from %s to %s in system.fs.moveDir '% (source, destin))
if(j.system.fs.isDir(source)):
j.system.fs.move(source, destin)
self.log('Directory is successfully moved from %s to %s'% (source, destin))
else:
raise RuntimeError("Specified Source path: %s does not exist in system.fs.moveDir"% source)
def joinPaths(self,*args):
"""Join one or more path components.
If any component is an absolute path, all previous components are thrown away, and joining continues.
@param path1: string
@param path2: string
@param path3: string
@param .... : string
@rtype: Concatenation of path1, and optionally path2, etc...,
with exactly one directory separator (os.sep) inserted between components, unless path2 is empty.
"""
args = [ toStr(x) for x in args ]
self.log('Join paths %s'%(str(args)),9)
if args is None:
raise TypeError('Not enough parameters %s'%(str(args)))
if j.system.platformtype.isWindows():
args2=[]
for item in args:
item=item.replace("/","\\")
while len(item)>0 and item[0]=="\\":
item=item[1:]
args2.append(item)
args=args2
try:
return os.path.join(*args)
except Exception,e:
raise RuntimeError("Failed to join paths: %s, Error %s "%(str(args),str(e)))
def getDirName(self, path,lastOnly=False,levelsUp=None):
"""
Return a directory name from pathname path.
@param path the path to find a directory within
@param lastOnly means only the last part of the path which is a dir (overrides levelsUp to 0)
@param levelsUp means, return the parent dir levelsUp levels up
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=0) would return something
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=1) would return bin
e.g. ...getDirName("/opt/qbase/bin/something/test.py", levelsUp=10) would raise an error
"""
self.log('Get directory name of path: %s' % path,9)
if path is None:
raise TypeError('Path is not passed in system.fs.getDirName')
dname=os.path.dirname(path)
dname=dname.replace("/",os.sep)
dname=dname.replace("//",os.sep)
dname=dname.replace("\\",os.sep)
if lastOnly:
dname=dname.split(os.sep)[-1]
return dname
if levelsUp<>None:
parts=dname.split(os.sep)
if len(parts)-levelsUp>0:
return parts[len(parts)-levelsUp-1]
else:
raise RuntimeError ("Cannot find part of dir %s levels up, path %s is not long enough" % (levelsUp,path))
return dname+os.sep
def getBaseName(self, path):
"""Return the base name of pathname path."""
# self.log('Get basename for path: %s'%path,9)
if path is None:
            raise TypeError('Path is not passed in system.fs.getBaseName')
try:
return os.path.basename(path.rstrip(os.path.sep))
except Exception,e:
raise RuntimeError('Failed to get base name of the given path: %s, Error: %s'% (path,str(e)))
def pathShorten(self, path):
"""
Clean path (change /var/www/../lib to /var/lib). On Windows, if the
path exists, the short path name is returned.
@param path: Path to clean
@type path: string
@return: Cleaned (short) path
@rtype: string
"""
cleanedPath = os.path.normpath(path)
if j.system.platformtype.isWindows() and self.exists(cleanedPath):
# Only execute on existing paths, otherwise an error will be raised
import win32api
cleanedPath = win32api.GetShortPathName(cleanedPath)
# Re-add '\' if original path had one
sep = os.path.sep
if path and path[-1] == sep and cleanedPath[-1] != sep:
cleanedPath = "%s%s" % (cleanedPath, sep)
return cleanedPath
def pathClean(self,path):
"""
        goal is to get an equal representation of / and \ in relation to os.sep
"""
path=path.replace("/",os.sep)
path=path.replace("//",os.sep)
path=path.replace("\\",os.sep)
path=path.replace("\\\\",os.sep)
#path=self.pathNormalize(path)
path=path.strip()
return path
def pathDirClean(self,path):
path=path+os.sep
return self.pathClean(path)
    def dirEqual(self,path1,path2):
        return self.pathDirClean(path1)==self.pathDirClean(path2)
def pathNormalize(self, path,root=""):
"""
paths are made absolute & made sure they are in line with os.sep
@param path: path to normalize
@root is std the application you are in, can overrule
"""
if root=="":
root=self.getcwd()
path=self.pathClean(path)
if len(path)>0 and path[0]<>os.sep:
path=self.joinPaths(root,path)
return path
def pathRemoveDirPart(self,path,toremove,removeTrailingSlash=False):
"""
        goal: remove dir parts of a dirpath, e.g. a basepath which is not needed;
        will look for the part to remove in the full path, but only matches full dirs
"""
path = self.pathNormalize(path)
toremove = self.pathNormalize(toremove)
if self.pathClean(toremove)==self.pathClean(path):
return ""
path=self.pathClean(path)
path=path.replace(self.pathDirClean(toremove),"")
if removeTrailingSlash:
if len(path)>0 and path[0]==os.sep:
path=path[1:]
path=self.pathClean(path)
return path
def getParentDirName(self,path):
"""
returns parent of path (only for dirs)
returns empty string when there is no parent
"""
path=self.pathDirClean(path)
if len(path.split(os.sep))>2:
return j.system.fs.getDirName(path,lastOnly=True,levelsUp=1) #go 1 level up to find name of parent
else:
return ""
def processPathForDoubleDots(self,path):
"""
/root/somepath/.. would become /root
/root/../somepath/ would become /somepath
result will always be with / slashes
"""
# print "processPathForDoubleDots:%s"%path
path=self.pathClean(path)
path=path.replace("\\","/")
result=[]
for item in path.split("/"):
if item=="..":
if result==[]:
raise RuntimeError("Cannot processPathForDoubleDots for paths with only ..")
else:
result.pop()
else:
result.append(item)
return "/".join(result)
def getParent(self, path):
"""
Returns the parent of the path:
/dir1/dir2/file_or_dir -> /dir1/dir2/
/dir1/dir2/ -> /dir1/
@todo why do we have 2 implementations which are almost the same see getParentDirName()
"""
parts = path.split(os.sep)
if parts[-1] == '':
parts=parts[:-1]
parts=parts[:-1]
if parts==['']:
return os.sep
return os.sep.join(parts)
def getFileExtension(self,path):
extcand=path.split(".")
if len(extcand)>0:
ext=extcand[-1]
else:
ext=""
return ext
def chown(self,path,user):
from pwd import getpwnam
        getpwnam(user) # raises KeyError early if the user does not exist
uid=getpwnam(user).pw_uid
gid=getpwnam(user).pw_gid
os.chown(path, uid, gid)
for root, dirs, files in os.walk(path):
for ddir in dirs:
path = os.path.join(root, ddir)
try:
os.chown(path, uid, gid)
except Exception,e:
if str(e).find("No such file or directory")==-1:
raise RuntimeError("%s"%e)
for file in files:
path = os.path.join(root, file)
try:
os.chown(path, uid, gid)
except Exception,e:
if str(e).find("No such file or directory")==-1:
raise RuntimeError("%s"%e)
def chmod(self,path,permissions):
"""
@param permissions e.g. 0o660 (USE OCTAL !!!)
"""
os.chmod(path,permissions)
for root, dirs, files in os.walk(path):
for ddir in dirs:
path = os.path.join(root, ddir)
try:
os.chmod(path,permissions)
except Exception,e:
if str(e).find("No such file or directory")==-1:
raise RuntimeError("%s"%e)
for file in files:
path = os.path.join(root, file)
try:
os.chmod(path,permissions)
except Exception,e:
if str(e).find("No such file or directory")==-1:
raise RuntimeError("%s"%e)
def parsePath(self,path, baseDir="",existCheck=True, checkIsFile=False):
"""
        parse paths of the form /root/tmp/33_adoc.doc into the path and the priority
        (the number before the leading "_" at the beginning of the name); also returns the filename
        checks if the path can be found; if not, this will fail
        when filename=="" a directory has been parsed
        if baseDir is specified, that part of the path will be removed
example:
j.system.fs.parsePath("/opt/qbase3/apps/specs/myspecs/definitions/cloud/datacenter.txt","/opt/qbase3/apps/specs/myspecs/",existCheck=False)
@param path is existing path to a file
@param baseDir, is the absolute part of the path not required
@return list of dirpath,filename,extension,priority
priority = 0 if not specified
"""
#make sure only clean path is left and the filename is out
if existCheck and not self.exists(path):
raise RuntimeError("Cannot find file %s when importing" % path)
if checkIsFile and not self.isFile(path):
raise RuntimeError("Path %s should be a file (not e.g. a dir), error when importing" % path)
extension=""
if self.isDir(path):
name=""
path=self.pathClean(path)
else:
name=self.getBaseName(path)
path=self.pathClean(path)
#make sure only clean path is left and the filename is out
path=self.getDirName(path)
#find extension
regexToFindExt="\.\w*$"
if j.codetools.regex.match(regexToFindExt,name):
extension=j.codetools.regex.findOne(regexToFindExt,name).replace(".","")
#remove extension from name
name=j.codetools.regex.replace(regexToFindExt,regexFindsubsetToReplace=regexToFindExt, replaceWith="", text=name)
if baseDir<>"":
path=self.pathRemoveDirPart(path,baseDir)
if name=="":
dirOrFilename=j.system.fs.getDirName(path,lastOnly=True)
else:
dirOrFilename=name
#check for priority
regexToFindPriority="^\d*_"
if j.codetools.regex.match(regexToFindPriority,dirOrFilename):
#found priority in path
priority=j.codetools.regex.findOne(regexToFindPriority,dirOrFilename).replace("_","")
#remove priority from path
name=j.codetools.regex.replace(regexToFindPriority,regexFindsubsetToReplace=regexToFindPriority, replaceWith="", text=name)
else:
priority=0
return path,name,extension,priority #if name =="" then is dir
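    # e.g. parsePath("/opt/qbase3/apps/specs/myspecs/definitions/cloud/datacenter.txt",
    #                "/opt/qbase3/apps/specs/myspecs/", existCheck=False)
    # would return roughly ("definitions/cloud/", "datacenter", "txt", 0)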
def getcwd(self):
"""get current working directory
@rtype: string (current working directory path)
"""
self.log('Get current working directory',9)
try:
return os.getcwd()
except Exception, e:
raise RuntimeError('Failed to get current working directory')
def readlink(self, path):
"""Works only for unix
Return a string representing the path to which the symbolic link points.
"""
        if path is None:
            raise TypeError('Path is not passed in system.fs.readLink')
        while path[-1]=="/" or path[-1]=="\\":
            path=path[:-1]
        self.log('Read link with path: %s'%path,8)
if j.system.platformtype.isUnix():
try:
return os.readlink(path)
except Exception, e:
raise RuntimeError('Failed to read link with path: %s \nERROR: %s'%(path, str(e)))
elif j.system.platformtype.isWindows():
raise RuntimeError('Cannot readLink on windows')
def removeLinks(self,path):
"""
find all links & remove
"""
if not self.exists(path):
return
items=self._listAllInDir(path=path, recursive=True, followSymlinks=False,listSymlinks=True)
items=[item for item in items[0] if j.system.fs.isLink(item)]
for item in items:
j.system.fs.unlink(item)
def _listInDir(self, path,followSymlinks=True):
"""returns array with dirs & files in directory
@param path: string (Directory path to list contents under)
"""
if path is None:
raise TypeError('Path is not passed in system.fs.listDir')
if(j.system.fs.exists(path)):
if(j.system.fs.isDir(path)) or (followSymlinks and self.checkDirOrLink(path)):
names = os.listdir(path)
return names
else:
raise ValueError("Specified path: %s is not a Directory in system.fs.listDir"% path)
else:
raise RuntimeError("Specified path: %s does not exist in system.fs.listDir"% path)
def listFilesInDir(self, path, recursive=False, filter=None, minmtime=None, maxmtime=None,depth=None, case_sensitivity='os',exclude=[],followSymlinks=True,listSymlinks=False):
"""Retrieves list of files found in the specified directory
@param path: directory path to search in
@type path: string
@param recursive: recursively look in all subdirs
@type recursive: boolean
@param filter: unix-style wildcard (e.g. *.py) - this is not a regular expression
@type filter: string
        @param minmtime: if not None, only return files whose last modification time > minmtime (epoch in seconds)
        @type minmtime: integer
        @param maxmtime: if not None, only return files whose last modification time < maxmtime (epoch in seconds)
        @type maxmtime: integer
        @param depth: number of levels deep to descend into subdirectories
        @param exclude: list of fnmatch-style patterns; matching entries are excluded
        @rtype: list
"""
if depth<>None:
depth=int(depth)
self.log('List files in directory with path: %s' % path,9)
if depth==0:
depth=None
# if depth<>None:
# depth+=1
filesreturn,depth=self._listAllInDir(path, recursive, filter, minmtime, maxmtime,depth,type="f", case_sensitivity=case_sensitivity,exclude=exclude,followSymlinks=followSymlinks,listSymlinks=listSymlinks)
return filesreturn
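    # e.g. listFilesInDir('/var/log', recursive=True, filter='*.log', exclude=['*.gz'])
    # returns the full paths of all matching files, skipping the excluded patterns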
def listFilesAndDirsInDir(self, path, recursive=False, filter=None, minmtime=None, maxmtime=None,depth=None,type="fd",followSymlinks=True,listSymlinks=False):
"""Retrieves list of files found in the specified directory
@param path: directory path to search in
@type path: string
@param recursive: recursively look in all subdirs
@type recursive: boolean
@param filter: unix-style wildcard (e.g. *.py) - this is not a regular expression
@type filter: string
        @param minmtime: if not None, only return files whose last modification time > minmtime (epoch in seconds)
        @type minmtime: integer
        @param maxmtime: if not None, only return files whose last modification time < maxmtime (epoch in seconds)
        @type maxmtime: integer
        @param depth: number of levels deep to descend into subdirectories
        @param type: string containing "f" and/or "d" ("f" to find files, "d" to find dirs)
        @rtype: list
"""
if depth<>None:
depth=int(depth)
self.log('List files in directory with path: %s' % path,9)
if depth==0:
depth=None
# if depth<>None:
# depth+=1
filesreturn,depth=self._listAllInDir(path, recursive, filter, minmtime, maxmtime,depth,type=type,followSymlinks=followSymlinks,listSymlinks=listSymlinks)
return filesreturn
def _listAllInDir(self, path, recursive, filter=None, minmtime=None, maxmtime=None,depth=None,type="df", case_sensitivity='os',exclude=[],followSymlinks=True,listSymlinks=True):
"""
# There are 3 possible options for case-sensitivity for file names
# 1. `os`: the same behavior as the OS
# 2. `sensitive`: case-sensitive comparison
# 3. `insensitive`: case-insensitive comparison
"""
dircontent = self._listInDir(path)
filesreturn = []
if case_sensitivity.lower() == 'sensitive':
matcher = fnmatch.fnmatchcase
elif case_sensitivity.lower() == 'insensitive':
def matcher(fname, pattern):
return fnmatch.fnmatchcase(fname.lower(), pattern.lower())
else:
matcher = fnmatch.fnmatch
for direntry in dircontent:
fullpath = self.joinPaths(path, direntry)
if followSymlinks:
if self.isLink(fullpath):
fullpath=self.readlink(fullpath)
if self.isFile(fullpath) and "f" in type:
includeFile = False
if (filter is None) or matcher(direntry, filter):
if (minmtime is not None) or (maxmtime is not None):
mymtime = os.stat(fullpath)[ST_MTIME]
if (minmtime is None) or (mymtime > minmtime):
if (maxmtime is None) or (mymtime < maxmtime):
includeFile = True
else:
includeFile = True
if includeFile:
if exclude<>[]:
for excludeItem in exclude:
if matcher(direntry, excludeItem):
includeFile=False
if includeFile:
filesreturn.append(fullpath)
elif self.isDir(fullpath):
if "d" in type:
if not(listSymlinks==False and self.isLink(fullpath)):
filesreturn.append(fullpath)
if recursive:
if depth<>None and depth<>0:
depth=depth-1
if depth==None or depth<>0:
exclmatch=False
if exclude<>[]:
for excludeItem in exclude:
if matcher(fullpath, excludeItem):
exclmatch=True
if exclmatch==False:
if not(followSymlinks==False and self.isLink(fullpath)):
r,depth = self._listAllInDir(fullpath, recursive, filter, minmtime, maxmtime,depth=depth,type=type,exclude=exclude,followSymlinks=followSymlinks,listSymlinks=listSymlinks)
if len(r) > 0:
filesreturn.extend(r)
elif self.isLink(fullpath) and followSymlinks==False and listSymlinks:
filesreturn.append(fullpath)
return filesreturn,depth
def checkDirOrLink(self,fullpath):
"""
check if path is dir or link to a dir
"""
return self.checkDirOrLinkToDir(fullpath)
def checkDirOrLinkToDir(self,fullpath):
"""
check if path is dir or link to a dir
"""
if not self.isLink(fullpath) and os.path.isdir(fullpath):
return True
if self.isLink(fullpath):
link=self.readlink(fullpath)
if self.isDir(link):
return True
return False
def changeFileNames(self,toReplace,replaceWith,pathToSearchIn,recursive=True, filter=None, minmtime=None, maxmtime=None):
"""
        @param toReplace: substring to replace, e.g. "{name}"
        @param replaceWith: replacement string, e.g. "jumpscale"
"""
paths=self.listFilesInDir(pathToSearchIn, recursive, filter, minmtime, maxmtime)
for path in paths:
path2=path.replace(toReplace,replaceWith)
if path2<>path:
self.renameFile(path,path2)
def replaceWordsInFiles(self,pathToSearchIn,templateengine,recursive=True, filter=None, minmtime=None, maxmtime=None):
"""
apply templateengine to list of found files
@param templateengine =te #example below
te=j.codetools.templateengine.new()
te.add("name",self.jpackages.name)
te.add("description",self.jpackages.description)
te.add("version",self.jpackages.version)
"""
paths=self.listFilesInDir(pathToSearchIn, recursive, filter, minmtime, maxmtime)
for path in paths:
templateengine.replaceInsideFile(path)
def listDirsInDir(self,path,recursive=False,dirNameOnly=False,findDirectorySymlinks=True):
""" Retrieves list of directories found in the specified directory
@param path: string represents directory path to search in
@rtype: list
"""
self.log('List directories in directory with path: %s, recursive = %s' % (path, str(recursive)),9)
#if recursive:
#if not j.system.fs.exists(path):
#raise ValueError('Specified path: %s does not exist' % path)
#if not j.system.fs.isDir(path):
#raise ValueError('Specified path: %s is not a directory' % path)
#result = []
#os.path.walk(path, lambda a, d, f: a.append('%s%s' % (d, os.path.sep)), result)
#return result
files=self._listInDir(path,followSymlinks=True)
filesreturn=[]
for file in files:
fullpath=os.path.join(path,file)
if (findDirectorySymlinks and self.checkDirOrLink(fullpath)) or self.isDir(fullpath):
if dirNameOnly:
filesreturn.append(file)
else:
filesreturn.append(fullpath)
if recursive:
filesreturn.extend(self.listDirsInDir(fullpath,recursive,dirNameOnly,findDirectorySymlinks))
return filesreturn
def listPyScriptsInDir(self, path,recursive=True, filter="*.py"):
""" Retrieves list of python scripts (with extension .py) in the specified directory
@param path: string represents the directory path to search in
@rtype: list
"""
result = []
for file in j.system.fs.listFilesInDir(path,recursive=recursive, filter=filter):
if file.endswith(".py"):
filename = file.split(os.sep)[-1]
scriptname = filename.rsplit(".", 1)[0]
result.append(scriptname)
return result
def move(self, source, destin):
"""Main Move function
@param source: string (If the specified source is a File....Calls moveFile function)
(If the specified source is a Directory....Calls moveDir function)
"""
if not j.system.fs.exists(source):
raise IOError('%s does not exist'%source)
shutil.move(source, destin)
def exists(self, path,followlinks=True):
"""Check if the specified path exists
@param path: string
@rtype: boolean (True if path refers to an existing path)
"""
if path is None:
raise TypeError('Path is not passed in system.fs.exists')
if os.path.exists(path) or os.path.islink(path):
if self.isLink(path) and followlinks:
#self.log('path %s exists' % str(path.encode("utf-8")),8)
relativelink = self.readlink(path)
newpath = self.joinPaths(self.getParent(path), relativelink)
return self.exists(newpath)
else:
return True
#self.log('path %s does not exist' % str(path.encode("utf-8")),8)
return False
def symlink(self, path, target, overwriteTarget=False):
"""Create a symbolic link
@param path: source path desired to create a symbolic link for
@param target: destination path required to create the symbolic link at
@param overwriteTarget: boolean indicating whether target can be overwritten
"""
self.log('Getting symlink for path: %s to target %s'% (path, target),7)
if ( path is None):
raise TypeError('Path is None in system.fs.symlink')
if target[-1]=="/":
target=target[:-1]
if overwriteTarget and (self.exists(target) or self.isLink(target)):
if self.isLink(target):
self.unlink(target)
elif self.isDir(target):
self.removeDirTree(target)
else:
self.remove(target)
dir = j.system.fs.getDirName(target)
if not j.system.fs.exists(dir):
j.system.fs.createDir(dir)
if j.system.platformtype.isUnix():
self.log( "Creating link from %s to %s" %( path, target) )
os.symlink(path, target)
elif j.system.platformtype.isWindows():
path=path.replace("+",":")
cmd="junction \"%s\" \"%s\"" % (self.pathNormalize(target).replace("\\","/"),self.pathNormalize(path).replace("\\","/"))
print cmd
j.system.process.execute(cmd)
def hardlinkFile(self, source, destin):
"""Create a hard link pointing to source named destin. Availability: Unix.
@param source: string
@param destin: string
        @rtype: None (os.link does not return a value)
"""
self.log('Create a hard link pointing to %s named %s'% (source, destin),7)
if (source is None):
raise TypeError('Source path is not passed in system.fs.hardlinkFile')
try:
if j.system.platformtype.isUnix():
return os.link(source, destin)
else:
raise RuntimeError('Cannot create a hard link on windows')
except:
raise RuntimeError('Failed to hardLinkFile from %s to %s'% (source, destin))
def checkDirParam(self,path):
if(path.strip()==""):
raise TypeError("path parameter cannot be empty.")
path=path.replace("//","/")
path=path.replace("\\\\","/")
path=path.replace("\\","/")
if path[-1]<>"/":
path=path+"/"
path=path.replace("/",os.sep)
return path
def isDir(self, path, followSoftlink=True):
"""Check if the specified Directory path exists
@param path: string
@param followSoftlink: boolean
@rtype: boolean (True if directory exists)
"""
if ( path is None):
raise TypeError('Directory path is None in system.fs.isDir')
if not followSoftlink and self.isLink( path ) :
return False
return self.checkDirOrLinkToDir(path)
def isEmptyDir(self, path):
"""Check if the specified directory path is empty
@param path: string
@rtype: boolean (True if directory is empty)
"""
if ( path is None):
raise TypeError('Directory path is None in system.fs.isEmptyDir')
try:
if(self._listInDir(path) == []):
self.log('path %s is an empty directory'%path,9)
return True
self.log('path %s is not an empty directory'%path,9)
return False
except:
raise RuntimeError('Failed to check if the specified path: %s is an empty directory...in system.fs.isEmptyDir'% path)
def isFile(self, path, followSoftlink = True):
"""Check if the specified file exists for the given path
@param path: string
@param followSoftlink: boolean
@rtype: boolean (True if file exists for the given path)
"""
self.log("isfile:%s" % path,8)
if ( path is None):
raise TypeError('File path is None in system.fs.isFile')
try:
if not followSoftlink and self.isLink( path ) :
self.log('path %s is a file'%path,8)
return True
if(os.path.isfile(path)):
self.log('path %s is a file'%path,8)
return True
self.log('path %s is not a file'%path,8)
return False
except:
raise RuntimeError('Failed to check if the specified path: %s is a file...in system.fs.isFile'% path)
def isExecutable(self, path):
statobj=self.statPath(path)
return not (stat.S_IXUSR & statobj.st_mode==0)
def isLink(self, path,checkJunction=False):
"""Check if the specified path is a link
@param path: string
@rtype: boolean (True if the specified path is a link)
"""
        if ( path is None):
            raise TypeError('Link path is None in system.fs.isLink')
        if path[-1]==os.sep:
            path=path[:-1]
if checkJunction and j.system.platformtype.isWindows():
cmd="junction %s" % path
try:
result=j.system.process.execute(cmd)
except Exception,e:
raise RuntimeError("Could not execute junction cmd, is junction installed? Cmd was %s."%cmd)
if result[0]<>0:
raise RuntimeError("Could not execute junction cmd, is junction installed? Cmd was %s."%cmd)
if result[1].lower().find("substitute name")<>-1:
return True
else:
return False
if(os.path.islink(path)):
self.log('path %s is a link'%path,8)
return True
self.log('path %s is not a link'%path,8)
return False
def isMount(self, path):
"""Return true if pathname path is a mount point:
A point in a file system where a different file system has been mounted.
"""
self.log('Check if path %s is a mount point'%path,8)
if path is None:
raise TypeError('Path is passed null in system.fs.isMount')
return os.path.ismount(path)
def statPath(self, path):
"""Perform a stat() system call on the given path
@rtype: object whose attributes correspond to the members of the stat structure
"""
if path is None:
raise TypeError('Path is None in system.fs.statPath')
try:
return os.stat(path)
except:
raise OSError('Failed to perform stat system call on the specific path: %s in system.fs.statPath' % (path))
def renameDir(self, dirname, newname,overwrite=False):
"""Rename Directory from dirname to newname
@param dirname: string (Directory original name)
@param newname: string (Directory new name to be changed to)
"""
self.log('Renaming directory %s to %s'% (dirname, newname),7)
if dirname == newname:
return
if ((dirname is None) or (newname is None)):
raise TypeError('Not enough parameters passed to system.fs.renameDir...[%s, %s]'%(dirname, newname))
if(self.isDir(dirname)):
            if overwrite and self.exists(newname):
                self.removeDirTree(newname)
os.rename(dirname, newname)
else:
raise ValueError('Path: %s is not a directory in system.fs.renameDir'%dirname)
def unlinkFile(self, filename):
"""Remove the file path (only for files, not for symlinks)
@param filename: File path to be removed
"""
self.log('Unlink file with path: %s'%filename, 6)
if (filename is None):
            raise TypeError('File name is None in system.fs.unlinkFile')
if not self.isFile(filename):
raise RuntimeError("filename is not a file so cannot unlink")
try:
os.unlink(filename)
except:
raise OSError('Failed to unlink the specified file path: %s in system.fs.unlinkFile'% filename)
def unlink(self, filename):
'''Remove the given file if it's a file or a symlink
@param filename: File path to be removed
@type filename: string
'''
self.log('Unlink path: %s' % filename, 6)
if not filename:
raise TypeError('File name is None in system.fs.unlink')
try:
os.unlink(filename)
except:
            raise OSError('Failed to unlink the specified file path: [%s] in system.fs.unlink' % filename)
def fileGetContents(self, filename):
"""Read a file and get contents of that file
@param filename: string (filename to open for reading )
@rtype: string representing the file contents
"""
if filename is None:
raise TypeError('File name is None in system.fs.fileGetContents')
self.log('Opened file %s for reading'% filename,6)
# self.log('Reading file %s'% filename,9)
with open(filename) as fp:
data = fp.read()
self.log('File %s is closed after reading'%filename,9)
return data
def fileGetUncommentedContents(self, filename):
"""Read a file and get uncommented contents of that file
@param filename: string (filename to open for reading )
@rtype: list of lines of uncommented file contents
"""
if filename is None:
            raise TypeError('File name is None in system.fs.fileGetUncommentedContents')
self.log('Opened file %s for reading'% filename,6)
# self.log('Reading file %s'% filename,9)
with open(filename) as fp:
data = fp.readlines()
uncommented = list()
for line in data:
if not line.startswith('#') and not line.startswith('\n'):
line = line.replace('\n', '')
uncommented.append(line)
self.log('File %s is closed after reading'%filename,9)
return uncommented
def fileGetTextContents(self, filename):
"""Read a UTF-8 file and get contents of that file. Takes care of the [BOM](http://en.wikipedia.org/wiki/Byte_order_mark)
@param filename: string (filename to open for reading)
@rtype: string representing the file contents
"""
if filename is None:
raise TypeError('File name is None in system.fs.fileGetTextContents')
with open(filename) as f:
s = f.read()
for bom in [codecs.BOM_UTF8]: # we can add more BOMs later:
if s.startswith(bom):
s = s.replace(bom, '', 1)
break
return s
def touch(self,paths,overwrite=True):
"""
can be single path or multiple (then list)
"""
        if j.basetype.list.check(paths):
            for item in paths:
                self.touch(item,overwrite=overwrite)
            return
        path=paths
self.createDir(j.system.fs.getDirName(path))
if overwrite:
self.remove(path)
if not self.exists(path=path):
self.writeFile(path,"")
def writeFile(self,filename, contents, append=False):
"""
Open a file and write file contents, close file afterwards
@param contents: string (file contents to be written)
"""
if (filename is None) or (contents is None):
raise TypeError('Passed None parameters in system.fs.writeFile')
self.log('Opened file %s for writing'% filename,6)
if append==False:
fp = open(filename,"wb")
else:
fp = open(filename,"ab")
self.log('Writing contents in file %s'%filename,9)
try:
#if filename.find("avahi")<>-1:
# ipshell()
fp.write(contents) #@todo P1 will this also raise an error and not be catched by the finally
finally:
fp.close()
def fileSize(self, filename):
"""Get Filesize of file in bytes
@param filename: the file u want to know the filesize of
@return: int representing file size
"""
self.log('Getting filesize of file: %s'%filename,8)
if not self.exists(filename):
raise RuntimeError("Specified file: %s does not exist"% filename)
try:
return os.path.getsize(filename)
except Exception, e:
raise OSError("Could not get filesize of %s\nError: %s"%(filename,str(e)))
def writeObjectToFile(self,filelocation,obj):
"""
Write a object to a file(pickle format)
@param filelocation: location of the file to which we write
@param obj: object to pickle and write to a file
"""
        if not filelocation or not obj:
            raise ValueError("You should provide both a filelocation and an object as parameters")
self.log("Creating pickle and write it to file: %s" % filelocation,6)
try:
pcl = pickle.dumps(obj)
except Exception, e:
raise Exception("Could not create pickle from the object \nError: %s" %(str(e)))
j.system.fs.writeFile(filelocation,pcl)
if not self.exists(filelocation):
raise Exception("File isn't written to the filesystem")
def readObjectFromFile(self,filelocation):
"""
Read a object from a file(file contents in pickle format)
@param filelocation: location of the file
@return: object
"""
if not filelocation:
raise ValueError("You should provide a filelocation as a parameter")
self.log("Opening file %s for reading" % filelocation,6)
contents = j.system.fs.fileGetContents(filelocation)
self.log("creating object",9)
try:
obj = pickle.loads(contents)
except Exception, e:
raise Exception("Could not create the object from the file contents \n Error: %s" %(str(e)))
return obj
def md5sum(self, filename):
"""Return the hex digest of a file without loading it all into memory
@param filename: string (filename to get the hex digest of it)
@rtype: md5 of the file
"""
self.log('Get the hex digest of file %s without loading it all into memory'%filename,8)
if filename is None:
            raise TypeError('File name is None in system.fs.md5sum')
try:
try:
fh = open(filename)
digest = hashlib.md5()
while 1:
buf = fh.read(4096)
if buf == "":
break
digest.update(buf)
finally:
fh.close()
return digest.hexdigest()
except Exception, e:
raise RuntimeError("Failed to get the hex digest of the file %sin system.fs.md5sum. Error: %s" % (filename,str(e)))
def walkExtended(self, root, recurse=0, dirPattern='*' , filePattern='*', followSoftLinks = True, dirs=True, files=True ):
"""
        Extended Walk version: separate dir and file pattern
@param root : start directory to start the search.
@type root : string
@param recurse : search also in subdirectories.
@type recurse : number
@param dirPattern : search pattern to match directory names. Wildcards can be included.
@type dirPattern : string
@param filePattern : search pattern to match file names. Wildcards can be included.
@type filePattern : string
@param followSoftLinks : determine if links must be followed.
@type followSoftLinks : boolean
@param dirs : determine to return dir results.
@type dirs : boolean
@param files : determine to return file results.
@type files : boolean
@return : List of files and / or directories that match the search patterns.
@rtype : list of strings
        General guidelines on the usage of this method, by means of some examples, follow. Assume /tmp contains
* a file test.rtt
* and ./folder1/subfolder/subsubfolder/small_test/test.rtt
To find the first test you can use
j.system.fs.walkExtended('/tmp/', dirPattern="*tmp*", filePattern="*.rtt")
To find only the second one you could use
j.system.fs.walkExtended('tmp', recurse=0, dirPattern="*small_test*", filePattern="*.rtt", dirs=False)
"""
self.log('Scanning directory (walk) %s'%root,6)
result = []
try:
names = os.listdir(root)
except os.error:
return result #@todo P2 is this correct?
dirPattern = dirPattern or '*'
dirPatList = dirPattern.split(';')
filePattern = filePattern or '*'
filePatList = filePattern.split(';')
for name in names:
fullname = os.path.normpath(os.path.join(root, name))
if self.isFile(fullname, followSoftLinks):
fileOK = False
dirOK = False
for fPat in filePatList:
if (fnmatch.fnmatch(name,fPat)):
fileOK = True
for dPat in dirPatList:
if (fnmatch.fnmatch(os.path.dirname(fullname),dPat)):
dirOK = True
if fileOK and dirOK and files:
result.append(fullname)
if self.isDir(fullname, followSoftLinks):
for dPat in dirPatList:
if (fnmatch.fnmatch(name,dPat) and dirs):
result.append(fullname)
if recurse:
result = result + self.walkExtended(root = fullname,
recurse = recurse,
dirPattern = dirPattern,
filePattern = filePattern,
followSoftLinks = followSoftLinks,
dirs = dirs,
files = files )
return result
#WalkExtended = deprecated('j.system.fs.WalkExtended','j.system.fs.walkExtended', '3.2')(walkExtended)
def walk(self, root, recurse=0, pattern='*', return_folders=0, return_files=1, followSoftlinks = True,unicode=False ):
"""This is to provide ScanDir similar function
It is going to be used wherever some one wants to list all files and subfolders
under one given directly with specific or none matchers
"""
if unicode:
os.path.supports_unicode_filenames=True
        self.log('Scanning directory (walk) %s'%root,6)
# initialize
result = []
# must have at least root folder
try:
names = os.listdir(root)
except os.error:
return result
# expand pattern
pattern = pattern or '*'
pat_list = pattern.split(';')
# check each file
for name in names:
fullname = os.path.normpath(os.path.join(root, name))
# grab if it matches our pattern and entry type
for pat in pat_list:
if (fnmatch.fnmatch(name, pat)):
if ( self.isFile(fullname, followSoftlinks) and return_files ) or (return_folders and self.isDir(fullname, followSoftlinks)):
result.append(fullname)
continue
# recursively scan other folders, appending results
if recurse:
if self.isDir(fullname) and not self.isLink(fullname):
result = result + self.walk( fullname, recurse, pattern, return_folders, return_files, followSoftlinks )
return result
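    # e.g. walk('/tmp', recurse=1, pattern='*.py;*.txt', return_folders=1) matches the
    # ';'-separated patterns against both files and folders, recursing into subdirs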
#Walk = deprecated('j.system.fs.Walk', 'j.system.fs.walk', '3.2')(walk)
def convertFileDirnamesUnicodeToAscii(self,rootdir,spacesToUnderscore=False):
os.path.supports_unicode_filenames=True
def visit(arg,dirname,names):
dirname2=j.system.string.decodeUnicode2Asci(dirname)
for name in names:
name2=j.system.string.decodeUnicode2Asci(name)
if name2<>name:
##print "name not unicode"
source=os.path.join(dirname,name)
if spacesToUnderscore:
dirname=dirname.replace(" ","_")
name2=name2.replace(" ","_")
if os.path.isdir(source):
j.system.fs.renameDir(source,j.system.fs.joinPaths(dirname,name2))
if os.path.isfile(source):
# #print "renamefile"
j.system.fs.renameFile(source,j.system.fs.joinPaths(dirname,name2))
if dirname2<>dirname:
#dirname not unicode
##print "dirname not unicode"
if spacesToUnderscore:
dirname2=dirname2.replace(" ","_")
if j.system.fs.isDir(dirname):
j.system.fs.renameDir(dirname,dirname2)
arg={}
os.path.walk(rootdir, visit,arg)
def convertFileDirnamesSpaceToUnderscore(self,rootdir):
def visit(arg,dirname,names):
if dirname.find(" ")<>-1:
#dirname has space inside
dirname2=dirname.replace(" ","_")
if j.system.fs.isDir(dirname):
j.system.fs.renameDir(dirname,dirname2)
arg={}
os.path.walk(rootdir, visit,arg)
def getTmpDirPath(self):
"""
create a tmp dir name and makes sure the dir exists
"""
tmpdir=j.system.fs.joinPaths(j.dirs.tmpDir,j.base.idgenerator.generateRandomInt(1,100000000))
j.system.fs.createDir(tmpdir)
return tmpdir
def getTmpFilePath(self,cygwin=False):
"""Generate a temp file path
Located in temp dir of qbase
@rtype: string representing the path of the temp file generated
"""
#return tempfile.mktemp())
tmpdir=j.dirs.tmpDir
fd, path = tempfile.mkstemp(dir=tmpdir)
try:
real_fd = os.fdopen(fd)
real_fd.close()
except (IOError, OSError):
pass
if cygwin:
path=path.replace("\\","/")
path=path.replace("//","/")
return path
def getTempFileName(self, dir=None, prefix=''):
"""Generates a temp file for the directory specified
@param dir: Directory to generate the temp file
@param prefix: string to start the generated name with
@rtype: string representing the generated temp file path
"""
if dir==None:
return j.system.fs.joinPaths(j.dirs.tmpDir,prefix+str(j.base.idgenerator.generateRandomInt(0,1000000000000))+".tmp")
else:
dir = dir or j.dirs.tmpDir
return tempfile.mktemp('', prefix, dir)
def isAsciiFile(self, filename, checksize=4096):
"""Read the first <checksize> bytes of <filename>.
Validate that only valid ascii characters (32-126), \r, \t, \n are
present in the file"""
BLOCKSIZE = 4096
dataread = 0
if checksize == 0:
checksize = BLOCKSIZE
fp = open(filename,"r")
isAscii = True
while dataread < checksize:
data = fp.read(BLOCKSIZE)
if not data:
break
dataread += len(data)
for x in data:
if not ((ord(x)>=32 and ord(x)<=126) or x=='\r' or x=='\n' or x=='\t'):
isAscii = False
break
if not isAscii:
break
fp.close()
return isAscii
def isBinaryFile(self, filename, checksize=4096):
return not self.isAsciiFile(filename, checksize)
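#Usage sketch (hypothetical path; checks the first 4096 bytes by default):
#  j.system.fs.isBinaryFile("/tmp/archive.tar.gz")  # -> True for non-text content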
lock = staticmethod(lock)
lock_ = staticmethod(lock_)
islocked = staticmethod(islocked)
unlock = staticmethod(unlock)
unlock_ = staticmethod(unlock_)
def validateFilename(self, filename, platform=None):
'''Validate a filename for a given (or current) platform
Check whether a given filename is valid on a given platform, or the
current platform if no platform is specified.
Rules
=====
Generic
-------
Zero-length filenames are not allowed
Unix
----
Filenames can contain any character except 0x00. We also disallow a
forward slash ('/') in filenames.
Filenames can be up to 255 characters long.
Windows
-------
Filenames should not contain any character in the 0x00-0x1F range, '<',
'>', ':', '"', '/', '\', '|', '?' or '*'. Names should not end with a
dot ('.') or a space (' ').
Several basenames are not allowed, including CON, PRN, AUX, CLOCK$,
NUL, COM[1-9] and LPT[1-9].
Filenames can be up to 255 characters long.
Information sources
===================
Restrictions are based on information found at these URLs:
* http://en.wikipedia.org/wiki/Filename
* http://msdn.microsoft.com/en-us/library/aa365247.aspx
* http://www.boost.org/doc/libs/1_35_0/libs/filesystem/doc/portability_guide.htm
* http://blogs.msdn.com/brian_dewey/archive/2004/01/19/60263.aspx
@param filename: Filename to check
@type filename: string
@param platform: Platform to validate against
@type platform: L{PlatformType}
@returns: Whether the filename is valid on the given platform
@rtype: bool
'''
from JumpScale.core.enumerators import PlatformType
platform = platform or PlatformType.findPlatformType()
if not filename:
return False
#When adding more restrictions to check_unix or check_windows, please
#update the validateFilename documentation accordingly
def check_unix(filename):
if len(filename) > 255:
return False
if '\0' in filename or '/' in filename:
return False
return True
def check_windows(filename):
if len(filename) > 255:
return False
if os.path.splitext(filename)[0] in ('CON', 'PRN', 'AUX', 'CLOCK$', 'NUL'):
return False
if os.path.splitext(filename)[0] in ('COM%d' % i for i in xrange(1, 10)):
return False
if os.path.splitext(filename)[0] in ('LPT%d' % i for i in xrange(1, 10)):
return False
#ASCII characters 0x00 - 0x1F are invalid in a Windows filename
#We loop from 0x00 to 0x20 (xrange is [a, b[), and check whether
#the corresponding ASCII character (which we get through the chr(i)
#function) is in the filename
for c in xrange(0x00, 0x20):
if chr(c) in filename:
return False
for c in ('<', '>', ':', '"', '/', '\\', '|', '?', '*'):
if c in filename:
return False
if filename.endswith((' ', '.', )):
return False
return True
if platform.isWindows():
return check_windows(filename)
if platform.isUnix():
return check_unix(filename)
raise NotImplementedError('Filename validation on given platform not supported')
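#Usage sketch (hypothetical values; the platform is resolved automatically
#when none is passed):
#  j.system.fs.validateFilename("report.txt")   # True on Unix and Windows
#  j.system.fs.validateFilename("NUL.txt")      # False on Windows (reserved basename)
#  j.system.fs.validateFilename("a" * 300)      # False everywhere (name too long)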
def fileConvertLineEndingCRLF(self,file):
'''Convert CRLF line-endings in a file to LF-only endings (CRLF -> LF)
@param file: File to convert
@type file: string
'''
self.log("fileConvertLineEndingCRLF "+file, 8)
content=j.system.fs.fileGetContents(file)
lines=content.split("\n")
out=""
for line in lines:
line=line.replace("\n","")
line=line.replace("\r","")
out=out+line+"\n"
self.writeFile(file,out)
def find(self, startDir,fileregex):
"""Search for files or folders matching a given pattern
note: this is a weird function; prefer the list functions instead
it changes the current directory to startDir before globbing
example: find("/opt/code", "*.pyc")
@param fileregex: The regex pattern to match
@type fileregex: string
"""
j.system.fs.changeDir(startDir)
import glob
return glob.glob(fileregex)
def grep(self, fileregex, lineregex):
"""Search for lines matching a given regex in all files matching a regex
@param fileregex: Files to search in
@type fileregex: string
@param lineregex: Regex pattern to search for in each file
@type lineregex: string
"""
import glob, re, os
for filename in glob.glob(fileregex):
if os.path.isfile(filename):
f = open(filename, 'r')
for line in f:
if re.match(lineregex, line):
print "%s: %s" % (filename, line)
cleanupString = staticmethod(cleanupString)
def constructDirPathFromArray(self,array):
path=""
for item in array:
path=path+os.sep+item
path=path+os.sep
if j.system.platformtype.isUnix():
path=path.replace("//","/")
path=path.replace("//","/")
return path
def constructFilePathFromArray(self,array):
path=self.constructDirPathFromArray(array)
if path[-1]=="/":
path=path[0:-1]
return path
def pathToUnicode(self, path):
"""
Convert path to unicode. Use the local filesystem encoding. Will return
path unmodified if path already is unicode.
Use this to convert paths you received from the os module to unicode.
@param path: path to convert to unicode
@type path: basestring
@return: unicode path
@rtype: unicode
"""
from jumpscale import Dirs
return Dirs.pathToUnicode(path)
def targzCompress(self, sourcepath, destinationpath,followlinks=False,destInTar="",pathRegexIncludes=['.[a-zA-Z0-9]*'], \
pathRegexExcludes=[], contentRegexIncludes=[], contentRegexExcludes=[], depths=[],\
extrafiles=[]):
"""
@param sourcepath: Source directory .
@param destination: Destination filename.
@param followlinks: do not tar the links, follow the link and add that file or content of directory to the tar
@param pathRegexIncludes: / Excludes match paths to array of regex expressions (array(strings))
@param contentRegexIncludes: / Excludes match content of files to array of regex expressions (array(strings))
@param depths: array of depth values e.g. only return depth 0 & 1 (would mean first dir depth and then 1 more deep) (array(int))
@param destInTar when not specified the dirs, files under sourcedirpath will be added to root of
tar.gz with this param can put something in front e.g. /qbase3/ prefix to dest in tgz
@param extrafiles is array of array [[source,destpath],[source,destpath],...] adds extra files to tar
(TAR-GZ-Archive *.tar.gz)
"""
import os.path
import tarfile
if not j.system.fs.isDir(sourcepath):
raise RuntimeError("Cannot find file (exists but is not a file or dir) %s" % sourcepath)
self.log("Compressing directory %s to %s"%(sourcepath, destinationpath))
if not j.system.fs.exists(j.system.fs.getDirName(destinationpath)):
j.system.fs.createDir(j.system.fs.getDirName(destinationpath))
t = tarfile.open(name = destinationpath, mode = 'w:gz')
if not(followlinks != False or destInTar != "" or pathRegexIncludes != ['.[a-zA-Z0-9]*'] or pathRegexExcludes != [] \
or contentRegexIncludes != [] or contentRegexExcludes != [] or depths != []):
t.add(sourcepath, "/")
else:
def addToTar(params,path):
tarfile=params["t"]
destInTar=params["destintar"]
destpath=j.system.fs.joinPaths(destInTar,j.system.fs.pathRemoveDirPart(path, sourcepath))
if j.system.fs.isLink(path) and followlinks:
path=j.system.fs.readlink(path)
self.log("fs.tar: add file %s to tar" % path,7)
# print "fstar: add file %s to tar" % path
if not (j.system.platformtype.isWindows() and j.system.windows.checkFileToIgnore(path)):
if self.isFile(path) or self.isLink(path):
tarfile.add(path,destpath)
else:
raise RuntimeError("Cannot add file %s to destpath"%destpath)
params={}
params["t"]=t
params["destintar"]=destInTar
j.system.fswalker.walk(root=sourcepath, callback=addToTar, arg=params,\
recursive=True, includeFolders=False, \
pathRegexIncludes=pathRegexIncludes, pathRegexExcludes=pathRegexExcludes, contentRegexIncludes=contentRegexIncludes, \
contentRegexExcludes=contentRegexExcludes, depths=depths,followlinks=False)
if extrafiles != []:
for extrafile in extrafiles:
source=extrafile[0]
destpath=extrafile[1]
t.add(source,j.system.fs.joinPaths(destInTar,destpath))
t.close()
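#Usage sketch (hypothetical paths): archive a directory, skipping compiled
#Python files:
#  j.system.fs.targzCompress("/opt/mydir", "/tmp/mydir.tar.gz",
#                            pathRegexExcludes=[".*\.pyc"])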
def gzip(self,sourceFile,destFile):
import gzip
f_in = open(sourceFile, 'rb')
f_out = gzip.open(destFile, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
def gunzip(self,sourceFile,destFile):
import gzip
self.createDir(self.getDirName(destFile))
f_in = gzip.open(sourceFile, 'rb')
f_out = open(destFile, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
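#Usage sketch (hypothetical paths):
#  j.system.fs.gzip("/tmp/data.txt", "/tmp/data.txt.gz")
#  j.system.fs.gunzip("/tmp/data.txt.gz", "/tmp/data_copy.txt")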
def targzUncompress(self,sourceFile,destinationdir,removeDestinationdir=True):
"""
compress dirname recursive
@param sourceFile: file to uncompress
@param destinationpath: path of to destiniation dir, sourcefile will end up uncompressed in destination dir
"""
if removeDestinationdir:
j.system.fs.removeDirTree(destinationdir)
if not j.system.fs.exists(destinationdir):
j.system.fs.createDir(destinationdir)
import tarfile
# Python's tarfile module does not create empty directories; this causes many problems while installing, so we use the system tar on Linux
if j.system.platformtype.isWindows():
tar = tarfile.open(sourceFile)
tar.extractall(destinationdir)
tar.close()
#todo find better alternative for windows
else:
cmd = "tar xzf '%s' -C '%s'" % (sourceFile, destinationdir)
j.system.process.execute(cmd)
|
Jumpscale/jumpscale6_core
|
lib/JumpScale/core/system/fs.py
|
Python
|
bsd-2-clause
| 79,683
|
[
"VisIt"
] |
8671614de14574cee7ea25bf26e39a981347e9318b9f9dfa8018e05fc32fd430
|
"""This package contains the spectral shape item."""
import numpy as np
from glotaran.model import model_attribute, model_attribute_typed
from glotaran.parameter import Parameter
@model_attribute(properties={
'amplitude': Parameter,
'location': Parameter,
'width': Parameter,
}, has_type=True)
class SpectralShapeGaussian:
"""A gaussian spectral shape"""
def calculate(self, axis: np.ndarray) -> np.ndarray:
"""calculate calculates the shape.
Parameters
----------
axis: np.ndarray
The axis to calculate the shape on.
Returns
-------
shape: numpy.ndarray
"""
matrix = self.amplitude * np.exp(
-np.log(2) * np.square(2 * (axis - self.location)/self.width))
return matrix
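# A numerical note on the formula above: `width` is the full width at half
# maximum (FWHM), so the value at axis == location +/- width/2 is exactly
# amplitude/2. A quick sketch with hypothetical parameter values:
#   axis = np.linspace(350, 750, 401)
#   shape = 2.0 * np.exp(-np.log(2) * np.square(2 * (axis - 550.0) / 100.0))
#   shape.max()  # ~2.0, reached at axis == 550.0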
@model_attribute(properties={
}, has_type=True)
class SpectralShapeOne:
"""A gaussian spectral shape"""
def calculate(self, axis: np.ndarray) -> np.ndarray:
"""calculate calculates the shape.
Parameters
----------
axis: np.ndarray
The axis to calculate the shape on.
Returns
-------
shape: numpy.ndarray
"""
return np.ones((axis.shape[0]))
@model_attribute(properties={
}, has_type=True)
class SpectralShapeZero:
"""A gaussian spectral shape"""
def calculate(self, axis: np.ndarray) -> np.ndarray:
"""calculate calculates the shape.
Only works after calling calling 'fill'.
Parameters
----------
axis: np.ndarray
The axis to calculate the shape on.
Returns
-------
shape: numpy.ndarray
"""
return np.zeros((axis.shape[0]))
@model_attribute_typed(types={
'gaussian': SpectralShapeGaussian,
'one': SpectralShapeOne,
'zero': SpectralShapeZero,
})
class SpectralShape:
"""Base class for spectral shapes"""
|
glotaran/glotaran
|
glotaran/builtin/models/kinetic_spectrum/spectral_shape.py
|
Python
|
gpl-3.0
| 1,929
|
[
"Gaussian"
] |
d99fa270a4730356e7f69e21719762db635e86494441fcd9061324c127b9f568
|
#!/usr/bin/env python
import numpy as np
from math import floor
from numpy import linalg as la
from numpy import matlib as matlib
import matplotlib.pyplot as plt
import argparse
import os
import pdb
from scipy import spatial
import time
import operator
import random
import warnings
FISHER = 0
RANDOM_CLASSIFIER = 0
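# With both flags set to 0 the linear-regression classifier below is used;
# set FISHER = 1 for the Fisher-criterion projection or RANDOM_CLASSIFIER = 1
# for a random projection (see FisherClassifier).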
def binaryClassify_fisher(train_features, train_truth, test_features, test_truth, classa, classb, max_iterations):
print 5000-max_iterations  #recursion depth so far, assuming the initial budget of 5000 set in main()
if max_iterations <= 0:
print "Something went wrong, the maximum recursion depth was exceeded"
quit()
max_iterations -= 1
#perform a classification on the data
if FISHER or RANDOM_CLASSIFIER:
train_classification_result, test_classification_result = FisherClassifier(train_features, train_truth, test_features, classa, classb)
else: #linear regression
train_classification_result, test_classification_result = LinearRegression(train_features, train_truth, test_features, classa, classb)
#check to see if either classification is "pure"
#select the items which are really in class 0
class_a_samples = train_classification_result[train_truth == classa]
class_b_samples = train_classification_result[train_truth == classb]
print "class_a_train_rate %f"%(float(np.sum(class_a_samples))/class_a_samples.shape[0])
print "class_b_train_rate %f"%(1.0 - float(np.sum(class_b_samples))/class_b_samples.shape[0])
class_a_test_samples = test_classification_result[test_truth == classa]
class_b_test_samples = test_classification_result[test_truth == classb]
print "class_a_test_rate %f"%(float(np.sum(class_a_test_samples))/class_a_test_samples.shape[0])
print "class_b_test_rate %f"%(1.0 - float(np.sum(class_b_test_samples))/class_b_test_samples.shape[0])
#sum errors from every recursion
num_a_errors = 0
num_b_errors = 0
if not np.all(class_a_samples == classa) and not np.all(class_a_samples == classb) and not np.all(test_truth[test_classification_result == classa]) and np.any(test_truth[test_classification_result == classa]) and not np.all(train_truth[train_classification_result == classa]) and np.any(train_truth[train_classification_result == classa]):
#recurse on this branch
new_train_features = train_features[train_classification_result==classa]
new_train_truth = train_truth[train_classification_result == classa]
new_test_features = test_features[test_classification_result==classa]
new_test_truth = test_truth[test_classification_result == classa]
print "left test: %4d, left train: %4d"%(new_test_truth.shape[0],new_train_truth.shape[0])
tempa, tempb = binaryClassify_fisher(new_train_features, new_train_truth, new_test_features, new_test_truth, classa, classb, max_iterations)
num_a_errors += tempa
num_b_errors += tempb
else:
#the sample was "pure", so record the results for it
#compute the number of errors in this dataset
#check what the previous output was (we only have to check the first element since they are all the same)
num_a_errors += test_truth[test_truth==classa].shape[0] - np.sum(test_classification_result[test_truth == classa] == classa)
if not np.all(class_b_samples == classb) and not np.all(class_b_samples == classa) and not np.all(test_truth[test_classification_result == classb]) and np.any(test_truth[test_classification_result == classb]) and not np.all(train_truth[train_classification_result == classb]) and np.any(train_truth[train_classification_result == classb]):
#recurse on this branch
new_train_features = train_features[train_classification_result==classb]
new_train_truth = train_truth[train_classification_result == classb]
new_test_features = test_features[test_classification_result==classb]
new_test_truth = test_truth[test_classification_result == classb]
print "right test: %4d, right train: %4d"%(new_test_truth.shape[0],new_train_truth.shape[0])
tempa,tempb = binaryClassify_fisher(new_train_features, new_train_truth, new_test_features, new_test_truth, classa, classb, max_iterations)
num_a_errors += tempa
num_b_errors += tempb
else:
#the sample was "pure", so record the results for it
#compute the number of errors in this dataset
#check what the previous output was (we only have to check the first element since they are all the same)
num_b_errors += test_truth[test_truth == classb].shape[0] - np.sum(test_classification_result[test_truth == classb] == classb)
return num_a_errors, num_b_errors
def LinearRegression(train_features, train_truth, test_features, classa, classb):
train_truth_internal = np.matrix(train_truth.copy()).T
#map the {0,1} class labels to {-1,+1} so the regression target is zero-centered
train_truth_internal[train_truth_internal == 0] = -1
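#ridge-regularised normal equations, w = (X^T X + eps*I)^-1 X^T y;
#the tiny eps*I term keeps X^T X invertible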
filter = la.inv(train_features.T * train_features + np.eye(train_features.shape[1])*1e-10) * train_features.T * train_truth_internal
test_classification = (test_features * filter)
train_classification = (train_features * filter)
test_rtn = test_classification.copy()
train_rtn = train_classification.copy()
test_rtn[test_classification <= 0] = classa
train_rtn[train_classification <= 0] = classa
test_rtn[test_classification >0] = classb
train_rtn[train_classification >0] = classb
return np.array(train_rtn)[:,0],np.array(test_rtn)[:,0]
def FisherClassifier(train_features, train_truth, test_features, classa, classb):
with warnings.catch_warnings():
warnings.filterwarnings('error')
'''
:param features:
:param classification:
:param test_data:
:return:
'''
# separate classes
class_a_features = train_features[train_truth == classa]
class_b_features = train_features[train_truth == classb]
try:
class_a_mean = np.mean(class_a_features, 0).T
class_a_cov = np.cov(class_a_features.T)
class_b_mean = np.mean(class_b_features, 0).T
class_b_cov = np.cov(class_b_features.T)
except Warning:
#there was no covariance computed, so just assign everything to one class
if class_b_features.shape[0] < 2:
#send everything to class a
return [np.ones(train_features.shape[0])*classa, np.ones(test_features.shape[0])*classa]
else:
return [np.ones(train_features.shape[0])*classb, np.ones(test_features.shape[0])*classb]
# compute the Fisher criteria projection to one dimension
element_classified_a = False
element_classified_b = False
while not element_classified_a or not element_classified_b:
if FISHER:
try:
projection = la.inv(class_a_cov + class_b_cov + np.eye(class_a_cov.shape[0])*10e-15) * (class_a_mean - class_b_mean)
except:
pdb.set_trace()
else: #if RANDOM_CLASSIFIER
projection = np.matrix(np.zeros(class_a_cov.shape[0]))
for idx in range(projection.shape[1]):
projection[0, idx] = random.random()
projection = projection.T
projection = projection / la.norm(projection)
element_classified_a = False
element_classified_b = False
# project all of the data
class_a_projection = class_a_features * projection
class_b_projection = class_b_features * projection
class_a_gauss_build = GaussianBuild(class_a_projection)
class_b_gauss_build = GaussianBuild(class_b_projection)
#classify the test data
test_classification_result = []
for sample in test_features:
try:
sample_projection = sample * projection
except ValueError:
pdb.set_trace()
class_a_prob = ComputeGaussianProbability(class_a_gauss_build[0], class_a_gauss_build[1], sample_projection)
class_b_prob = ComputeGaussianProbability(class_b_gauss_build[0], class_b_gauss_build[1], sample_projection)
if class_a_prob > class_b_prob:
test_classification_result.append(classa)
else:
test_classification_result.append(classb)
#classify the train data
train_classification_result = []
for sample in train_features:
try:
sample_projection = sample * projection
except ValueError:
pdb.set_trace()
class_a_prob = ComputeGaussianProbability(class_a_gauss_build[0], class_a_gauss_build[1], sample_projection)
class_b_prob = ComputeGaussianProbability(class_b_gauss_build[0], class_b_gauss_build[1], sample_projection)
if class_a_prob > class_b_prob:
train_classification_result.append(classa)
element_classified_a = True
else:
train_classification_result.append(classb)
element_classified_b = True
if FISHER:
break
return [np.array(train_classification_result).T,np.array(test_classification_result).T]
def GaussianBuild(features):
"""
computes the mean and covariance for a dataset
:param features: s x f np.matrix (s samples by f features)
:param classification: s x 1 np.ndarray
:param class_id: scalar value to find
:return: [covariance(f x f),mean (f x 1)]
"""
#print 'Of ', features.shape, 'Elements, ', features.shape
cov_mat = np.cov(features.T)
mean_mat = np.mean(features.T)
return [cov_mat, mean_mat]
def ComputeGaussianProbability(cov_mat, mean_mat, sample):
"""
computes the probability of a particular sample belonging to a particular gaussian distribution
:param cov_mat: f x f np.matrix (f features)
:param mean_mat: f x 1 np.matrix
:param sample: f x 1 np.matrix
:return:
"""
mean_mat = np.matrix(mean_mat).T
sample = sample.T
# sample = meanMat
non_invertible = True
eye_scale = 0.0
try:
cov_mat_inverse = 1.0 / cov_mat
except Warning:
cov_mat_inverse = 1
cov_mat = 1
probability = 1.0 / (np.sqrt(la.norm(2 * np.pi * cov_mat)))
probability *= np.exp(-0.5 * (sample - mean_mat).T * cov_mat_inverse * (sample - mean_mat))
return probability
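# For the 1-D projections used here this is the familiar Gaussian density
#   p(x) = 1/sqrt(2*pi*sigma^2) * exp(-(x - mu)^2 / (2*sigma^2))
# e.g. with cov_mat = 1.0, mean_mat = 0.0 and sample = np.matrix([[0.0]]) the
# function returns roughly 0.3989, the peak of a unit-variance Gaussian.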
def ParseData(raw_data):
raw_data = raw_data.rstrip('\n')
raw_data_list = raw_data.split('\n')
data_list = list()
for raw_data_point in raw_data_list:
raw_data_point = raw_data_point.rstrip()
point = raw_data_point.split(' ')
data_list.append([float(x) for x in point])
data_list.pop()
data_list_np = np.array(data_list)
return data_list_np
def main():
parser = argparse.ArgumentParser(description='Process input')
parser.add_argument('-t', '--training_file', type=str, help='submit data to train against')
parser.add_argument('-d', '--traintest_file', type=str, help='List indicating which data is training vs. test')
args = parser.parse_args()
print os.getcwd()
# Check if Arguments allow execution
if (not args.training_file):
print "Error: No training Data or model present!"
return -1
with open(args.traintest_file) as file:
raw_data = file.read()
traintest_data = np.array(ParseData(raw_data)[:,0])
if args.training_file:
# train against training file
if not os.path.isfile(args.training_file):
print "Error: Training file doesn't exist!"
return -1
# train
with open(args.training_file) as file:
# read file contents
raw_data = file.read()
# parse data
train_data = ParseData(raw_data)
test_truth = train_data[traintest_data==1,-1]
test_features = np.matrix(np.array(train_data[traintest_data==1,0:-1]))
train_truth = train_data[traintest_data==0,-1]
train_features = np.matrix(np.array(train_data[traintest_data==0,0:-1]))
#make the data homogeneous by appending a constant bias column
homogeneous_col = np.ones([train_features.shape[0],1])
train_features = np.append(train_features, homogeneous_col,axis=1)
homogeneous_col = np.ones([test_features.shape[0],1])
test_features = np.append(test_features, homogeneous_col, axis=1)
try:
test_features
train_features
except NameError:
print "You must provide test and training data"
quit()
#iteratively call the classifier to build a binary tree until the classification is perfect or a timeout is reached
max_iterations = 5000
#sort the data into test and training sets
with open(args.traintest_file) as file:
raw_data = file.read()
traintest_data = ParseData(raw_data)
num_a_errors,num_b_errors = binaryClassify_fisher(train_features, train_truth, test_features, test_truth, 0, 1, max_iterations)
classa = 0
classb = 1
print "Total a errors: %d of %d, %% error:%f"%(num_a_errors,test_truth[test_truth==classa].shape[0],float(num_a_errors)/test_truth[test_truth==classa].shape[0])
print "Total b errors: %d of %d, %% error:%f"%(num_b_errors,test_truth[test_truth==classb].shape[0],float(num_b_errors)/test_truth[test_truth==classb].shape[0])
if __name__ == '__main__':
main()
|
lukefrasera/cs775Homework
|
tim_midterm/scripts/decisionTree.py
|
Python
|
lgpl-3.0
| 13,150
|
[
"Gaussian"
] |
c73cc0b6123636eee61093b95038201e595325c6885668374a7b80ee35e3b628
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkPostScriptWriter(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkPostScriptWriter(), 'Writing vtkPostScript.',
('vtkPostScript',), (),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
chrisidefix/devide
|
modules/vtk_basic/vtkPostScriptWriter.py
|
Python
|
bsd-3-clause
| 492
|
[
"VTK"
] |
93ae8c2264bac6c16414d89685158424db7b63916a6f24c95888c332156bd49d
|
#
# QAPI event generator
#
# Copyright (c) 2014 Wenchao Xia
# Copyright (c) 2015 Red Hat Inc.
#
# Authors:
# Wenchao Xia <wenchaoqemu@gmail.com>
# Markus Armbruster <armbru@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2.
# See the COPYING file in the top-level directory.
from qapi import *
def gen_event_send_proto(name, arg_type):
return 'void qapi_event_send_%(c_name)s(%(param)s)' % {
'c_name': c_name(name.lower()),
'param': gen_params(arg_type, 'Error **errp')}
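# For an event named BLOCK_JOB_COMPLETED (an illustrative example) this yields
# roughly:
#   void qapi_event_send_block_job_completed(..., Error **errp)
# with the parameter list expanded from the event's argument type by
# gen_params().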
def gen_event_send_decl(name, arg_type):
return mcgen('''
%(proto)s;
''',
proto=gen_event_send_proto(name, arg_type))
def gen_event_send(name, arg_type):
ret = mcgen('''
%(proto)s
{
QDict *qmp;
Error *err = NULL;
QMPEventFuncEmit emit;
''',
proto=gen_event_send_proto(name, arg_type))
if arg_type and arg_type.members:
ret += mcgen('''
QmpOutputVisitor *qov;
Visitor *v;
QObject *obj;
''')
ret += mcgen('''
emit = qmp_event_get_func_emit();
if (!emit) {
return;
}
qmp = qmp_event_build_dict("%(name)s");
''',
name=name)
if arg_type and arg_type.members:
ret += mcgen('''
qov = qmp_output_visitor_new();
g_assert(qov);
v = qmp_output_get_visitor(qov);
g_assert(v);
/* Fake visit, as if all members are under a structure */
visit_start_struct(v, NULL, "", "%(name)s", 0, &err);
''',
name=name)
ret += gen_err_check()
ret += gen_visit_fields(arg_type.members, need_cast=True)
ret += mcgen('''
visit_end_struct(v, &err);
if (err) {
goto out;
}
obj = qmp_output_get_qobject(qov);
g_assert(obj != NULL);
qdict_put_obj(qmp, "data", obj);
''')
ret += mcgen('''
emit(%(c_enum)s, qmp, &err);
''',
c_enum=c_enum_const(event_enum_name, name))
if arg_type and arg_type.members:
ret += mcgen('''
out:
qmp_output_visitor_cleanup(qov);
''')
ret += mcgen('''
error_propagate(errp, err);
QDECREF(qmp);
}
''')
return ret
class QAPISchemaGenEventVisitor(QAPISchemaVisitor):
def __init__(self):
self.decl = None
self.defn = None
self._event_names = None
def visit_begin(self, schema):
self.decl = ''
self.defn = ''
self._event_names = []
def visit_end(self):
self.decl += gen_enum(event_enum_name, self._event_names)
self.defn += gen_enum_lookup(event_enum_name, self._event_names)
self._event_names = None
def visit_event(self, name, info, arg_type):
self.decl += gen_event_send_decl(name, arg_type)
self.defn += gen_event_send(name, arg_type)
self._event_names.append(name)
(input_file, output_dir, do_c, do_h, prefix, dummy) = parse_command_line()
c_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
h_comment = '''
/*
* schema-defined QAPI event functions
*
* Copyright (c) 2014 Wenchao Xia
*
* Authors:
* Wenchao Xia <wenchaoqemu@gmail.com>
*
* This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
* See the COPYING.LIB file in the top-level directory.
*
*/
'''
(fdef, fdecl) = open_output(output_dir, do_c, do_h, prefix,
'qapi-event.c', 'qapi-event.h',
c_comment, h_comment)
fdef.write(mcgen('''
#include "qemu-common.h"
#include "%(prefix)sqapi-event.h"
#include "%(prefix)sqapi-visit.h"
#include "qapi/qmp-output-visitor.h"
#include "qapi/qmp-event.h"
''',
prefix=prefix))
fdecl.write(mcgen('''
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "%(prefix)sqapi-types.h"
''',
prefix=prefix))
event_enum_name = c_name(prefix + "QAPIEvent", protect=False)
schema = QAPISchema(input_file)
gen = QAPISchemaGenEventVisitor()
schema.visit(gen)
fdef.write(gen.defn)
fdecl.write(gen.decl)
close_output(fdef, fdecl)
|
coloft/qemu
|
scripts/qapi-event.py
|
Python
|
gpl-2.0
| 4,247
|
[
"VisIt"
] |
99af5fbc5cda5e47ee16a32504d5d66fe9026b260c7a580e58e142aab56d2650
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for translation_helper.py."""
import unittest
import os
import sys
import translation_helper
here = os.path.realpath(__file__)
testdata_path = os.path.normpath(os.path.join(here, '..', '..', 'testdata'))
class TcHelperTest(unittest.TestCase):
def test_get_translatable_grds(self):
grds = translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd', 'internal.grd'],
os.path.join(testdata_path,
'translation_expectations_without_unlisted_file.pyl'))
self.assertEqual(1, len(grds))
# There should be no references to not_translated.grd (mentioning the
# filename here so that it doesn't appear unused).
grd = grds[0]
self.assertEqual(os.path.join(testdata_path, 'test.grd'), grd.path)
self.assertEqual(testdata_path, grd.dir)
self.assertEqual('test.grd', grd.name)
self.assertEqual([os.path.join(testdata_path, 'part.grdp')], grd.grdp_paths)
self.assertEqual([], grd.structure_paths)
self.assertEqual([os.path.join(testdata_path, 'test_en-GB.xtb')],
grd.xtb_paths)
self.assertEqual({'en-GB': os.path.join(testdata_path, 'test_en-GB.xtb')},
grd.lang_to_xtb_path)
self.assertTrue(grd.appears_translatable)
self.assertEqual(['en-GB'], grd.expected_languages)
# The expectations list an untranslatable file (not_translated.grd), but the
# grd list doesn't contain it.
def test_missing_untranslatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'internal.grd'], TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - not_translated.grd is listed in the translation expectations, '
'but this grd file does not exist.' % TRANSLATION_EXPECTATIONS,
str(context.exception))
# The expectations list an internal file (internal.grd), but the grd list
# doesn't contain it.
def test_missing_internal(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['test.grd', 'not_translated.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - internal.grd is listed in translation expectations as an internal '
'file to be ignored, but this grd file does not exist.' %
TRANSLATION_EXPECTATIONS, str(context.exception))
# The expectations list a translatable file (test.grd), but the grd list
# doesn't contain it.
def test_missing_translatable(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path, ['not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - test.grd is listed in the translation expectations, but this grd '
'file does not exist.' % TRANSLATION_EXPECTATIONS,
str(context.exception))
# The grd list contains a file (part.grdp) that's not listed in translation
# expectations.
def test_expectations_not_updated(self):
TRANSLATION_EXPECTATIONS = os.path.join(
testdata_path, 'translation_expectations_without_unlisted_file.pyl')
with self.assertRaises(Exception) as context:
translation_helper.get_translatable_grds(
testdata_path,
['test.grd', 'part.grdp', 'not_translated.grd', 'internal.grd'],
TRANSLATION_EXPECTATIONS)
self.assertEqual(
'%s needs to be updated. Please fix these issues:\n'
' - part.grdp appears to be translatable (because it contains <file> '
'or <message> elements), but is not listed in the translation '
'expectations.' % TRANSLATION_EXPECTATIONS, str(context.exception))
if __name__ == '__main__':
unittest.main()
|
scheib/chromium
|
tools/translation/helper/translation_helper_unittest.py
|
Python
|
bsd-3-clause
| 4,529
|
[
"xTB"
] |
269e2ab663d382282831fb207c6cf8758d4b9ee811bf314bd11c183bb58ad484
|
from argparse import ArgumentParser
import os
import shutil
import sys
import yaml
import MDAnalysis
from data import Data
from plots import Plots
from molecule import Molecule
from draw import Draw
from figure import Figure
from analysis.hbonds import HBonds
from analysis.residence_time import Residence_time
from analysis.rmsf import RMSF_measurements
from analysis.salt_bridges import SaltBridges
from analysis.pistacking import PiStacking
from analysis.sasa import SASA
from timeit import default_timer as timer
from ligand_description import LigDescr
class Lintools(object):
"""This class controls the behaviour of all other classes (Data,Plots,Molecule,Figure)
of lintools and inherits and transfers them resulting in a final SVG file that contains
the protein-ligand interactions.
It also controls the analysis (Residence_time and HBonds classes).
Takes:
* topology * - topology file
* trajectory * - trajectory file(s)
* mol_file * - MOL file of the ligand
* ligand * - MDAnalysis atomgroup of ligand that is going to be analysed
* offset * - residue offset which determines by how many numbers the protein residue numbering
should be offset (e.g. with offset = 30 the first residue will be changed from 1 to 30, 2 - 31, etc.)
* cutoff * - cutoff distance in angstroms that defines the native contacts (default - 3.5A)
* start_frame * - start frame(s) for trajectory analysis (can be different for each trajectory)
* end_frame * - end frame(s) for trajectory analysis (can be different for each trajectory)
* skip * - number of frames to skip (can be different for each trajectory)
* analysis_cutoff * - a fraction of time a residue has to fulfil the analysis parameters for (default - 0.3)
* diagram_type * - string of the selected diagram type (e.g. "amino" or "clocks")
* output_name * - name of the folder with results and the final SVG file
"""
__version__ = "09.2016"
def __init__(self,topology,trajectory,mol_file,ligand,offset,cutoff,start_frame,end_frame,skip,analysis_cutoff,diagram_type,output_name,cfg):
"""Defines the input variables."""
self.topology = os.path.abspath(topology)
try:
self.trajectory = []
for traj in trajectory:
self.trajectory.append(os.path.abspath(traj))
except Exception:
self.trajectory = []
if mol_file!=None:
self.mol_file = os.path.abspath(mol_file)
else:
self.mol_file = mol_file
self.ligand = ligand
self.offset = offset
self.cutoff = cutoff
if cfg==False:
self.start = [None if start_frame==[None] else int(start_frame[i]) for i in range(len(trajectory))]
self.end = [None if end_frame==[None] else int(end_frame[i]) for i in range(len(trajectory))]
self.skip = [None if skip==[None] else int(skip[i]) for i in range(len(trajectory))]
else:
self.start = start_frame
self.end = end_frame
self.skip = skip
self.analysis_cutoff = analysis_cutoff
self.diagram_type = diagram_type
self.output_name = output_name
def data_input_and_res_time_analysis(self):
"""
Loads the data into Data() - renumbers the residues, imports mol file in rdkit.
If there are trajectories to analyse, the residues that will be plotted are determined
from Residence_time() analysis.
"""
self.topol_data = Data()
self.topol_data.load_data(self.topology,self.mol_file,self.ligand,self.offset)
if len(self.trajectory) == 0:
self.topol_data.analyse_topology(self.topology,self.cutoff)
else:
self.res_time = Residence_time(self.topol_data,self.trajectory, self.start, self.end, self.skip,self.topology, self.ligand,self.offset)
self.res_time.measure_residence_time(self.cutoff)
self.res_time.define_residues_for_plotting_traj(self.analysis_cutoff)
self.topol_data.find_the_closest_atoms(self.topology)
def analysis_of_prot_lig_interactions(self):
"""
The classes and function that deal with protein-ligand interaction analysis.
"""
self.hbonds = HBonds(self.topol_data,self.trajectory,self.start,self.end,self.skip,self.analysis_cutoff,distance=3)
self.pistacking = PiStacking(self.topol_data,self.trajectory,self.start,self.end,self.skip, self.analysis_cutoff)
self.sasa = SASA(self.topol_data,self.trajectory)
self.lig_descr = LigDescr(self.topol_data)
if self.trajectory!=[]:
self.rmsf = RMSF_measurements(self.topol_data,self.topology,self.trajectory,self.ligand,self.start,self.end,self.skip)
self.salt_bridges = SaltBridges(self.topol_data,self.trajectory,self.lig_descr,self.start,self.end,self.skip,self.analysis_cutoff)
def plot_residues(self):
"""
Calls Plot() that plots the residues with the required diagram_type.
"""
self.plots = Plots(self.topol_data,self.diagram_type)
def draw_figure(self,data_for_color=None, data_for_size=None, data_for_clouds=None, rot_bonds=None, color_for_clouds="Blues", color_type_color="viridis"):
"""
Draws molecule through Molecule() and then puts the final figure together with
Figure().
"""
self.molecule = Molecule(self.topol_data)
self.draw = Draw(self.topol_data,self.molecule,self.hbonds,self.pistacking,self.salt_bridges,self.lig_descr)
self.draw.draw_molecule(data_for_color, data_for_size, data_for_clouds, rot_bonds, color_for_clouds, color_type_color)
self.figure = Figure(self.molecule,self.topol_data,self.draw)
self.figure.add_bigger_box()
self.figure.manage_the_plots()
self.figure.draw_white_circles()
self.figure.put_everything_together()
self.figure.write_final_draw_file(self.output_name)
def save_files(self):
"""Saves all output from LINTools run in a single directory named after the output name."""
while True:
try:
os.mkdir(self.output_name)
except Exception as e:
self.output_name = raw_input("This directory already exists - please enter a new name:")
else:
break
self.workdir = os.getcwd()
os.chdir(self.workdir+"/"+self.output_name)
def write_config_file(self, cfg):
if cfg!=None:
#copy the config file to results directory
shutil.copy("../"+cfg, "lintools.config")
else:
#If there was no config file, write one
cfg_dir = {'input':{
'topology':self.topology,
'trajectory':self.trajectory,
'mol file':self.mol_file,
'ligand':self.ligand,
'traj start':self.start,
'traj end':self.end,
'traj skip': self.skip,
'offset': self.offset,
'distance cutoff': self.cutoff,
'analysis cutoff': self.analysis_cutoff,
'diagram type': self.diagram_type,
'output name': self.output_name},
'representation':{
'data to show in color':None,
'data to show as size':None,
'data to show as cloud':None,
'rotatable bonds':None,
'cloud color scheme':'Blues',
'atom color scheme':'viridis',
'clock color scheme':'summer'}
}
with open("lintools.config","wb") as ymlfile:
yaml.dump(cfg_dir,ymlfile,default_flow_style=False)
def remove_files(self):
"""Removes intermediate files."""
file_list = ["molecule.svg","lig.pdb","HIS.pdb","PHE.pdb","TRP.pdb","TYR.pdb","lig.mol","test.xtc"]
for residue in self.topol_data.dict_of_plotted_res.keys():
file_list.append(residue[1]+residue[2]+".svg")
for f in file_list:
if os.path.isfile(f)==True:
os.remove(f)
if __name__ == '__main__':
#################################################################################################################
parser = ArgumentParser(description='Analysis and visualisation tool for protein ligand interactions. Requires rdkit, shapely, MDAnalysis.')
parser.add_argument('-cfg', '--config', dest = 'config', default=None, help='Configuration file')
parser.add_argument('-t', '--topology', dest = 'topology', default=None, help='Topology file')
parser.add_argument('-x', '--trajectory', dest = "trajectory", nargs="*", default=[], help='Trajectory file(s)')
parser.add_argument('-o', '--outname', dest = "output_name", help='Name for output folder and file')
parser.add_argument('-c', '--cutoff', dest = "cutoff", default = 3.5, help='Cutoff distance in angstroms.')
parser.add_argument('-ac', '--analysis_cutoff', dest = "analysis_cutoff", default=0.3, help='Analysis cutoff - a feature has to appear for at least a fraction of the simulation to be plotted.')
args = parser.parse_args()
####################################################################################################################
if args.config!=None:
#If a config file exists, the command-line arguments will be ignored
print "#####################################################################"
print "WARNING"
print "The arguments from command line will be ignored,"
print "if you want to make changes, do so in the configuration file."
print " "
print " "
print "######################################################################"
with open(args.config, "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)  #safe_load is sufficient for this plain-data config
## Check config file input - mainly topology and output file, also handling bad input
lintools = Lintools(cfg['input']['topology'],cfg['input']['trajectory'],cfg['input']['mol file'],cfg['input']['ligand'],cfg['input']['offset'],float(cfg['input']['distance cutoff']),cfg['input']['traj start'],cfg['input']['traj end'],cfg['input']['traj skip'],cfg['input']['analysis cutoff'],cfg['input']['diagram type'],cfg['input']['output name'],cfg=True)
lintools.save_files()
lintools.data_input_and_res_time_analysis()
lintools.analysis_of_prot_lig_interactions()
lintools.plot_residues()  #plot_residues() accepts no colour-scheme argument; the 'clock color scheme' setting is currently unused
lintools.write_config_file(args.config)
lintools.draw_figure(cfg['representation']['data to show in color'], cfg['representation']['data to show as size'], cfg['representation']['data to show as cloud'], cfg['representation']['rotatable bonds'],cfg['representation']['cloud color scheme'], cfg['representation']['atom color scheme'])
lintools.remove_files()
else:
assert len(args.topology) >0, "No topology file provided for analysis."
assert len(args.output_name)>0,"No output name provided."
def find_ligand_name():
"""Users select a ligand to analyse from a numbered list."""
gro = MDAnalysis.Universe(args.topology)
list_of_non_ligands=["SOL","NA","CL","HOH","ARG","LYS","HIS","ASP","GLU","SER","THR", "ASN","GLN","PHE","TYR","TRP","CYS","GLY","PRO","ALA","VAL","ILE","LEU","MET"]
potential_ligands={}
i=0
for residue in gro.residues:
if residue.atoms.resnames[0] not in list_of_non_ligands:
try:
if residue.atoms.altLocs[0] == "" or residue.atoms.altLocs[0] is None:
potential_ligands[i]=residue.atoms
else:
#Deal with ligands that have alternative locations
altloc = str(residue.atoms.altLocs[1])
resid = residue.atoms.resids[0]
new_residue = residue.select_atoms("resid "+str(resid)+" and altloc "+str(altloc))
potential_ligands[i] = new_residue
except Exception as e:
potential_ligands[i]=residue.atoms
i+=1
print "# Nr # Name # Resnumber # Chain ID"
for lig in potential_ligands:
print lig, potential_ligands[lig].resnames[0], potential_ligands[lig].resids[0], potential_ligands[lig].segids[0]
while True:
raw = raw_input( "Choose a ligand to analyse:")
try:
if int(raw) in [x[0] for x in enumerate(potential_ligands.keys())] :
break
else:
print "Error. No such group "+str(raw)
except ValueError:
print "Error. No such group "+str(raw)
pass
ligand_name=potential_ligands[int(raw)]
return "resid "+str(ligand_name.resids[0])+" and segid "+str(ligand_name.segids[0])
def find_diagram_type():
"""User selects diagram type for the residue plots."""
available_diagrams={1:"amino", 2:"domains",3:"clock"}
for diagram in available_diagrams:
print diagram, " : ", available_diagrams[diagram]
while True:
raw_d = raw_input( "Choose diagram type:")
try:
if int(raw_d)-1 in [x[0] for x in enumerate(available_diagrams.keys())] :
break
else:
print "Error. No such group "+str(raw_d)
except ValueError:
print "Error. No such group "+str(raw_d)
pass
diagram_type=available_diagrams[int(raw_d)]
return diagram_type
ligand_name = find_ligand_name()
diagram_type = find_diagram_type()
lintools = Lintools(args.topology,args.trajectory,None,ligand_name,0,args.cutoff,[None],[None],[None],float(args.analysis_cutoff),diagram_type,args.output_name,cfg=False)
lintools.save_files()
lintools.data_input_and_res_time_analysis()
lintools.analysis_of_prot_lig_interactions()
lintools.plot_residues()
lintools.write_config_file(None)
lintools.draw_figure()
lintools.remove_files()
|
ldomic/lintools
|
lintools/lintools.py
|
Python
|
gpl-3.0
| 14,779
|
[
"MDAnalysis",
"RDKit"
] |
a750f1408044759d4c3fc5578d1bfbaf136801f7fac8bb9da18a7bc354967c49
|
import math
import numpy as np
import matplotlib.pyplot as plt
from neuron import Neuron
def gabor1D(phi, m):
sigmaX = 1
scalar = 1.0/(2*math.pi*sigmaX)
k = 2
width = 10
g = np.zeros(m)
for i in range(m):
x = (i - m/2.0) / m * width
g[i] = scalar * math.exp(-x*x/(2*sigmaX*sigmaX)) * math.cos(k*x-phi)
return g
def sine(k, m):
g = np.zeros(m)
for i in range(m):
x = (i - m/2.0) / m * k
g[i] = math.sin(x)
return g
def cosine(k, m):
g = np.zeros(m)
for i in range(m):
x = (i - m/2.0) / m * k
g[i] = math.cos(x)
return g
class Network(object):
def __init__(self, m, n):
self.m = m
self.n = n
self.X = gabor1D(0, m)
self.S = [Neuron() for i in range(n)]
self.A = np.zeros((m, n))
for i in range(n):
phi = i * 2.0 * math.pi / n
#if i < n/2:
# self.A[:,i] = cosine(i, m)
#else:
# self.A[:,i] = sine(i-n/2+1, m)
self.A[:,i] = gabor1D(phi, m).transpose()
self.alpha = 250000000
self.sigma = 0.001
excitatory = np.dot(self.A.transpose(), self.X) / self.m
# Half-wave square
excitatory = np.array([x*x if x >= 0 else 0 for x in excitatory])
self.excitatory = excitatory
squaredA = np.dot(self.A.transpose(), self.A)
# Half-wave square
for i in range(self.n):
for j in range(self.n):
x = squaredA[i, j]
squaredA[i, j] = x*x/self.n if x >= 0 else 0
# Neurons do not inhibit themselves
for i in range(self.n):
squaredA[i, i] = 0
self.squaredA = squaredA
#plt.plot(self.A.transpose()[0])
#plt.show()
def update(self, dt):
binaryS = np.array([n.getBinaryValue() for n in self.S])
inhibitory = np.dot(self.squaredA, binaryS)
addVector = self.alpha * (self.excitatory - inhibitory - self.sigma * self.sigma)
for i in range(self.n):
self.S[i].voltage += addVector[i]*dt
self.S[i].decay(dt)
def getSpikingIndices(self):
x = []
for i in range(self.n):
if self.S[i].getBinaryValue():
x.append(i)
return x
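# A minimal driver sketch (hypothetical sizes and timestep; Neuron is assumed
# to expose `voltage`, `decay(dt)` and `getBinaryValue()` as used above):
#
# if __name__ == '__main__':
#     net = Network(64, 16)
#     for step in range(1000):
#         net.update(0.001)
#     print net.getSpikingIndices()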
|
nspotrepka/neuron-inference
|
network.py
|
Python
|
mit
| 2,308
|
[
"NEURON"
] |
3393c54924b30953e420c60959fcc2ff10d711193bb828b1e12d409d45506c16
|
"""Adds a few finishing touches to the IPHAS DR2 binary FITS catalogues.
It will add TDISP keywords, sanitise TUNIT keywords, add checksums,
and add origin information.
"""
import numpy as np
from astropy.io import fits
from astropy import log
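# TDISPn keywords follow the FITS display-format convention: e.g. 'F9.5'
# means a fixed-point number 9 characters wide with 5 digits after the
# decimal point, which suits degree-valued coordinates; 'F11.5' suits MJDs.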
def augment(filename_origin, filename_target):
log.info('Opening {0}'.format(filename_origin))
f = fits.open(filename_origin)
for i in np.arange(1, f[1].header['TFIELDS']+1, 1): # Loop over all columns
name = f[1].header['TTYPE{0}'.format(i)]
# Set an appropriate TDISP keyword for floating points
if name in ['ra', 'dec', 'l', 'b']:
f[1].header['TDISP{0}'.format(i)] = 'F9.5'
if name in ['posErr', 'pStar', 'pGalaxy', 'pNoise', 'pSaturated',
'rGauSig', 'rEll', 'rSeeing',
'iGauSig', 'iEll', 'iSeeing',
'haGauSig', 'haEll', 'haSeeing',
'seeing']:
f[1].header['TDISP{0}'.format(i)] = 'F4.2'
if name in ['mergedClassStat', 'rmi', 'rmha',
'r', 'rErr', 'rPeakMag', 'rPeakMagErr',
'rAperMag1', 'rAperMag1Err', 'rAperMag3', 'rAperMag3Err',
'rClassStat',
'i', 'iErr', 'iPeakMag', 'iPeakMagErr',
'iAperMag1', 'iAperMag1Err', 'iAperMag3', 'iAperMag3Err',
'iClassStat', 'iXi', 'iEta',
'ha', 'haErr', 'haPeakMag', 'haPeakMagErr',
'haAperMag1', 'haAperMag1Err', 'haAperMag3', 'haAperMag3Err',
'haClassStat', 'haXi', 'haEta',
'r2', 'rErr2', 'i2', 'iErr2', 'ha2', 'haErr2']:
f[1].header['TDISP{0}'.format(i)] = 'F5.2'
if name in ['rPa', 'iPa', 'haPa']:
f[1].header['TDISP{0}'.format(i)] = 'F5.1'
if name in ['rMJD', 'iMJD', 'haMJD']:
f[1].header['TDISP{0}'.format(i)] = 'F11.5'
if name in ['rX', 'rY', 'iX', 'iY', 'haX', 'haY']:
f[1].header['TDISP{0}'.format(i)] = 'F7.2'
# Bring unit definitions in line with the FITS standard
try:
unit = f[1].header['TUNIT{0}'.format(i)]
if unit == 'degrees':
f[1].header['TUNIT{0}'.format(i)] = 'deg'
if unit == 'Magnitude':
f[1].header['TUNIT{0}'.format(i)] = 'mag'
if unit == 'Pixels':
f[1].header['TUNIT{0}'.format(i)] = 'pixel'
if unit == 'Arcsec':
f[1].header['TUNIT{0}'.format(i)] = 'arcsec'
if unit in ['Sigma', 'Number', 'Flag', 'N-sigma',
'bitmask', 'Julian days', 'String']:
del f[1].header['TUNIT{0}'.format(i)]
except KeyError:
pass
# Make the header more informative
f[1].header['EXTNAME'] = 'CATALOG'
f[1].header['ORIGIN'] = 'IPHAS'
f[1].header['PHOTSYS'] = 'VEGA'
f[1].header['REFERENC'] = 'Barentsen et al (2014)'
f[1].header['PRODCATG'] = 'SCIENCE.CATALOGTILE'
f[1].header['COMMENT'] = ' _____ _____ _ _ _____ '
f[1].header['COMMENT'] = '|_ _| __ \| | | | /\ / ____|'
f[1].header['COMMENT'] = ' | | | |__) | |__| | / \ | (___ '
f[1].header['COMMENT'] = ' | | | ___/| __ | / /\ \ \___ \ '
f[1].header['COMMENT'] = ' _| |_| | | | | |/ ____ \ ____) |'
f[1].header['COMMENT'] = '|_____|_| |_| |_/_/ \_\_____/ '
f[1].header['COMMENT'] = ''
f[1].header['COMMENT'] = 'This catalogue is part of IPHAS DR2.'
f[1].header['COMMENT'] = 'For more information, visit http://www.iphas.org.'
log.info('Writing {0}'.format(filename_target))
f.writeto(filename_target, checksum=True,
clobber=True)
if __name__ == '__main__':
DR2 = '/car-data/gb/iphas-dr2-rc6/concatenated'
##for l in [215]:
for l in np.arange(25, 220, 5):
for part in ['a', 'b']:
origin = DR2+'/full-compressed/iphas-dr2-{0}{1}.fits.gz'.format(l, part)
target = DR2+'/full-augmented/iphas-dr2-{0}{1}.fits.gz'.format(l, part)
augment(origin, target)
origin = DR2+'/light-compressed/iphas-dr2-{0}{1}-light.fits.gz'.format(l, part)
target = DR2+'/light-augmented/iphas-dr2-{0}{1}-light.fits.gz'.format(l, part)
augment(origin, target)
|
barentsen/iphas-dr2
|
scripts/release-preparation/augment-catalogue.py
|
Python
|
mit
| 4,349
|
[
"VisIt"
] |
c088eb5398b2db804d4ec31214cdece92c22da6b7e0f91fce02d1c331d9bcf00
|
# -*- coding: UTF-8 -*-
"""
``RSEM``
-----------------------
:Authors: Liron Levin
:Affiliation: Bioinformatics core facility
:Organization: National Institute of Biotechnology in the Negev, Ben Gurion University.
Short Description
~~~~~~~~~~~~~~~~~~~~
A module for running RSEM
Requires
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* fastq file in
``self.sample_data[sample]["fastq.F"]``
``self.sample_data[sample]["fastq.R"]``
``self.sample_data[sample]["fastq.S"]``
* or bam file in
``self.sample_data[sample]["bam"]``
Output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* puts output bam files (if the input is fastq) in:
``self.sample_data[sample]["bam"]``
* puts the location of RSEM results in:
``self.sample_data[sample]["RSEM"]``
``self.sample_data[sample]["genes.results"]``
``self.sample_data[sample]["isoforms.results"]``
Parameters that can be set
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. csv-table::
:header: "Parameter", "Values", "Comments"
:widths: 15, 10, 10
"mode", "transcriptome/genome ", "Is the reference is a genome or a transcriptome?"
"gff3","None","Use if the mode is genome and the annotation file is in gff3 format"
Comments
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This module was tested on:
``RSEM v1.2.25``
``bowtie2 v2.2.6``
Lines for parameter file
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Step_Name: # Name of this step
module: RSEM # Name of the module used
base: # Name of the step [or list of names] to run after [must be after a bam file generator step or merge with fastq files]
script_path: # Command for running the RSEM script
qsub_params:
-pe: # Number of CPUs to reserve for this analysis
mode: # transcriptome or genome
annotation: # For Genome mode: the location of GTF file [the default] , for GFF3 use the gff3 flag. For Transcriptome mode: transcript-to-gene-map file.
# If annotation is set to Trinity the transcript-to-gene-map file will be generated using the from_Trinity_to_gene_map script
# If not set will use only the reference file as unrelated transcripts
from_Trinity_to_gene_map_script_path: # If the mode is transcriptome and the reference was assembled using Trinity it is possible to generate the transcript-to-gene-map file automatically using this script
# If annotation is set to Trinity and this line is empty or missing it will try using the module's associated script
gff3: # Use if the mode is genome and the annotation file is in gff3 format
mapper: # bowtie/bowtie2/star
mapper_path: # Location of mapper script
rsem_prepare_reference_script_path: # Location of preparing reference script
plot_stat: # Generate statistical plots
plot_stat_script_path: # Location of statistical plot generating script
reference: # The reference genome/transcriptome location [FASTA file]. If empty will search for project level fasta.nucl
rsem_generate_data_matrix_script_path: # Location of the final matrix generating script
# If this line is empty or missing it will try using the module's associated script
redirects:
--append-names: # RSEM will append gene_name/transcript_name to the result files
--estimate-rspd: # Enables RSEM to learn from the data how the reads are distributed across a transcript
-p: # Number of CPUs to use in this analysis
--bam: # Will use bam files and not fastq
--no-bam-output:
--output-genome-bam: # Alignments in genomic coordinates (only if mode is genome)
References
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Li, Bo, and Colin N. Dewey. "RSEM: accurate transcript quantification from RNA-Seq data with or without a reference genome." BMC bioinformatics 12.1 (2011): 323.
"""
import os
import sys
import re
from neatseq_flow.PLC_step import Step,AssertionExcept
__author__ = "Liron Levin"
__version__= "1.2.0"
class Step_RSEM(Step):
def step_specific_init(self):
""" Called on intiation
Good place for parameter testing.
Wrong place for sample data testing
"""
self.shell = "bash"
self.file_tag = ""
assert "mode" in list(self.params.keys()) , \
"you should provide mode type [transcriptome or genome] in step %s\n" % self.get_step_name()
assert "mapper" in list(self.params.keys()) , \
"you should provide mapper type [bowtie, bowtie2 or star] in step %s\n" % self.get_step_name()
assert "mapper_path" in list(self.params.keys()) , \
"you should provide mapper script location in step %s\n" % self.get_step_name()
assert not ("--output-genome-bam" in list(self.params["redir_params"].keys())) &("transcriptome" in self.params["mode"]) , \
"you can't use '--output-genome-bam' option when the mode is 'transcriptome' in step %s\n" % self.get_step_name()
assert "rsem_prepare_reference_script_path" in list(self.params.keys()) , \
"you should provide rsem_prepare_reference script location in step %s\n" % self.get_step_name()
if "plot_stat" in list(self.params.keys()):
assert "plot_stat_script_path" in list(self.params.keys()) , \
"you should provide plot_stat script location in step %s\n" % self.get_step_name()
import inspect
self.module_location=os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0)))
def step_sample_initiation(self):
""" A place to do initiation stages following setting of sample_data
"""
if "--bam" not in list(self.params["redir_params"].keys()):
# Assert that all samples have reads files:
for sample in self.sample_data["samples"]:
assert {"fastq.F", "fastq.R", "fastq.S"} & set(self.sample_data[sample].keys()), "Sample %s does not have read files in step %s\n if you have bam files use --bam\n" % (sample, self.name)
else:
for sample in self.sample_data["samples"]:
if "bam" not in list(self.sample_data[sample].keys()):
sys.exit("No Mapping or bam file information!!! \n")
if "reference" not in list(self.params.keys()):
if 'REFERENCE' in list(self.sample_data["project_data"].keys()):
self.params["reference"] = self.sample_data["project_data"]['REFERENCE']
elif 'fasta.nucl' in list(self.sample_data["project_data"].keys()):
self.params["reference"] = self.sample_data["project_data"]['fasta.nucl']
if 'gene_trans_map' in list(self.sample_data["project_data"].keys()):
if "mode" in list(self.params.keys()):
if 'transcriptome' == self.params["mode"].lower():
if 'annotation' not in list(self.params.keys()):
self.params["annotation"] = self.sample_data["project_data"]['gene_trans_map']
if "mode" in list(self.params.keys()):
if 'genome' == self.params["mode"].lower():
if 'annotation' not in list(self.params.keys()):
if 'gff3' in list(self.params.keys()):
gff = list({'gff3','GFF3','gff'} & set(self.sample_data["project_data"].keys()))
if len(gff)>0:
self.params["annotation"] = self.sample_data["project_data"][gff[0]]
else:
gtf= list({'GTF','gtf'} & set(self.sample_data["project_data"].keys()))
if len(gtf)>0:
self.params["annotation"] = self.sample_data["project_data"][gtf[0]]
assert "reference" in list(self.params.keys()) , \
"you should provide reference file in step or have a fasta.nucl/reference file type in project level %s\n" % self.get_step_name()
pass
def create_spec_preliminary_script(self):
""" Add script to run BEFORE all other steps
"""
#Preparing reference genome/transcriptome
#Creating new folder for the reference files
REF_dir = self.make_folder_for_sample("Reference")
if "annotation" not in list(self.params.keys()):
self.params["annotation"] = None
elif self.params["annotation"] =='':
self.params["annotation"] = None
#initiating new script
self.script = ""
if ("transcriptome" in self.params["mode"]) and (self.params["annotation"] != None):
if "Trinity" in self.params["annotation"]:
gene_map_command = "%s %%s %%%%s \n\n"
if "from_Trinity_to_gene_map_script_path" not in list(self.params.keys()):
self.params["from_Trinity_to_gene_map_script_path"]= '''awk '/^>/{transcript=substr($1,2,length($1)); gsub(/_i.+$/,"",$1) ; print substr($1,2,length($1)) "\t" transcript ; next}' '''
gene_map_command = " %s < %%s > %%%%s \n\n"
# if "Create_map_from_Trinity.py" not in os.listdir(self.module_location):
# #sys.exit("you should provide from_Trinity_to_gene_map_script_path !!! \n")
# self.params["from_Trinity_to_gene_map_script_path"]= "awk '/^>/{transcript=substr($1,2,length($1)); gsub(/_i.+$/,"",$1) ; print substr($1,2,length($1)) "\t" transcript ; next}'"
# gene_map_command = " %s < %%s > %%%%s \n\n"
# else:
# self.params["from_Trinity_to_gene_map_script_path"]= "python %s " % os.path.join(self.module_location,"Create_map_from_Trinity.py")
if self.params["from_Trinity_to_gene_map_script_path"]!=None:
#preparing a transcript_to_gene map file from the reference transcriptome file [if it was created by Trinity]
self.script +=gene_map_command % self.params["from_Trinity_to_gene_map_script_path"] \
% self.params["reference"] \
% os.sep.join([REF_dir.rstrip(os.sep),"transcript_to_gene_map.map"])
#update the annotation slot to the new transcript_to_gene_map annotation file
self.params["annotation"]=os.sep.join([REF_dir.rstrip(os.sep),"transcript_to_gene_map.map"])
#The main part of generating the reference files
self.script +=self.params["rsem_prepare_reference_script_path"]+" \\\n\t"
if ("transcriptome" in self.params["mode"]) and (self.params["annotation"] != None):
#If the reference is a transcriptome use the transcript_to_gene_map annotation file
self.script +="-transcript-to-gene-map %s \\\n\t" % self.params["annotation"]
elif ("genome" in self.params["mode"]) and (self.params["annotation"] != None):
if "gff3" not in list(self.params.keys()):
#If the reference is a genome use the gtf annotation file
self.script +="--gtf %s \\\n\t" % self.params["annotation"]
else:
#If the reference is a genome and the --gff3 flag is set, use the gff3 annotation file
self.script +="--gff3 %s \\\n\t" % self.params["annotation"]
# else:
# sys.exit("mode can only be transcriptome or genome !!! \n")
if self.params["mapper_path"]!=None:
self.script +="--%s --%%s-path %%%%s \\\n\t" % self.params["mapper"] \
% self.params["mapper"] \
% self.params["mapper_path"]
else:
self.script +="--%s \\\n\t" % self.params["mapper"]
self.script +="%s \\\n\t%%s \n\n" % self.params["reference"] \
% os.sep.join([REF_dir.rstrip(os.sep),"REF"])
#update the reference slot to the new reference folder location and the reference files prefix
self.params["reference"]= os.sep.join([REF_dir.rstrip(os.sep),"REF"])
pass
def create_spec_wrapping_up_script(self):
""" Add stuff to check and agglomerate the output data
"""
# Make a merge file of all results:
if "rsem_generate_data_matrix_script_path" not in list(self.params.keys()):
if "Merge_RSEM.py" in os.listdir(self.module_location):
self.params["rsem_generate_data_matrix_script_path"]="python %s " % os.path.join(self.module_location,"Merge_RSEM.py")
elif self.params["rsem_generate_data_matrix_script_path"]==None:
if "Merge_RSEM.py" in os.listdir(self.module_location):
self.params["rsem_generate_data_matrix_script_path"]="python %s " % os.path.join(self.module_location,"Merge_RSEM.py")
if "rsem_generate_data_matrix_script_path" in list(self.params.keys()):
if self.params["rsem_generate_data_matrix_script_path"]!=None:
# Make a dir for the results file:
results_dir = self.make_folder_for_sample("Results")
#Running the file merge script
self.script = ""
for sample in self.sample_data["samples"]:
self.script +="cp '%s' '%%s' \n\n" % (self.sample_data[sample]["RSEM"]+'.genes.results') \
% results_dir
for sample in self.sample_data["samples"]:
self.script +="cp '%s' '%%s' \n\n" % (self.sample_data[sample]["RSEM"]+'.isoforms.results') \
% results_dir
self.script +="\n\n"
self.script +="cd '%s' \n\n" % results_dir
self.script +="%s \\\n\t" % self.params["rsem_generate_data_matrix_script_path"]
# for sample in self.sample_data["samples"]:
# self.script +="%s \\\n\t" % (self.sample_data[sample]["RSEM"]+'.genes.results')
self.script +="%s \\\n\t" % '*.genes.results'
self.script +="> %s \n\n" % os.sep.join([results_dir.rstrip(os.sep),"GeneMat.results"])
self.script +="%s \\\n\t" % self.params["rsem_generate_data_matrix_script_path"]
# for sample in self.sample_data["samples"]:
# self.script +="%s \\\n\t" % (self.sample_data[sample]["RSEM"]+'.isoforms.results')
self.script +="%s \\\n\t" % '*.isoforms.results'
self.script +="> %s \n\n" % os.sep.join([results_dir.rstrip(os.sep),"IsoMat.results"])
if "plot_stat" in list(self.params.keys()):
for sample in self.sample_data["samples"]:
self.script +="%s '%%s' '%%%%s' \n\n" % (self.params["plot_stat_script_path"]) \
% (self.sample_data[sample]["RSEM"]) \
% (results_dir+sample+"_diagnostic.pdf")
# if "del_unsorted_bam" in self.params.keys():
# for sample in self.sample_data["samples"]:
# try: # Does a unsorted_bam slot exist?
# self.sample_data[sample]["unsorted_bam"]
# except KeyError: # If failed...
# pass
# else: #Delete unsorted bams
# self.script +="rm -f %s \n\n" % self.sample_data[sample]["unsorted_bam"]
def build_scripts(self):
""" This is the actual script building function
Most, if not all, editing should be done here
HOWEVER, DON'T FORGET TO CHANGE THE CLASS NAME AND THE FILENAME!
"""
for sample in self.sample_data["samples"]: # Getting list of samples out of samples_hash
# Make a dir for the current sample:
sample_dir = self.make_folder_for_sample(sample)
# Name of specific script:
self.spec_script_name = self.set_spec_script_name(sample)
self.script = "("
# This line should be left before every new script. It sees to local issues.
# Use the dir it returns as the base_dir for this step.
use_dir = self.local_start(sample_dir)
# Define location and prefix for output files:
output_prefix = sample + "_RSEM"
# Get constant part of script:
self.script += self.get_script_const()
# Adding the mapper type and script location
if self.params["mapper_path"]!='None':
self.script +="--%s --%%s-path %%%%s \\\n\t" % self.params["mapper"] \
% self.params["mapper"] \
% self.params["mapper_path"]
else:
self.script +="--%s \\\n\t" % self.params["mapper"]
#Check if to use bam or fastq files
if "--bam" not in list(self.params["redir_params"].keys()):
#if fastq check if it is a paired-end
if len({"fastq.F", "fastq.R"} & set(self.sample_data[sample].keys()))==2:
self.script +="--paired-end \\\n\t"
#Add the fastq files
for i in list(self.sample_data[sample].keys()):
if i in ["fastq.F", "fastq.R", "fastq.S"]:
self.script +="%s \\\n\t" % self.sample_data[sample][i]
#self.script +=" \\\n\t"
#Append the new bam file location to the bam slot
if "--output-genome-bam" in list(self.params["redir_params"].keys()):
#if the --output-genome-bam option is present use the genome sorted bam
#self.sample_data[sample]["bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".genome.sorted.bam"])
self.sample_data[sample]["bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".genome.bam"])
#remember the unsorted bam as well
#self.sample_data[sample]["unsorted_bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".genome.bam"])
else:
# the default is the transcript sorted bam
#self.sample_data[sample]["bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".transcript.sorted.bam"])
self.sample_data[sample]["bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".transcript.bam"])
#remember the unsorted bam as well
#self.sample_data[sample]["unsorted_bam"]=os.sep.join([sample_dir.rstrip(os.sep),sample+".transcript.bam"])
else:
#Add the bam file
self.script +="%s \\\n\t" % self.sample_data[sample]["bam"]
#The output information at the end
self.script +="%s \\\n\t%%s \\\n\t " % self.params["reference"] % os.sep.join([use_dir.rstrip(os.sep),sample])
#Generate log file:
self.script += "> %s.out ) >& %%s.log\n\n" % os.sep.join([use_dir.rstrip(os.sep),output_prefix]) \
% os.sep.join([use_dir.rstrip(os.sep),output_prefix])
#Append the location of RSEM results
self.sample_data[sample]["RSEM"]=os.sep.join([sample_dir.rstrip(os.sep),sample])
self.sample_data[sample]["genes.results"]=self.sample_data[sample]['RSEM']+'.genes.results'
self.sample_data[sample]["isoforms.results"]=self.sample_data[sample]['RSEM']+'.isoforms.results'
# Wrapping up function. Leave these lines at the end of every iteration:
self.local_finish(use_dir,sample_dir) # Sees to copying local files to final destination (and other stuff)
self.create_low_level_script()
    def set_Sample_data_dir(self,category,info,data):
        if category not in list(self.keys()):
            self[category] = {}
        # Any existing value for this slot is simply overwritten
        self[category][info] = data
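# Note on the string templates used throughout this module: '%' characters are
# escaped so that each successive '%' application fills exactly one slot.
# A minimal sketch of the idiom (hypothetical values, not part of the pipeline):
#   template = "--%s --%%s-path %%%%s"
#   template % "bowtie2" % "bowtie2" % "/usr/bin/"
#   # -> '--bowtie2 --bowtie2-path /usr/bin/'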
|
bioinfo-core-BGU/neatseq-flow_modules
|
neatseq_flow_modules/Liron/RSEM_module/RSEM.py
|
Python
|
gpl-3.0
| 21,867
|
[
"Bowtie"
] |
2242f5006a20a7c2515d7de4389df5bca4ba019d52f620d91602c7556a168d51
|
"""Simple molecular dynamics.
A block of 27 cubic unit cells of Cu is set up, a single atom is given
a significant momentum, and constant energy molecular dynamics is
performed.
"""
from numpy import *
from asap3 import Atoms, EMT, units, PickleTrajectory
from ase.lattice.cubic import FaceCenteredCubic
from asap3.md.verlet import VelocityVerlet
# Create the atoms
atoms = FaceCenteredCubic(size=(3,3,3), symbol="Cu", pbc=False)
# Give the first atom a non-zero momentum
atoms[0].set_momentum(array([0, -11.3, 0]))
# Associate the EMT potential with the atoms
atoms.set_calculator(EMT())
# Now do molecular dynamics, printing kinetic, potential and total
# energy every ten timesteps.
dyn = VelocityVerlet(atoms, 5.0*units.fs)
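# A minimal sketch of the energy printing promised above, assuming the
# standard ASE energy getters (values reported per atom):
def printenergy(a=atoms):
    epot = a.get_potential_energy() / len(a)
    ekin = a.get_kinetic_energy() / len(a)
    print "Energy per atom: Epot = %.3feV  Ekin = %.3feV  Etot = %.3feV" % \
        (epot, ekin, epot + ekin)
dyn.attach(printenergy, interval=10)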
# Make a trajectory writing output
trajectory = PickleTrajectory("TrajectoryMD-output.traj", "w", atoms)
# Attach it to the dynamics, so it is informed every fifth time a
# timestep is made.
dyn.attach(trajectory, interval=5)
# Now do 1000 timesteps.
dyn.run(1000)
print "The output is in the NetCDF file TrajectoryMD-output.traj"
|
auag92/n2dm
|
Asap-3.8.4/Examples/TrajectoryMD.py
|
Python
|
mit
| 1,070
|
[
"ASE",
"NetCDF"
] |
5ac65b7b131d0adff016770c1c844244ddc14059b95b248e43cb1749847c7c41
|
# Copyright (C) 2010-2019 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest as ut
import espressomd
import numpy as np
import itertools
import collections
from tests_common import check_non_bonded_loop_trace
class RandomPairTest(ut.TestCase):
"""This test creates a system of random particles.
Then the interaction pairs for a certain cutoff
are calculated by brute force in python (pairs_n2),
and compared to the pairs returned by the cell
systems, which should be identical. This check is
repeated for all valid combinations of periodicities.
"""
system = espressomd.System(box_l=3 * [10.])
def setUp(self):
s = self.system
s.time_step = .1
s.cell_system.skin = 0.0
s.min_global_cut = 1.5
n_part = 500
np.random.seed(2)
s.part.add(pos=s.box_l * np.random.random((n_part, 3)))
self.all_pairs = []
dist_func = self.system.distance
for pair in self.system.part.pairs():
if dist_func(pair[0], pair[1]) < 1.5:
self.all_pairs.append((pair[0].id, pair[1].id))
self.all_pairs = set(self.all_pairs)
self.assertGreater(len(self.all_pairs), 0)
def tearDown(self):
self.system.part.clear()
def pairs_n2(self, dist):
# Go through list of all possible pairs for full periodicity
# and skip those that are not within the desired distance
# for the current periodicity
pairs = []
parts = self.system.part
for p in self.all_pairs:
if self.system.distance(parts.by_id(
p[0]), parts.by_id(p[1])) <= dist:
pairs.append(p)
return set(pairs)
def check_duplicates(self, l):
for e in collections.Counter(l).values():
self.assertEqual(e, 1)
def check_pairs(self, n2_pairs):
cs_pairs = self.system.cell_system.get_pairs(1.5)
self.check_duplicates(cs_pairs)
self.assertGreater(len(cs_pairs), 0)
self.assertEqual(n2_pairs ^ set(cs_pairs), set())
def check_dd(self, n2_pairs):
self.system.cell_system.set_regular_decomposition()
self.check_pairs(n2_pairs)
def check_n_squared(self, n2_pairs):
self.system.cell_system.set_n_square()
self.check_pairs(n2_pairs)
def test(self):
periods = [0, 1]
self.system.periodicity = True, True, True
check_non_bonded_loop_trace(self.system)
for periodicity in itertools.product(periods, periods, periods):
self.system.periodicity = periodicity
n2_pairs = self.pairs_n2(1.5)
self.check_dd(n2_pairs)
self.check_n_squared(n2_pairs)
if __name__ == '__main__':
ut.main()
|
espressomd/espresso
|
testsuite/python/random_pairs.py
|
Python
|
gpl-3.0
| 3,429
|
[
"ESPResSo"
] |
251d3e23f13f979acd29095cdad6ba9fa6e4349007e98c5db1f660505d47fcc2
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import gto, scf, mcscf
'''
Symmetry is not immutable
The CASSCF solver can use a different point-group symmetry from the one used
by the reference Hartree-Fock calculation.
'''
mol = gto.Mole()
mol.build(
atom = 'Cr 0 0 0',
basis = 'cc-pvtz',
spin = 6,
symmetry = True,
)
myhf = scf.RHF(mol)
myhf.irrep_nelec = {'A1g': (5,3), 'E1gx': (1,0), 'E1gy': (1,0),
'E2gx': (1,0), 'E2gy': (1,0)}
myhf.kernel()
# Rebuild the molecule with a lower point group; the first two positional
# arguments (0, 0) suppress input dumping and command-line parsing.
myhf.mol.build(0, 0, symmetry='D2h')
mymc = mcscf.CASSCF(myhf, 9, 6)
mymc.kernel()
|
gkc1000/pyscf
|
examples/mcscf/20-change_symmetry.py
|
Python
|
apache-2.0
| 595
|
[
"PySCF"
] |
9c44fe2cdec38ffceb8b5ecf80401f903f2560a576ac80c73de70edcbdc33717
|
"""\
fermi_dirac.py: Utilities for finite temperature Fermi-Dirac occupations.
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
import sys
from NumWrap import matrixmultiply,transpose
from math import exp,log
from Constants import Kboltz
from LA2 import mkdens
import logging
def mkdens_fermi(nel,orbe,orbs,e_temp):
"""
mkdens_fermi(nel,orbe,orbs,e_temp)
Create a density matrix from the orbitals, Orbs, and the Fermi-Dirac
occupations, Occs, derived from the orbital energies, Orbe, given the
electron temperature, e_temp.
    D = Orbs * Occs(Orbe) * Orbs^T
Arguments:
nel Number of electrons in the system
orbe The orbital energies
orbs The orbitals
e_temp The electron temperature
"""
efermi = get_efermi(nel,orbe,e_temp)
occs = get_fermi_occs(efermi,orbe,e_temp)
D = mkdens_occs(orbs,occs)
entropy = get_entropy(occs,e_temp)
return D,entropy
def mkdens_occs(c,occs,**opts):
"Density matrix from a set of occupations (e.g. from FD expression)."
tol = opts.get('tol',1e-5)
verbose = opts.get('verbose',False)
# Determine how many orbs have occupations greater than 0
norb = 0
for fi in occs:
if fi < tol: break
norb += 1
if verbose:
print "mkdens_occs: %d occupied orbitals found" % norb
# Determine how many doubly occupied orbitals we have
nclosed = 0
for i in range(norb):
if abs(1.-occs[i]) > tol: break
nclosed += 1
if verbose:
print "mkdens_occs: %d closed-shell orbitals found" % nclosed
D = mkdens(c,0,nclosed)
for i in range(nclosed,norb):
D = D + occs[i]*matrixmultiply(c[:,i:i+1],transpose(c[:,i:i+1]))
return D
def get_fermi_occ(efermi,en,temp):
    "Fermi-Dirac occupation of a level with energy en, given the Fermi energy and temperature."
    kT = Kboltz*temp
    x = (en-efermi)/kT
    # Clamp far from the Fermi level to avoid overflow in exp()
    if x < -50.: return 1.
    elif x > 50.: return 0.
    return 1/(1+exp(x))
def get_entropy(occs,temp):
kT = Kboltz*temp
entropy = 0
for fi in occs:
if abs(fi) < 1e-10: break # stop summing when occs get small
if fi > 1e-10:
entropy += kT*fi*log(fi)
if (1-fi) > 1e-10:
entropy += kT*(1.-fi)*log(1.-fi)
return entropy
def get_fermi_occs(efermi,orbe,temp):
occs = []
for en in orbe:
occs.append(get_fermi_occ(efermi,en,temp))
return occs
def get_t0_occs(nel,nbf):
occs = [0]*nbf
nc,no = divmod(nel,2)
for i in range(nc): occs[i] = 1.
for i in range(nc,nc+no): occs[i] = 0.5
return occs
def get_efermi(nel,orbe,temp,**opts):
"Bisection method to get Fermi energy from Fermi-Dirac dist"
tol = opts.get('tol',1e-9)
verbose = opts.get('verbose',True)
elow,ehigh = orbe[0]-100.,orbe[-1]
nlow = 2*sum(get_fermi_occs(elow,orbe,temp))
nhigh = 2*sum(get_fermi_occs(ehigh,orbe,temp))
if nlow > nel:
logging.error("elow incorrect %f -> %f " % (elow,nlow))
raise Exception("elow incorrect %f -> %f " % (elow,nlow))
if nhigh < nel:
logging.error("ehigh incorrect %f -> %f " % (ehigh,nhigh))
raise Exception("ehigh incorrect %f -> %f " % (ehigh,nhigh))
for i in range(100):
efermi = (elow+ehigh)/2
n = 2*sum(get_fermi_occs(efermi,orbe,temp))
if abs(n-nel) < tol:
break
elif n < nel:
elow = efermi
else:
ehigh = efermi
    else:
        print "get_efermi: too many iterations in bisection"
return efermi
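if __name__ == '__main__':
    # A minimal usage sketch with toy orbital energies (hypothetical numbers,
    # not from a real calculation): 4 electrons at an electron temperature of 300 K.
    toy_orbe = [-1.0, -0.5, -0.1, 0.2, 0.6]
    ef = get_efermi(4, toy_orbe, 300.0)
    print "Fermi energy: %f" % ef
    print "Occupations: %s" % get_fermi_occs(ef, toy_orbe, 300.0)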
|
certik/pyquante
|
PyQuante/fermi_dirac.py
|
Python
|
bsd-3-clause
| 3,668
|
[
"DIRAC"
] |
869c930689734fb0cd11f6a804ecaa5983b212d7e5c4db2ecce63dd63a9019aa
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import json
import frappe.utils
from frappe.utils import cstr, flt, getdate, comma_and
from frappe import _
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.selling_controller import SellingController
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class WarehouseRequired(frappe.ValidationError): pass
class SalesOrder(SellingController):
def validate_mandatory(self):
# validate transaction date v/s delivery date
if self.delivery_date:
if getdate(self.transaction_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Sales Order Date"))
def validate_po(self):
# validate p.o date v/s delivery date
if self.po_date and self.delivery_date and getdate(self.po_date) > getdate(self.delivery_date):
frappe.throw(_("Expected Delivery Date cannot be before Purchase Order Date"))
if self.po_no and self.customer:
so = frappe.db.sql("select name from `tabSales Order` \
where ifnull(po_no, '') = %s and name != %s and docstatus < 2\
and customer = %s", (self.po_no, self.name, self.customer))
if so and so[0][0]:
frappe.msgprint(_("Warning: Sales Order {0} already exists against same Purchase Order number").format(so[0][0]))
def validate_for_items(self):
check_list = []
for d in self.get('items'):
check_list.append(cstr(d.item_code))
if (frappe.db.get_value("Item", d.item_code, "is_stock_item")==1 or
(self.has_product_bundle(d.item_code) and self.product_bundle_has_stock_item(d.item_code))) \
and not d.warehouse:
frappe.throw(_("Delivery warehouse required for stock item {0}").format(d.item_code),
WarehouseRequired)
# used for production plan
d.transaction_date = self.transaction_date
tot_avail_qty = frappe.db.sql("select projected_qty from `tabBin` \
where item_code = %s and warehouse = %s", (d.item_code,d.warehouse))
d.projected_qty = tot_avail_qty and flt(tot_avail_qty[0][0]) or 0
unique_chk_list = set(check_list)
if len(unique_chk_list) != len(check_list):
frappe.msgprint(_("Warning: Same item has been entered multiple times."))
def product_bundle_has_stock_item(self, product_bundle):
"""Returns true if product bundle has stock item"""
ret = len(frappe.db.sql("""select i.name from tabItem i, `tabProduct Bundle Item` pbi
where pbi.parent = %s and pbi.item_code = i.name and i.is_stock_item = 1""", product_bundle))
return ret
def validate_sales_mntc_quotation(self):
for d in self.get('items'):
if d.prevdoc_docname:
res = frappe.db.sql("select name from `tabQuotation` where name=%s and order_type = %s", (d.prevdoc_docname, self.order_type))
if not res:
frappe.msgprint(_("Quotation {0} not of type {1}").format(d.prevdoc_docname, self.order_type))
def validate_order_type(self):
super(SalesOrder, self).validate_order_type()
def validate_delivery_date(self):
if self.order_type == 'Sales' and not self.delivery_date:
frappe.throw(_("Please enter 'Expected Delivery Date'"))
self.validate_sales_mntc_quotation()
def validate_proj_cust(self):
if self.project_name and self.customer_name:
res = frappe.db.sql("""select name from `tabProject` where name = %s
and (customer = %s or ifnull(customer,'')='')""",
(self.project_name, self.customer))
if not res:
frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project_name))
def validate(self):
super(SalesOrder, self).validate()
self.validate_order_type()
self.validate_delivery_date()
self.validate_mandatory()
self.validate_proj_cust()
self.validate_po()
self.validate_uom_is_integer("stock_uom", "qty")
self.validate_for_items()
self.validate_warehouse()
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self,'items')
self.validate_with_previous_doc()
if not self.status:
self.status = "Draft"
from erpnext.controllers.status_updater import validate_status
validate_status(self.status, ["Draft", "Submitted", "Stopped",
"Cancelled"])
if not self.billing_status: self.billing_status = 'Not Billed'
if not self.delivery_status: self.delivery_status = 'Not Delivered'
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get("items") if d.warehouse]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def validate_with_previous_doc(self):
super(SalesOrder, self).validate_with_previous_doc({
"Quotation": {
"ref_dn_field": "prevdoc_docname",
"compare_fields": [["company", "="], ["currency", "="]]
}
})
def update_enquiry_status(self, prevdoc, flag):
enq = frappe.db.sql("select t2.prevdoc_docname from `tabQuotation` t1, `tabQuotation Item` t2 where t2.parent = t1.name and t1.name=%s", prevdoc)
if enq:
frappe.db.sql("update `tabOpportunity` set status = %s where name=%s",(flag,enq[0][0]))
def update_prevdoc_status(self, flag):
for quotation in list(set([d.prevdoc_docname for d in self.get("items")])):
if quotation:
doc = frappe.get_doc("Quotation", quotation)
if doc.docstatus==2:
frappe.throw(_("Quotation {0} is cancelled").format(quotation))
doc.set_status(update=True)
doc.update_opportunity()
def on_submit(self):
super(SalesOrder, self).on_submit()
self.check_credit_limit()
self.update_stock_ledger(update_stock = 1)
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.base_grand_total, self)
self.update_prevdoc_status('submit')
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
# Cannot cancel stopped SO
if self.status == 'Stopped':
frappe.throw(_("Stopped order cannot be cancelled. Unstop to cancel."))
self.check_nextdoc_docstatus()
self.update_stock_ledger(update_stock = -1)
self.update_prevdoc_status('cancel')
frappe.db.set(self, 'status', 'Cancelled')
def check_nextdoc_docstatus(self):
# Checks Delivery Note
submit_dn = frappe.db.sql_list("""select t1.name from `tabDelivery Note` t1,`tabDelivery Note Item` t2
where t1.name = t2.parent and t2.against_sales_order = %s and t1.docstatus = 1""", self.name)
if submit_dn:
frappe.throw(_("Delivery Notes {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_dn)))
# Checks Sales Invoice
submit_rv = frappe.db.sql_list("""select t1.name
from `tabSales Invoice` t1,`tabSales Invoice Item` t2
where t1.name = t2.parent and t2.sales_order = %s and t1.docstatus = 1""",
self.name)
if submit_rv:
frappe.throw(_("Sales Invoice {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_rv)))
#check maintenance schedule
submit_ms = frappe.db.sql_list("""select t1.name from `tabMaintenance Schedule` t1,
`tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""", self.name)
if submit_ms:
frappe.throw(_("Maintenance Schedule {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_ms)))
# check maintenance visit
submit_mv = frappe.db.sql_list("""select t1.name from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname = %s and t1.docstatus = 1""",self.name)
if submit_mv:
frappe.throw(_("Maintenance Visit {0} must be cancelled before cancelling this Sales Order").format(comma_and(submit_mv)))
# check production order
pro_order = frappe.db.sql_list("""select name from `tabProduction Order`
where sales_order = %s and docstatus = 1""", self.name)
if pro_order:
frappe.throw(_("Production Order {0} must be cancelled before cancelling this Sales Order").format(comma_and(pro_order)))
def check_modified_date(self):
mod_db = frappe.db.get_value("Sales Order", self.name, "modified")
date_diff = frappe.db.sql("select TIMEDIFF('%s', '%s')" %
( mod_db, cstr(self.modified)))
if date_diff and date_diff[0][0]:
frappe.throw(_("{0} {1} has been modified. Please refresh.").format(self.doctype, self.name))
def stop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(-1)
frappe.db.set(self, 'status', 'Stopped')
frappe.msgprint(_("{0} {1} status is Stopped").format(self.doctype, self.name))
def unstop_sales_order(self):
self.check_modified_date()
self.update_stock_ledger(1)
frappe.db.set(self, 'status', 'Submitted')
frappe.msgprint(_("{0} {1} status is Unstopped").format(self.doctype, self.name))
def update_stock_ledger(self, update_stock):
from erpnext.stock.utils import update_bin
for d in self.get_item_list():
if frappe.db.get_value("Item", d['item_code'], "is_stock_item")==1:
args = {
"item_code": d['item_code'],
"warehouse": d['reserved_warehouse'],
"reserved_qty": flt(update_stock) * flt(d['reserved_qty']),
"posting_date": self.transaction_date,
"voucher_type": self.doctype,
"voucher_no": self.name,
"is_amended": self.amended_from and 'Yes' or 'No'
}
update_bin(args)
def on_update(self):
pass
def get_list_context(context=None):
from erpnext.controllers.website_list_for_contact import get_list_context
list_context = get_list_context(context)
list_context["title"] = _("My Orders")
return list_context
@frappe.whitelist()
def stop_or_unstop_sales_orders(names, status):
if not frappe.has_permission("Sales Order", "write"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
names = json.loads(names)
for name in names:
so = frappe.get_doc("Sales Order", name)
if so.docstatus == 1:
if status=="Stop":
if so.status not in ("Stopped", "Cancelled") and (so.per_delivered < 100 or so.per_billed < 100):
so.stop_sales_order()
else:
if so.status == "Stopped":
so.unstop_sales_order()
frappe.local.message_log = []
def before_recurring(self):
super(SalesOrder, self).before_recurring()
for field in ("delivery_status", "per_delivered", "billing_status", "per_billed"):
self.set(field, None)
for d in self.get("items"):
for field in ("delivered_qty", "billed_amt", "planned_qty", "prevdoc_docname"):
d.set(field, None)
@frappe.whitelist()
def make_material_request(source_name, target_doc=None):
def postprocess(source, doc):
doc.material_request_type = "Purchase"
so = frappe.get_doc("Sales Order", source_name)
item_table = "Packed Item" if so.packed_items else "Sales Order Item"
doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Material Request",
"validation": {
"docstatus": ["=", 1]
}
},
item_table: {
"doctype": "Material Request Item",
"field_map": {
"parent": "sales_order_no",
"stock_uom": "uom"
}
}
}, target_doc, postprocess)
return doc
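# A minimal usage sketch for the mapper above (hypothetical Sales Order name;
# on the client this is normally driven through frappe.model.open_mapped_doc):
# mr = make_material_request("SO-00001")
# mr.insert()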
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
if source.po_no:
if target.po_no:
target_po_no = target.po_no.split(", ")
target_po_no.append(source.po_no)
target.po_no = ", ".join(list(set(target_po_no))) if len(target_po_no) > 1 else target_po_no[0]
else:
target.po_no = source.po_no
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.base_amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.base_rate)
target.amount = (flt(source.qty) - flt(source.delivered_qty)) * flt(source.rate)
target.qty = flt(source.qty) - flt(source.delivered_qty)
target_doc = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Delivery Note Item",
"field_map": {
"rate": "rate",
"name": "so_detail",
"parent": "against_sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.delivered_qty < doc.qty
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return target_doc
@frappe.whitelist()
def make_sales_invoice(source_name, target_doc=None):
def postprocess(source, target):
set_missing_values(source, target)
#Get the advance paid Journal Entries in Sales Invoice Advance
target.get_advances()
def set_missing_values(source, target):
target.is_pos = 0
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source, target, source_parent):
target.amount = flt(source.amount) - flt(source.billed_amt)
target.base_amount = target.amount * flt(source_parent.conversion_rate)
target.qty = target.amount / flt(source.rate) if (source.rate and source.billed_amt) else source.qty
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Sales Invoice",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Sales Invoice Item",
"field_map": {
"name": "so_detail",
"parent": "sales_order",
},
"postprocess": update_item,
"condition": lambda doc: doc.base_amount==0 or doc.billed_amt < doc.amount
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"add_if_empty": True
}
}, target_doc, postprocess)
return doclist
@frappe.whitelist()
def make_maintenance_schedule(source_name, target_doc=None):
maint_schedule = frappe.db.sql("""select t1.name
from `tabMaintenance Schedule` t1, `tabMaintenance Schedule Item` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s and t1.docstatus=1""", source_name)
if not maint_schedule:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Schedule",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Schedule Item",
"field_map": {
"parent": "prevdoc_docname"
},
"add_if_empty": True
}
}, target_doc)
return doclist
@frappe.whitelist()
def make_maintenance_visit(source_name, target_doc=None):
visit = frappe.db.sql("""select t1.name
from `tabMaintenance Visit` t1, `tabMaintenance Visit Purpose` t2
where t2.parent=t1.name and t2.prevdoc_docname=%s
and t1.docstatus=1 and t1.completion_status='Fully Completed'""", source_name)
if not visit:
doclist = get_mapped_doc("Sales Order", source_name, {
"Sales Order": {
"doctype": "Maintenance Visit",
"field_map": {
"name": "sales_order_no"
},
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Order Item": {
"doctype": "Maintenance Visit Purpose",
"field_map": {
"parent": "prevdoc_docname",
"parenttype": "prevdoc_doctype"
},
"add_if_empty": True
}
}, target_doc)
return doclist
|
Tejal011089/trufil-erpnext
|
erpnext/selling/doctype/sales_order/sales_order.py
|
Python
|
agpl-3.0
| 15,380
|
[
"VisIt"
] |
0ae3c34894dcac442f8eca005441e70ee1c772ae0298e7e88c0b14489135f55e
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'resources/qosm_settings_dialog.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_qosmDialogSettings(object):
def setupUi(self, qosmDialogSettings):
qosmDialogSettings.setObjectName(_fromUtf8("qosmDialogSettings"))
qosmDialogSettings.resize(605, 377)
qosmDialogSettings.setModal(True)
self.verticalLayout_4 = QtGui.QVBoxLayout(qosmDialogSettings)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.tabWidget = QtGui.QTabWidget(qosmDialogSettings)
self.tabWidget.setObjectName(_fromUtf8("tabWidget"))
self.tabGeneral = QtGui.QWidget()
self.tabGeneral.setObjectName(_fromUtf8("tabGeneral"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.tabGeneral)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.label_5 = QtGui.QLabel(self.tabGeneral)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.horizontalLayout_4.addWidget(self.label_5)
self.generalCacheSize = QtGui.QLabel(self.tabGeneral)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.generalCacheSize.sizePolicy().hasHeightForWidth())
self.generalCacheSize.setSizePolicy(sizePolicy)
self.generalCacheSize.setObjectName(_fromUtf8("generalCacheSize"))
self.horizontalLayout_4.addWidget(self.generalCacheSize)
self.generalClearCache = QtGui.QPushButton(self.tabGeneral)
self.generalClearCache.setObjectName(_fromUtf8("generalClearCache"))
self.horizontalLayout_4.addWidget(self.generalClearCache)
self.verticalLayout_5.addLayout(self.horizontalLayout_4)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label_7 = QtGui.QLabel(self.tabGeneral)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.horizontalLayout.addWidget(self.label_7)
self.generalCacheLocation = QtGui.QLineEdit(self.tabGeneral)
self.generalCacheLocation.setObjectName(_fromUtf8("generalCacheLocation"))
self.horizontalLayout.addWidget(self.generalCacheLocation)
self.generalBrowse = QtGui.QPushButton(self.tabGeneral)
self.generalBrowse.setObjectName(_fromUtf8("generalBrowse"))
self.horizontalLayout.addWidget(self.generalBrowse)
self.verticalLayout_5.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.label = QtGui.QLabel(self.tabGeneral)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label.sizePolicy().hasHeightForWidth())
self.label.setSizePolicy(sizePolicy)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout_2.addWidget(self.label)
self.generalMaxTiles = QtGui.QSpinBox(self.tabGeneral)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.generalMaxTiles.sizePolicy().hasHeightForWidth())
self.generalMaxTiles.setSizePolicy(sizePolicy)
self.generalMaxTiles.setMinimumSize(QtCore.QSize(75, 0))
self.generalMaxTiles.setMaximum(9999)
self.generalMaxTiles.setObjectName(_fromUtf8("generalMaxTiles"))
self.horizontalLayout_2.addWidget(self.generalMaxTiles)
self.verticalLayout_5.addLayout(self.horizontalLayout_2)
self.autoDownload = QtGui.QCheckBox(self.tabGeneral)
self.autoDownload.setChecked(True)
self.autoDownload.setObjectName(_fromUtf8("autoDownload"))
self.verticalLayout_5.addWidget(self.autoDownload)
spacerItem = QtGui.QSpacerItem(20, 119, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_5.addItem(spacerItem)
self.tabWidget.addTab(self.tabGeneral, _fromUtf8(""))
self.tabTiletypes = QtGui.QWidget()
self.tabTiletypes.setObjectName(_fromUtf8("tabTiletypes"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.tabTiletypes)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.tiletypesList = QtGui.QListWidget(self.tabTiletypes)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.tiletypesList.sizePolicy().hasHeightForWidth())
self.tiletypesList.setSizePolicy(sizePolicy)
self.tiletypesList.setMaximumSize(QtCore.QSize(16777215, 150))
self.tiletypesList.setObjectName(_fromUtf8("tiletypesList"))
self.verticalLayout_2.addWidget(self.tiletypesList)
self.horizontalLayout_3 = QtGui.QHBoxLayout()
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.label_3 = QtGui.QLabel(self.tabTiletypes)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout_3.addWidget(self.label_3)
self.tiletypesPattern = QtGui.QLineEdit(self.tabTiletypes)
self.tiletypesPattern.setObjectName(_fromUtf8("tiletypesPattern"))
self.horizontalLayout_3.addWidget(self.tiletypesPattern)
self.tiletypeChange = QtGui.QPushButton(self.tabTiletypes)
self.tiletypeChange.setEnabled(False)
self.tiletypeChange.setObjectName(_fromUtf8("tiletypeChange"))
self.horizontalLayout_3.addWidget(self.tiletypeChange)
self.tiletypeRemove = QtGui.QPushButton(self.tabTiletypes)
self.tiletypeRemove.setEnabled(False)
self.tiletypeRemove.setObjectName(_fromUtf8("tiletypeRemove"))
self.horizontalLayout_3.addWidget(self.tiletypeRemove)
self.verticalLayout_2.addLayout(self.horizontalLayout_3)
self.groupBox = QtGui.QGroupBox(self.tabTiletypes)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.label_4 = QtGui.QLabel(self.groupBox)
self.label_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignTop)
self.label_4.setWordWrap(True)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.verticalLayout_3.addWidget(self.label_4)
self.verticalLayout_2.addWidget(self.groupBox)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_2.addItem(spacerItem1)
self.tabWidget.addTab(self.tabTiletypes, _fromUtf8(""))
self.tabLogs = QtGui.QWidget()
self.tabLogs.setObjectName(_fromUtf8("tabLogs"))
self.verticalLayout = QtGui.QVBoxLayout(self.tabLogs)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.logWhichLog = QtGui.QComboBox(self.tabLogs)
self.logWhichLog.setObjectName(_fromUtf8("logWhichLog"))
self.logWhichLog.addItem(_fromUtf8(""))
self.logWhichLog.addItem(_fromUtf8(""))
self.verticalLayout.addWidget(self.logWhichLog)
self.logText = QtGui.QTextBrowser(self.tabLogs)
self.logText.setObjectName(_fromUtf8("logText"))
self.verticalLayout.addWidget(self.logText)
self.tabWidget.addTab(self.tabLogs, _fromUtf8(""))
self.verticalLayout_4.addWidget(self.tabWidget)
self.button_box = QtGui.QDialogButtonBox(qosmDialogSettings)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply|QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok|QtGui.QDialogButtonBox.Reset|QtGui.QDialogButtonBox.RestoreDefaults)
self.button_box.setObjectName(_fromUtf8("button_box"))
self.verticalLayout_4.addWidget(self.button_box)
self.retranslateUi(qosmDialogSettings)
self.tabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.button_box, QtCore.SIGNAL(_fromUtf8("accepted()")), qosmDialogSettings.accept)
QtCore.QObject.connect(self.button_box, QtCore.SIGNAL(_fromUtf8("rejected()")), qosmDialogSettings.reject)
QtCore.QMetaObject.connectSlotsByName(qosmDialogSettings)
def retranslateUi(self, qosmDialogSettings):
qosmDialogSettings.setWindowTitle(_translate("qosmDialogSettings", "QOSM Settings", None))
self.label_5.setText(_translate("qosmDialogSettings", "Tile cache:", None))
self.generalCacheSize.setText(_translate("qosmDialogSettings", "calculating...", None))
self.generalClearCache.setText(_translate("qosmDialogSettings", "Clear Cache", None))
self.label_7.setText(_translate("qosmDialogSettings", "Cache location", None))
self.generalBrowse.setText(_translate("qosmDialogSettings", "Browse...", None))
self.label.setText(_translate("qosmDialogSettings", "Max tiles to render at once", None))
self.autoDownload.setText(_translate("qosmDialogSettings", "Automatically download new tiles", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabGeneral), _translate("qosmDialogSettings", "General", None))
self.label_3.setText(_translate("qosmDialogSettings", "URL Pattern", None))
self.tiletypeChange.setText(_translate("qosmDialogSettings", "Change", None))
self.tiletypeRemove.setText(_translate("qosmDialogSettings", "Remove", None))
self.groupBox.setTitle(_translate("qosmDialogSettings", "More info", None))
self.label_4.setText(_translate("qosmDialogSettings", "Custom tile types can be added using the layer properties dialog. For a list of commonly used tile types, visit http://wiki.openstreetmap.org/wiki/Tile_servers", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabTiletypes), _translate("qosmDialogSettings", "Custom Tile Types", None))
self.logWhichLog.setItemText(0, _translate("qosmDialogSettings", "Current session", None))
self.logWhichLog.setItemText(1, _translate("qosmDialogSettings", "Previous session", None))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tabLogs), _translate("qosmDialogSettings", "Logs", None))
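# A minimal usage sketch (not part of the generated file; assumes PyQt4 and a
# running display):
# import sys
# app = QtGui.QApplication(sys.argv)
# dialog = QtGui.QDialog()
# ui = Ui_qosmDialogSettings()
# ui.setupUi(dialog)
# dialog.show()
# sys.exit(app.exec_())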
|
paleolimbot/qosm
|
qosmpy/ui_qosm_settings_dialog.py
|
Python
|
gpl-2.0
| 11,680
|
[
"VisIt"
] |
1c8fa2ccce0838ee8b1e8600670e6e957d5fd2a86cf62e999abea0db511d709c
|
from __future__ import absolute_import
from builtins import map
from builtins import str
from . import luqum
from .luqum.parser import parser
from .luqum.utils import LuceneTreeTransformer
from .NED import get_ned_data
from .SIMBAD import get_simbad_data
from .client import client
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.coordinates import Angle
from flask import current_app, request
class IncorrectPositionFormatError(Exception):
pass
class ObjectQueryExtractor(LuceneTreeTransformer):
def visit_search_field(self, node, parents):
if isinstance(node, luqum.tree.SearchField):
if node.name != 'object':
return node
else:
self.revisit = False
self.object_nodes.append(str(node))
if isinstance(node.expr, luqum.tree.FieldGroup) or isinstance(node.expr, luqum.tree.Group):
# We need the following if statement for the case object:("M 81")
if isinstance(node.expr.expr, luqum.tree.Phrase) or isinstance(node.expr.expr, luqum.tree.Word):
self.object_names.append(node.expr.expr.value.replace('"','').strip())
# otherwise it is object:("M 81" OR M1)
else:
for o in node.expr.expr.operands:
if isinstance(o, luqum.tree.Phrase) or isinstance(o, luqum.tree.Word):
self.object_names.append(o.value.replace('"','').strip())
else:
self.revisit = True
self.visit_search_field(o, parents)
elif isinstance(node.expr, luqum.tree.Word):
self.object_names.append(node.expr.value)
elif isinstance(node.expr, luqum.tree.Phrase):
self.object_names.append(node.expr.value.replace('"','').strip())
else:
# This section is to capture contents for recursive calls
                    if isinstance(node.expr, luqum.tree.Phrase) or isinstance(node.expr, luqum.tree.Word):
                        self.object_names.append(node.expr.value.replace('"','').strip())
# otherwise it is object:("M 81" OR M1)
else:
for o in node.expr.operands:
if isinstance(o, luqum.tree.Phrase) or isinstance(o, luqum.tree.Word):
self.object_names.append(o.value.replace('"','').strip())
else:
self.revisit = True
self.visit_search_field(o, parents)
return node
def isBalanced(s):
"""
Checks if a string has balanced parentheses. This method can be easily extended to
include braces, curly brackets etc by adding the opening/closing equivalents
in the obvious places.
"""
expr = ''.join([x for x in s if x in '()'])
if len(expr)%2!=0:
return False
opening=set('(')
match=set([ ('(',')') ])
stack=[]
for char in expr:
if char in opening:
stack.append(char)
else:
if len(stack)==0:
return False
lastOpen=stack.pop()
# This part only becomes relevant if other entities (like braces) are allowed
# if (lastOpen, char) not in match:
# return False
return len(stack)==0
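# A few illustrative cases for isBalanced:
#   isBalanced("a(b)c") -> True
#   isBalanced(")(")    -> False (closing before opening)
#   isBalanced("(()")   -> False (odd number of parentheses)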
def parse_query_string(query_string):
# We only accept Solr queries with balanced parentheses
balanced = isBalanced(query_string)
if not balanced:
current_app.logger.error('Unbalanced parentheses found in Solr query: %s'%query_string)
return [], []
# The query string is valid from the parenthese point-of-view
# First create the query tree
try:
query_tree = parser.parse(query_string)
except Exception as err:
current_app.logger.error('Parsing query string blew up: %s'%str(err))
return [], []
# Instantiate the object that will be used to traverse the tree
# and extract the nodes associated with object: query modifiers
extractor = ObjectQueryExtractor()
# Running the extractor will populate two lists
# initialize the extractor
extractor.object_nodes = []
extractor.object_names = []
# run the extractor
extractions = extractor.visit(query_tree)
return [on for on in extractor.object_names if on.strip()], extractor.object_nodes
def get_object_data(identifiers, service):
if service == 'simbad':
object_data = get_simbad_data(identifiers, 'objects')
elif service == 'ned':
object_data = get_ned_data(identifiers, 'objects')
else:
object_data = {'Error':'Unable to get object data',
'Error Info':'Do not have method to get object data for this service: {0}'.format(service)}
return object_data
def get_object_translations(onames, trgts):
# initialize with empty map
idmap = {}
for trgt in trgts:
idmap[trgt] = {}
for oname in onames:
idmap[trgt][oname] = "0"
# now get the object translations for the targets specified
for trgt in trgts:
for oname in onames:
result = get_object_data([oname], trgt)
if 'Error' in result or 'data' not in result:
# An error was returned!
current_app.logger.error('Failed to find data for {0} object {1}!: {2}'.format(trgt.upper(), oname, result.get('Error Info','NA')))
continue
try:
# We need to have a 'try' here in case a service returns an empty 'data' attribute
idmap[trgt][oname] =[e.get('id',0) for e in result['data'].values()][0]
except:
continue
return idmap
def translate_query(solr_query, oqueries, trgts, onames, translations):
# The goal is to translate the original Solr query with the embedded
# "object:" queries into a Solr query with actual Solr fields
# (nedid:, simbid:) and to include an "=abs:" query to simulate the
# "ADS Objects" search from ADS Classic. The following will be the general patterns
# a. single object name:
# object:Andromeda --> (simbid:translations['simbid'].get("Andromeda","0") OR nedid:translations['nedid'].get("Andromeda","0") OR =abs:"Andromeda") database:astronomy
# b. object name as phrase
# object:"Large Magellanic Cloud" --> same idea as under a.
# c. object query as expression
# object:(Boolean expression) like object:(("51 Peg b" OR 16CygB) AND Osiris) -->
# (simbid:(boolean expression of simbid translations) OR nedid:(boolean expression of nedid translations) OR =abs:(original boolean)) database:astronomy
# The approach is then
# For each of the N object query components O_i (i=1,...,N) parsed out of the original Solr query S, create their translated equivalent
# T_i (i=1,...,N) and do a replacement S.replace(O_i, T_i)
for oquery in oqueries:
query_components = [oquery.replace('object:','=abs:')]
simbad_query = oquery.replace('object:','simbid:')
ned_query = oquery.replace('object:','nedid:')
for oname in onames:
if solr_query.find(oquery) == -1:
continue
simbad_query = simbad_query.replace(oname, translations['simbad'].get(oname,"0"))
ned_query = ned_query.replace(oname, translations['ned'].get(oname,"0"))
if "simbad" in trgts:
query_components.append(simbad_query)
if "ned" in trgts:
query_components.append(ned_query)
translated_query = "(({0}) database:astronomy)".format(" OR ".join(query_components))
solr_query = solr_query.replace(oquery, "(({0}) database:astronomy)".format(" OR ".join(query_components)))
return solr_query
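# A worked sketch of the translation above (made-up object identifiers):
#   solr_query   = 'object:Andromeda year:2000'
#   oqueries     = ['object:Andromeda'], onames = ['Andromeda'], trgts = ['simbad', 'ned']
#   translations = {'simbad': {'Andromeda': '1575544'}, 'ned': {'Andromeda': '89'}}
# yields
#   '((=abs:Andromeda OR simbid:1575544 OR nedid:89) database:astronomy) year:2000'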
def is_number(n):
try:
float(n) # Type-casting the string to `float`.
# If string is not a valid `float`,
# it'll raise `ValueError` exception
except ValueError:
return False
return True
def parse_position_string(pstring):
# In the case of a cone search, we will have received a query of the form
# object:"<position>(:<radius>)"
# (where the search radius is optional)
search_radius = ''
if pstring.find(':') > -1:
position, radius = pstring.split(':')
radius = radius.rstrip().replace("''",'"')
else:
position = pstring.strip()
radius = ''
if is_number(radius):
# A single integer or float was specified: unit is "degrees"
search_radius = Angle('{0} degrees'.format(radius.strip()))
elif radius.endswith("'") or radius.endswith('"'):
# The radius ends with a single or double quote: arcsec or arcmin
search_radius = Angle(radius)
elif radius.count(' ') in [1,2]:
# Sexagesimal format is assumed: (degree, arcmin, arcsec)
try:
c = tuple(map(int, radius.split()))
search_radius = Angle(c, unit=u.deg)
except:
pass
else:
search_radius = ''
# Check if we have a search radius
if not search_radius:
# If not, take the default value
search_radius = Angle('{0} degrees'.format(current_app.config.get('OBJECTS_DEFAULT_RADIUS')))
# Now try to parse the position string using astropy
coords = None
if position.count(" ") == 5:
# We have a position: 05 23 34.6 -69 45 22
coords = SkyCoord(position, unit=(u.hourangle, u.deg))
elif position.count(" ") == 1:
ra, dec = position.split()
if is_number(ra) and is_number(dec):
# We have a position: 80.894167 -69.756111
coords = SkyCoord(float(ra), float(dec), frame='icrs', unit='deg')
else:
# Assume that we have: 05h23m34.6s -69d45m22s
try:
coords = SkyCoord(ra, dec, frame='icrs')
except:
pass
# If we don't have a valid position by now, raise an exception
if not coords:
raise IncorrectPositionFormatError
return coords, search_radius
def verify_query(identifiers, field):
# Safeguard for guarantee that SIMBAD and NED identifiers found are
# indeed in Solr index
query = '{0}:({1})'.format(field, " OR ".join(identifiers))
    params = {'wt': 'json', 'q': query, 'fl': 'id',
              'rows': 10}
    response = current_app.client.get(current_app.config['OBJECTS_SOLRQUERY_URL'], params=params)
if response.status_code != 200:
return {"Error": "Unable to get results!",
"Error Info": "Solr response: %s" % str(response.text),
"Status Code": response.status_code}
resp = response.json()
try:
docs = resp['response']['docs']
except:
docs = []
# return True (we found docs) or False (no docs found)
return len(docs) > 0
|
adsabs/object_service
|
object_service/utils.py
|
Python
|
mit
| 10,867
|
[
"VisIt"
] |
e6e4876103ddb8ad48b31ade55bda14b2e3116483c701fb9359b8a5925a88bcb
|
BOARD_SIZE = 3
MIN_WORD_LENGTH = 3
test_board_size4 = [('b', 'a', 'r', 'i'),
('e', 'c', 'n', 'u'),
('l', 'a', 'o', 'm'),
('i', 'v', 'p', 's')]
test_board_size3 = [('c', 'a', 't'),
('o', 'p', 'c'),
('r', 'e', 'h')]
def is_acceptable(word):
"""
Check whether a word is acceptable for Boggle. House rules: no names!
"""
def is_lowercase(w):
return 97 <= ord(w[0]) <= 122
    def is_possessive(w):
        return w.strip()[-2:] == "'s"
def is_long_enough(w):
return len(w) > 2
return is_lowercase(word) and (not is_possessive(word)) and is_long_enough(word)
# Um... let's call this a testing suite.
assert is_acceptable("marketplace") and not is_acceptable("zygote's") and not is_acceptable("Albert")
def filter_acceptable(list_of_words):
return [word.strip() for word in list_of_words if is_acceptable(word)]
def get_neighbours(v):
row = v[0]
column = v[1]
return [(r, c) for r in [row - 1, row, row + 1] for c in [column - 1, column, column + 1]
if (not (r == row and c == column)) and (1 <= r <= BOARD_SIZE and 1 <= c <= BOARD_SIZE)]
assert get_neighbours((2, 2)) == [(1, 1), (1, 2), (1, 3), (2, 1), (2, 3), (3, 1), (3, 2), (3, 3)]
assert get_neighbours((1, 1)) == [(1, 2), (2, 1), (2, 2)]
def find_words_naive(graph, vertex):
"""
graph - a square list of tuples,
vertex - a tuple representing the starting cell
This naive solution simply executes a depth-first search kind of a traversal of the field, generating
all the sequence of characters originating at the given vertex.
"""
results = []
def visit(node, visited, word_so_far):
visited.append(node)
word_so_far += graph[node[0] - 1][node[1] - 1]
if len(word_so_far) >= MIN_WORD_LENGTH:
results.append(word_so_far)
neighbours = get_neighbours(node)
for next_coordinates in neighbours:
next_node = (next_coordinates[0], next_coordinates[1])
if not next_node in visited:
visit(next_node, list(visited), word_so_far)
visit(vertex, [vertex], "")
return results
# ********** DO NOT RUN THE EXAMPLE BELOW! The naive solver function will take forever!
# BOARD_SIZE = 4
#
# raw_result = []
#
# start_time = time.time()
#
# for start in [(r, c) for r in range(1, 4) for c in range(1, 4)]:
# raw_result = raw_result + find_words_naive(test_board_size4, start)
#
# result = []
#
# for word in raw_result:
# if word in dictionary_list:
# result.append(word)
# result = list(set(result))
#
# print("size 4 boggle solver without pruning took %d seconds, found %d words" % (time.time() - start_time, len(result)))
# OK that works, but it is slow as hell, because we keep looking for all possible paths, even when it's obviously not
# leading to any real word! We need to add pruning of hopeless paths, and we can do it if we convert our dictionary into
# a TRIE using build_trie below, and prune when performing DFS by using is_prefix_in_trie
def build_trie(words):
"""
Create a trie out of a flat list of words. Trie will be a dict of dicts (of dicts, etc).
Example:
["foo","foobar","bar","baz"] =>
{f:
{o:
{o: { None: None},
b:
{a:
{r: {None: None }}}}},
b:
{a:
{r: {None: None},
z:{None: None}}}}
"""
# we'll have 26 choices on the first level
root_dict = {}
for word in words:
current_dict = root_dict
# start adding letters to the dict on the corresponding level, beginning with root
for letter in word:
current_dict = current_dict.setdefault(letter, {})
# a proper word is now represented with keys in nested dicts -- indicate that
current_dict.setdefault(None)
return root_dict
def is_prefix_in_trie(word, trie, strict=False):
"""
    Need to be able to prune strings which are not valid prefixes of real words (and 99.99999% of all
    the paths generated by the solver will be of that garbage kind); strict=False is for this use case.
    strict=True is for checking that the word is actually a real word (note that it may also be a valid prefix).
"""
idx = 0
done = False
current_dict = trie
while not done:
letter = word[idx]
if not letter in current_dict:
return False
# continue with the rest of the letters
current_dict = current_dict[letter]
idx += 1
if idx >= len(word): # reached the end of the word
if not strict: # just checking if it's a valid prefix (the main use case)
return len(current_dict) >= 1
if strict: # checking if this is an actual word -- recognized by None present in the keys on this level.
return None in current_dict
test_trie = build_trie(["bar", "baz", "bars"])
assert not is_prefix_in_trie("foobar", test_trie)
assert not is_prefix_in_trie("bara", test_trie)
assert not is_prefix_in_trie("baza", test_trie)
assert is_prefix_in_trie("b", test_trie)
assert is_prefix_in_trie("ba", test_trie)
assert is_prefix_in_trie("bar", test_trie)
assert is_prefix_in_trie("bars", test_trie)
assert is_prefix_in_trie("baz", test_trie)
def find_words_with_pruning(graph, vertex, trie):
"""
graph - a square list of tuples,
vertex - a tuple representing the starting cell
"""
results = []
def visit(node, visited, word_so_far):
"""
This inner function does all the real job. Just like the simple version above, but the
kicker is the pruning logic, which uses the previously constructed trie.
"""
visited.append(node)
word_so_far += graph[node[0] - 1][node[1] - 1]
if not is_prefix_in_trie(word_so_far, trie): # prune!
return
if len(word_so_far) >= MIN_WORD_LENGTH and is_prefix_in_trie(word_so_far, trie, True):
results.append(word_so_far)
neighbours = get_neighbours(node)
for next_coordinates in neighbours:
next_node = (next_coordinates[0], next_coordinates[1])
if not next_node in visited:
visit(next_node, list(visited), word_so_far)
visit(vertex, [], "")
return results
## utility functions
def load_dictionary():
return filter_acceptable([line for line in open('/usr/share/dict/words', 'r')])
def solve_boggle(board):
    result = []
    BOARD_SIZE = 4
    trie = build_trie(load_dictionary())  # build the trie once, not once per starting cell
    for start in [(r, c) for r in range(1, BOARD_SIZE + 1) for c in range(1, BOARD_SIZE + 1)]:
        result = result + find_words_with_pruning(board, start, trie)
    return list(set(result))  # remove duplicates
if __name__ == '__main__':
# let's play some Boggle!
import time
# borrow Unix built-in dictionary
dictionary_list = load_dictionary()
print("prepared a dictionary of %d English words" % (len(dictionary_list)))
raw_result = []
    start_time = time.time()
    BOARD_SIZE = 3
    for start in [(r, c) for r in range(1, BOARD_SIZE + 1) for c in range(1, BOARD_SIZE + 1)]:
raw_result = raw_result + find_words_naive(test_board_size3, start)
result = []
for word in raw_result:
if word in dictionary_list:
result.append(word)
result = list(set(result))
print("size 3 boggle solver without pruning took %d seconds, found %d words" % (time.time() - start_time, len(result)))
print(result)
print("Not playing 4X4 boggle with naive solution, because it'll take forever")
# now building the big trie
big_trie = build_trie(dictionary_list)
BOARD_SIZE = 3
start_time = time.time()
result = []
for start in [(r, c) for r in range(1, BOARD_SIZE + 1) for c in range(1, BOARD_SIZE + 1)]:
result = result + find_words_with_pruning(test_board_size3, start, big_trie)
result = list(set(result))
print("size 3 boggle solver with pruning took %d seconds, found %d words" % (time.time() - start_time, len(result)))
print(result)
start_time = time.time()
result = []
BOARD_SIZE = 4
for start in [(r, c) for r in range(1, BOARD_SIZE + 1) for c in range(1, BOARD_SIZE + 1)]:
result = result + find_words_with_pruning(test_board_size4, start, big_trie)
result = list(set(result))
print("size 4 boggle solver with pruning took %d seconds, found %d words" % (time.time() - start_time, len(result)))
print(result)
|
alexakarpov/boggler
|
boggler.py
|
Python
|
mit
| 8,535
|
[
"VisIt"
] |
2ae69f7d659106afb118a1d76230cbea58d468d79537b7bf9e26a4e1e74d8b8d
|
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Transitions between Scenes'''
__docformat__ = 'restructuredtext'
import pyglet
from pyglet.gl import *
from cocos.actions import *
import cocos.scene as scene
from cocos.director import director
from cocos.layer import ColorLayer
from cocos.sprite import Sprite
__all__ = [ 'TransitionScene',
'RotoZoomTransition','JumpZoomTransition',
'MoveInLTransition','MoveInRTransition',
'MoveInBTransition','MoveInTTransition',
'SlideInLTransition','SlideInRTransition',
'SlideInBTransition','SlideInTTransition',
'FlipX3DTransition', 'FlipY3DTransition','FlipAngular3DTransition',
'ShuffleTransition',
'TurnOffTilesTransition',
'FadeTRTransition', 'FadeBLTransition',
'FadeUpTransition', 'FadeDownTransition',
'ShrinkGrowTransition',
'CornerMoveTransition',
'EnvelopeTransition',
'SplitRowsTransition', 'SplitColsTransition',
'FadeTransition',
'ZoomTransition',
]
class TransitionScene(scene.Scene):
"""TransitionScene
A Scene that takes two scenes and makes a transition between them.
    The input scenes are put into envelopes (Scenes) that are made children of
    the transition scene.
    Proper transitions may modify any parameter of the envelopes, but must not
    directly modify the input scenes; doing so would corrupt them in the
    general case.
"""
def __init__(self, dst, duration=1.25, src=None):
'''Initializes the transition
:Parameters:
`dst` : Scene
Incoming scene, the one that remains visible when the transition ends.
`duration` : float
Duration of the transition in seconds. Default: 1.25
`src` : Scene
Outgoing scene. Default: current scene
'''
super(TransitionScene, self).__init__()
        if src is None:
src = director.scene
# if the director is already running a transition scene then terminate
# it so we may move on
if isinstance(src, TransitionScene):
tmp = src.in_scene.get('dst')
src.finish()
src = tmp
if src is dst:
raise Exception("Incoming scene must be different from outgoing scene")
envelope = scene.Scene()
envelope.add(dst, name='dst')
self.in_scene = envelope #: envelope with scene that will replace the old one
envelope = scene.Scene()
envelope.add(src, name='src')
self.out_scene = envelope #: envelope with scene that will be replaced
self.duration = duration #: duration in seconds of the transition
if not self.duration:
self.duration = 1.25
self.start()
def start(self):
'''Adds the incoming scene with z=1 and the outgoing scene with z=0'''
self.add( self.in_scene, z=1 )
self.add( self.out_scene, z=0 )
def finish(self):
'''Called when the time is over.
        Envelopes are discarded and the dst scene becomes the one run by the director.
'''
        # devs:
        # try not to override this method
        # if you must, try to remain compatible with the recipe TransitionsWithPop
        # if you can't, state in the docstring for your class that it is not usable
        # for that recipe, and bonus points if you note in the recipe that
        # your class is not eligible for pop transitions
dst = self.in_scene.get('dst')
src = self.out_scene.get('src')
director.replace( dst )
def hide_out_show_in( self ):
'''Hides the outgoing scene and shows the incoming scene'''
self.in_scene.visible = True
self.out_scene.visible = False
def hide_all( self ):
'''Hides both the incoming and outgoing scenes'''
self.in_scene.visible = False
self.out_scene.visible = False
def visit(self):
# preserve modelview matrix
glPushMatrix()
super(TransitionScene, self).visit()
glPopMatrix()
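# a minimal usage sketch (assumes a running cocos director and an existing
# `next_scene`; FadeTransition is one of the subclasses defined below):
#   from cocos.director import director
#   director.replace(FadeTransition(next_scene, duration=1.0))
# the transition scene swaps itself for `next_scene` once finish() fires.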
class RotoZoomTransition(TransitionScene):
'''Rotate and zoom out the outgoing scene, and then rotate and zoom in the incoming
'''
def __init__( self, *args, **kwargs ):
super(RotoZoomTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1.0
self.in_scene.transform_anchor = (width // 2, height //2 )
self.out_scene.transform_anchor = (width // 2, height //2 )
rotozoom = ( ScaleBy(0.001, duration=self.duration/2.0 ) | \
Rotate(360 * 2, duration=self.duration/2.0 ) ) + \
Delay( self.duration / 2.0 )
self.out_scene.do( rotozoom )
self.in_scene.do( Reverse(rotozoom) + CallFunc(self.finish) )
class JumpZoomTransition(TransitionScene):
'''Zoom out and jump the outgoing scene, and then jump and zoom in the incoming
'''
def __init__( self, *args, **kwargs ):
super(JumpZoomTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.5
self.in_scene.position = ( width, 0 )
self.in_scene.transform_anchor = (width // 2, height //2 )
self.out_scene.transform_anchor = (width // 2, height //2 )
jump = JumpBy( (-width,0), width//4, 2, duration=self.duration / 4.0 )
scalein = ScaleTo( 1, duration=self.duration / 4.0 )
scaleout = ScaleTo( 0.5, duration=self.duration / 4.0 )
jumpzoomout = scaleout + jump
jumpzoomin = jump + scalein
delay = Delay( self.duration / 2.0 )
self.out_scene.do( jumpzoomout )
self.in_scene.do( delay + jumpzoomin + CallFunc(self.finish) )
class MoveInLTransition(TransitionScene):
    '''Move the incoming scene in from the left.
'''
def __init__( self, *args, **kwargs ):
super(MoveInLTransition, self ).__init__( *args, **kwargs)
self.init()
a = self.get_action()
self.in_scene.do( (Accelerate(a,0.5) ) + CallFunc(self.finish) )
def init(self):
width, height = director.get_window_size()
self.in_scene.position=(-width,0)
def get_action(self):
return MoveTo( (0,0), duration=self.duration)
class MoveInRTransition(MoveInLTransition):
    '''Move the incoming scene in from the right.
'''
def init(self):
width, height = director.get_window_size()
self.in_scene.position=(width,0)
def get_action(self):
return MoveTo( (0,0), duration=self.duration)
class MoveInTTransition(MoveInLTransition):
    '''Move the incoming scene in from the top.
'''
def init(self):
width, height = director.get_window_size()
self.in_scene.position=(0,height)
def get_action(self):
return MoveTo( (0,0), duration=self.duration)
class MoveInBTransition(MoveInLTransition):
    '''Move the incoming scene in from the bottom.
'''
def init(self):
width, height = director.get_window_size()
self.in_scene.position=(0,-height)
def get_action(self):
return MoveTo( (0,0), duration=self.duration)
class SlideInLTransition(TransitionScene):
'''Slide in the incoming scene from the left border.
'''
def __init__( self, *args, **kwargs ):
super(SlideInLTransition, self ).__init__( *args, **kwargs)
self.width, self.height = director.get_window_size()
self.init()
move = self.get_action()
self.in_scene.do( Accelerate(move,0.5) )
self.out_scene.do( Accelerate(move,0.5) + CallFunc( self.finish) )
def init(self):
self.in_scene.position=( -self.width,0)
def get_action(self):
return MoveBy( (self.width,0), duration=self.duration)
class SlideInRTransition(SlideInLTransition):
'''Slide in the incoming scene from the right border.
'''
def init(self):
self.in_scene.position=(self.width,0)
def get_action(self):
return MoveBy( (-self.width,0), duration=self.duration)
class SlideInTTransition(SlideInLTransition):
'''Slide in the incoming scene from the top border.
'''
def init(self):
self.in_scene.position=(0,self.height)
def get_action(self):
return MoveBy( (0,-self.height), duration=self.duration)
class SlideInBTransition(SlideInLTransition):
'''Slide in the incoming scene from the bottom border.
'''
def init(self):
self.in_scene.position=(0,-self.height)
def get_action(self):
return MoveBy( (0,self.height), duration=self.duration)
class FlipX3DTransition(TransitionScene):
'''Flips the screen horizontally.
The front face is the outgoing scene and the back face is the incoming scene.
'''
def __init__( self, *args, **kwargs ):
super(FlipX3DTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D( amplitude=0, duration=0, grid=(1,1), waves=2 )
flip90 = OrbitCamera( angle_x=0, delta_z=90, duration = self.duration / 2.0 )
flipback90 = OrbitCamera( angle_x=0, angle_z=90, delta_z=90, duration = self.duration / 2.0 )
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc( self.hide_out_show_in ) + \
flipback90
self.do( flip + \
CallFunc(self.finish) + \
StopGrid() )
class FlipY3DTransition(TransitionScene):
'''Flips the screen vertically.
The front face is the outgoing scene and the back face is the incoming scene.
'''
def __init__( self, *args, **kwargs ):
super(FlipY3DTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D( amplitude=0, duration=0, grid=(1,1), waves=2 )
flip90 = OrbitCamera( angle_x=90, delta_z=-90, duration = self.duration / 2.0 )
flipback90 = OrbitCamera( angle_x=90, angle_z=90, delta_z=90, duration = self.duration / 2.0 )
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc( self.hide_out_show_in ) + \
flipback90
self.do( flip + \
CallFunc(self.finish) + \
StopGrid() )
class FlipAngular3DTransition(TransitionScene):
'''Flips the screen half horizontally and half vertically.
The front face is the outgoing scene and the back face is the incoming scene.
'''
def __init__( self, *args, **kwargs ):
super(FlipAngular3DTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
turnongrid = Waves3D( amplitude=0, duration=0, grid=(1,1), waves=2 )
flip90 = OrbitCamera( angle_x=45, delta_z=90, duration = self.duration / 2.0 )
flipback90 = OrbitCamera( angle_x=45, angle_z=90, delta_z=90, duration = self.duration / 2.0 )
self.in_scene.visible = False
flip = turnongrid + \
flip90 + \
CallFunc(self.hide_all) + \
FlipX3D(duration=0) + \
CallFunc( self.hide_out_show_in ) + \
flipback90
self.do( flip + \
CallFunc(self.finish) + \
StopGrid() )
class ShuffleTransition(TransitionScene):
'''Shuffle the outgoing scene, and then reorder the tiles with the incoming scene.
'''
def __init__( self, *args, **kwargs ):
super(ShuffleTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
aspect = width / float(height)
x,y = int(12*aspect), 12
shuffle = ShuffleTiles( grid=(x,y), duration=self.duration/2.0, seed=15 )
self.in_scene.visible = False
self.do( shuffle + \
CallFunc(self.hide_out_show_in) + \
Reverse(shuffle) + \
CallFunc(self.finish) + \
StopGrid()
)
class ShrinkGrowTransition(TransitionScene):
    '''Shrink the outgoing scene while growing the incoming scene
'''
def __init__( self, *args, **kwargs ):
super(ShrinkGrowTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
self.in_scene.scale = 0.001
self.out_scene.scale = 1
self.in_scene.transform_anchor = ( 2*width / 3.0, height / 2.0 )
self.out_scene.transform_anchor = ( width / 3.0, height / 2.0 )
scale_out = ScaleTo( 0.01, duration=self.duration )
scale_in = ScaleTo( 1.0, duration=self.duration )
self.in_scene.do( Accelerate(scale_in,0.5) )
self.out_scene.do( Accelerate(scale_out,0.5) + CallFunc( self.finish) )
class CornerMoveTransition(TransitionScene):
'''Moves the bottom-right corner of the incoming scene to the top-left corner
'''
def __init__( self, *args, **kwargs ):
super(CornerMoveTransition, self ).__init__( *args, **kwargs)
self.out_scene.do( MoveCornerUp( duration=self.duration ) + \
CallFunc(self.finish) + \
StopGrid() )
def start(self):
# don't call super. overriding order
self.add( self.in_scene, z=0 )
self.add( self.out_scene, z=1 )
class EnvelopeTransition(TransitionScene):
'''From the outgoing scene:
- moves the top-right corner to the center
- moves the bottom-left corner to the center
From the incoming scene:
- performs the reverse action of the outgoing scene
'''
def __init__( self, *args, **kwargs ):
super(EnvelopeTransition, self ).__init__( *args, **kwargs)
self.in_scene.visible = False
move = QuadMoveBy( delta0=(320,240), delta1=(-630,0), delta2=(-320,-240), delta3=(630,0), duration=self.duration / 2.0 )
# move = Accelerate( move )
self.do( move + \
CallFunc(self.hide_out_show_in) + \
Reverse(move) + \
CallFunc(self.finish) + \
StopGrid()
)
class FadeTRTransition(TransitionScene):
    '''Fade the tiles of the outgoing scene from the bottom-left corner to the top-right corner.
'''
def __init__( self, *args, **kwargs ):
super(FadeTRTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
aspect = width / float(height)
x,y = int(12*aspect), 12
a = self.get_action(x,y)
# a = Accelerate( a)
self.out_scene.do( a + \
CallFunc(self.finish) + \
StopGrid() )
def start(self):
# don't call super. overriding order
self.add( self.in_scene, z=0 )
self.add( self.out_scene, z=1 )
def get_action(self,x,y):
return FadeOutTRTiles( grid=(x,y), duration=self.duration )
class FadeBLTransition(FadeTRTransition):
'''Fade the tiles of the outgoing scene from the top-right corner to the bottom-left corner.
'''
def get_action(self,x,y):
return FadeOutBLTiles( grid=(x,y), duration=self.duration )
class FadeUpTransition(FadeTRTransition):
'''Fade the tiles of the outgoing scene from the bottom to the top.
'''
def get_action(self,x,y):
return FadeOutUpTiles( grid=(x,y), duration=self.duration )
class FadeDownTransition(FadeTRTransition):
'''Fade the tiles of the outgoing scene from the top to the bottom.
'''
def get_action(self,x,y):
return FadeOutDownTiles( grid=(x,y), duration=self.duration )
class TurnOffTilesTransition(TransitionScene):
'''Turn off the tiles of the outgoing scene in random order
'''
def __init__( self, *args, **kwargs ):
super(TurnOffTilesTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
aspect = width / float(height)
x,y = int(12*aspect), 12
a = TurnOffTiles( grid=(x,y), duration=self.duration )
# a = Accelerate( a)
self.out_scene.do( a + \
CallFunc(self.finish) + \
StopGrid() )
def start(self):
# don't call super. overriding order
self.add( self.in_scene, z=0 )
self.add( self.out_scene, z=1 )
class FadeTransition(TransitionScene):
'''Fade out the outgoing scene and then fade in the incoming scene.
Optionally supply the color to fade to in-between as an RGB color tuple.
'''
def __init__( self, *args, **kwargs ):
color = kwargs.pop('color', (0, 0, 0)) + (0,)
super(FadeTransition, self ).__init__( *args, **kwargs)
self.fadelayer = ColorLayer(*color)
self.in_scene.visible = False
self.add( self.fadelayer, z=2 )
def on_enter( self ):
super( FadeTransition, self).on_enter()
self.fadelayer.do( FadeIn( duration=self.duration/2.0) + \
CallFunc( self.hide_out_show_in) + \
FadeOut( duration=self.duration /2.0 ) + \
CallFunc( self.finish) )
def on_exit( self ):
super( FadeTransition, self).on_exit()
self.remove( self.fadelayer )
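# sketch of the optional `color` keyword above (hypothetical scene):
#   FadeTransition(next_scene, duration=0.5, color=(255, 255, 255))
# fades through white instead of the default black.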
class SplitColsTransition(TransitionScene):
'''Splits the screen in columns.
    The odd columns go upwards while the even columns go downwards.
'''
def __init__( self, *args, **kwargs ):
super(SplitColsTransition, self ).__init__( *args, **kwargs)
width, height = director.get_window_size()
self.in_scene.visible = False
flip_a = self.get_action()
flip = flip_a + \
CallFunc( self.hide_out_show_in ) + \
Reverse(flip_a)
self.do( AccelDeccel(flip) + \
CallFunc(self.finish) + \
StopGrid() )
def get_action( self ):
return SplitCols( cols=3, duration=self.duration/2.0)
class SplitRowsTransition(SplitColsTransition):
'''Splits the screen in rows.
    The odd rows go to the left while the even rows go to the right.
'''
def get_action( self ):
return SplitRows( rows=3, duration=self.duration/2.0)
class ZoomTransition(TransitionScene):
'''Zoom and FadeOut the outgoing scene.'''
def __init__(self, *args, **kwargs):
if 'src' in kwargs or len(args) == 3:
raise Exception("ZoomTransition does not accept 'src' parameter.")
super(ZoomTransition, self ).__init__( *args, **kwargs)
        # fixme: if the scene was never run and some drawable needs to
        # initialize on scene enter, the next line will render badly
self.out_scene.visit()
def start(self):
screensprite = self._create_out_screenshot()
zoom = ScaleBy(2, self.duration) | FadeOut(self.duration)
restore = CallFunc(self.finish)
screensprite.do(zoom + restore)
self.add(screensprite, z=1)
self.add(self.in_scene, z=0)
def finish(self):
# tested with the recipe TransitionsWithPop, works.
dst = self.in_scene.get('dst')
director.replace( dst )
def _create_out_screenshot(self):
        # TODO: try to use `pyglet.image.get_buffer_manager().get_color_buffer()`
        # instead of creating a new BufferManager... note that pyglet uses
        # a BufferManager singleton that fails when you change the window
        # size.
buffer = pyglet.image.BufferManager()
image = buffer.get_color_buffer()
width, height = director.window.width, director.window.height
actual_width, actual_height = director.get_window_size()
out = Sprite(image)
out.position = actual_width / 2, actual_height / 2
out.scale = max(actual_width / float(width), actual_height / float(height))
return out
|
eevee/cocos2d-mirror
|
cocos/scenes/transitions.py
|
Python
|
bsd-3-clause
| 22,202
|
[
"VisIt"
] |
4f7d8a75ed247589e20972e7cb761d459393f203acf27dc51e5d210d49a20ee9
|
#Copyright 2015 Daniel Gusenleitner, Stefano Monti
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""
Usage: python2.7 bam_qc.py -i input_file.bam -o outdir
-h help
-o output_dir
-i input_file.bam *[No default value]
"""
def extract_stats(input_file):
#open bam file
bam_file = pysam.Samfile(input_file, "rb")
#counters
total_aligned_reads = 0
unique_aligned_reads = 0
is_singleton = 0
is_paired = 0
is_proper_pair = 0
is_unmapped = 0
num_unique_mismatches = [0]*5
num_multiple_mismatches = [0.0]*5
num_multiread = [0.0]*20
delet = False
insert = False
spliced = False
reads_with_deletions = 0
spliced_reads = 0
reads_with_inserts = 0
non_spliced_reads = 0
unique_reads_with_deletions = 0
unique_spliced_reads = 0
unique_reads_with_inserts = 0
unique_non_spliced_reads = 0
#tag variables
NH = 0
NM = 0
XS = 0
idx = 0
for read in bam_file:
        if read.cigarstring is not None:
#get all the relevant tags
for tag in read.tags:
if tag[0] == 'NH':
NH = tag[1]
if tag[0] == 'NM':
NM = tag[1]
if NH == 0:
NH = 1
#number of aligned reads
total_aligned_reads += 1
            unique_aligned_reads += 1.0/NH  # fractional weight so a read with NH alignments sums to one
#number of mismatches
if NH == 1:
if NM >= 4:
num_unique_mismatches[4] = num_unique_mismatches[4]+1
else:
num_unique_mismatches[NM] = num_unique_mismatches[NM]+1
else:
if NM >= 4:
num_multiple_mismatches[4] = num_multiple_mismatches[4]+(1.0/float(NH))
else:
num_multiple_mismatches[NM] = num_multiple_mismatches[NM]+(1.0/float(NH))
#number of multiple reads
if NH >= 20:
num_multiread[19] = num_multiread[19]+(1.0/float(NH))
else:
num_multiread[NH-1] = num_multiread[NH-1]+(1.0/float(NH))
#singletons, paired, proper paired, unmapped
is_singleton += int(not read.is_paired)
is_paired += int(read.is_paired)
is_proper_pair += int(read.is_proper_pair)
is_unmapped += int(read.is_unmapped)
#splicing, deletions, inserts
spliced = 'N' in read.cigarstring
insert = 'I' in read.cigarstring
delet = 'D' in read.cigarstring
#actual count
            spliced_reads += int(spliced)
            non_spliced_reads += int(not spliced)
            reads_with_inserts += int(insert)
            reads_with_deletions += int(delet)
            #counting reads that are aligned multiple times only once
            unique_spliced_reads += int(spliced)/float(NH)
            unique_non_spliced_reads += int(not spliced)/float(NH)
            unique_reads_with_inserts += int(insert)/float(NH)
            unique_reads_with_deletions += int(delet)/float(NH)
if idx % 1000000 == 0:
print str(idx)+' reads done'
idx += 1
bam_file.close()
statistics = dict()
statistics['total_aligned_reads'] = total_aligned_reads
statistics['unique_aligned_reads'] = unique_aligned_reads
statistics['is_singleton'] = is_singleton
statistics['is_paired'] = is_paired
statistics['is_proper_pair'] = is_proper_pair
statistics['is_unmapped'] = is_unmapped
statistics['num_unique_mismatches'] = num_unique_mismatches
statistics['num_multiple_mismatches'] = num_multiple_mismatches
statistics['num_multiread'] = num_multiread
statistics['spliced_reads'] = spliced_reads
statistics['non_spliced_reads'] = non_spliced_reads
statistics['reads_with_inserts'] = reads_with_inserts
statistics['reads_with_deletions'] = reads_with_deletions
statistics['unique_spliced_reads'] = unique_spliced_reads
statistics['unique_non_spliced_reads'] = unique_non_spliced_reads
statistics['unique_reads_with_inserts'] = unique_reads_with_inserts
statistics['unique_reads_with_deletions'] = unique_reads_with_deletions
return statistics
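# a small sketch of the 1.0/NH weighting used above (hypothetical NH values):
# a read reported once with NH=1 plus a read reported four times with NH=4
# sum to exactly two "unique" reads.
_example_nh = [1, 4, 4, 4, 4]
assert sum(1.0/nh for nh in _example_nh) == 2.0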
def output_stats(stat, output_dir):
#write all stats into a file
handle = open(output_dir+'output.txt', 'w')
handle.write('total_aligned_reads \t'+str(stat['total_aligned_reads'])+'\n')
handle.write('unique_aligned_reads \t'+str(stat['unique_aligned_reads'])+'\n')
handle.write('is_singleton \t'+str(stat['is_singleton'])+'\n')
handle.write('is_paired \t'+str(stat['is_paired'])+'\n')
handle.write('is_proper_pair \t'+str(stat['is_proper_pair'])+'\n')
handle.write('is_unmapped \t'+str(stat['is_unmapped'])+'\n')
for i in range(len(stat['num_unique_mismatches'])):
handle.write('num_unique_mismatches '+str(i)+ \
'\t'+str(stat['num_unique_mismatches'][i])+'\n')
for i in range(len(stat['num_multiple_mismatches'])):
handle.write('num_multiple_mismatches '+str(i)+'\t'+ \
str(stat['num_multiple_mismatches'][i])+'\n')
for i in range(len(stat['num_multiread'])):
handle.write('num_multiread '+str(i+1)+'\t'+str(stat['num_multiread'][i])+'\n')
handle.write('spliced_reads \t'+str(stat['spliced_reads'])+'\n')
handle.write('non_spliced_reads \t'+str(stat['non_spliced_reads'])+'\n')
handle.write('reads_with_inserts \t'+str(stat['reads_with_inserts'])+'\n')
handle.write('reads_with_deletions \t'+str(stat['reads_with_deletions'])+'\n')
handle.write('unique_spliced_reads \t'+str(stat['unique_spliced_reads'])+'\n')
handle.write('unique_non_spliced_reads \t'+ \
str(stat['unique_non_spliced_reads'])+'\n')
handle.write('unique_reads_with_inserts \t'+ \
str(stat['unique_reads_with_inserts'])+'\n')
handle.write('unique_reads_with_deletions \t'+ \
str(stat['unique_reads_with_deletions'])+'\n')
handle.close()
def plot_mul_alignments(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_multiread']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_multiread']]
_ = plt.bar(index, val, bar_width,
alpha=opacity,
color='b',
                label='Number of alignments')
plt.xlabel('Number of alignments')
plt.ylabel('Counts (log10)')
plt.title('Distribution of reads with multiple alignments')
ticks = [str(i+1) for i in range(len(stat['num_multiread']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'multiple_alignments.png')
def plot_num_unique_mismatches(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_unique_mismatches']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_unique_mismatches']]
_ = plt.bar(index,
val,
bar_width,
alpha=opacity,
color='b')
plt.xlabel('Number of mismatches in uniquely aligned samples')
plt.ylabel('Counts (log10)')
plt.title('Distribution of mismatches in reads with unique alignments')
ticks = [str(i) for i in range(len(stat['num_unique_mismatches']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'num_unique_mismatches.png')
def number_of_multiple_mismatches(stat, output_dir):
_, _ = plt.subplots()
index = np.arange(len(stat['num_multiple_mismatches']))
bar_width = 0.8
opacity = 0.4
val = [math.log(sta+1, 10) for sta in stat['num_multiple_mismatches']]
_ = plt.bar(index,
val,
bar_width,
alpha=opacity,
color='b')
plt.xlabel('Number of mismatches in multiple aligned samples')
plt.ylabel('Counts (log10)')
plt.title('Distribution of mismatches in reads with multiple alignments')
ticks = [str(i) for i in range(len(stat['num_multiple_mismatches']))]
ticks[len(ticks)-1] = ticks[len(ticks)-1]+'+'
plt.xticks(index + bar_width, ticks)
plt.tight_layout()
pylab.savefig(output_dir+'num_multiple_mismatches.png')
def create_html(stat, output_dir):
handle = open(output_dir+'sample_stats.html', 'w')
#output a table with all the counts
    handle.write('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '+ \
        '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'+ \
        '<head><title></title></head><body>\n')
handle.write('<center><br><h1>Sample overview</h1>')
#table
handle.write('<table id="one-column-emphasis">\n')
handle.write('<thead><tr><th> </th><th>Count</th><th>Percentage</th></tr></thead>\n')
#total number + unique / multiple aligned
handle.write('<tr><td>Total number of aligned reads</td><td>'+ \
str(int(stat['total_aligned_reads']))+'</td><td>'+ \
        str(100*round(float(stat['total_aligned_reads'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of uniquely aligned reads</td><td>'+ \
str(int(stat['num_multiread'][0]))+'</td><td>'+ \
        str(100*round(float(stat['num_multiread'][0])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
multi_read = stat['total_aligned_reads']-stat['num_multiread'][0]
handle.write('<tr><td>Number of multiple aligned reads</td><td>'+ \
str(int(multi_read))+'</td><td>'+str(100*round(float(multi_read)\
/float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr> <td></td><td> </td><td> </td></tr>\n')
#mismatches within uniquely aligned
handle.write('<tr><td>Number of perfect matches within uniquely aligned reads</td><td>'+ \
str(int(stat['num_unique_mismatches'][0]))+'</td><td>'+ \
str(100*round(float(stat['num_unique_mismatches'][0])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
uniq_read_multi_mm = stat['num_multiread'][0]-stat['num_unique_mismatches'][0]
handle.write('<tr><td>Number of uniquely aligned reads with mismatches</td><td>'+\
str(int(uniq_read_multi_mm))+'</td><td>'+ \
str(100*round(float(uniq_read_multi_mm)/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr> <td></td><td> </td> <td> </td></tr>\n')
#mismatches within uniquely aligned
handle.write('<tr><td>Number of perfect matches within multiple aligned '+ \
'reads</td><td>'+str(int(stat['num_multiple_mismatches'][0]))+ \
'</td><td>'+str(100*round(float(stat['num_multiple_mismatches'][0])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
mul_read_multi_mm = multi_read-stat['num_multiple_mismatches'][0]
handle.write('<tr><td>Number of multiple aligned reads with mismatches</td><td>'+ \
str(int(mul_read_multi_mm))+'</td><td>'+ \
str(100*round(float(mul_read_multi_mm)/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td> </td><td> </td><td> </td></tr>\n')
#paired / singleton / ...
handle.write('<tr><td>Number of singleton reads</td><td>'+ \
str(stat['is_singleton'])+'</td><td>'+ \
str(100*round(float(stat['is_singleton'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of paired reads</td><td>'+str(stat['is_paired'])+ \
'</td><td>'+str(100*round(float(stat['is_paired'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of proper paired reads</td><td>'+ \
str(stat['is_proper_pair'])+'</td><td>'+ \
str(100*round(float(stat['is_proper_pair'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of unmapped reads</td><td>'+ \
str(stat['is_unmapped'])+'</td><td>'+ \
str(100*round(float(stat['is_unmapped'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td> </td><td> </td><td> </td></tr>\n')
#spliced / inserts / deletions
handle.write('<tr><td>Number of spliced reads</td><td>'+ \
str(stat['spliced_reads'])+'</td><td>'+ \
str(100*round(float(stat['spliced_reads'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of reads with inserts</td><td>'+ \
str(stat['reads_with_inserts'])+'</td><td>'+ \
str(100*round(float(stat['reads_with_inserts'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('<tr><td>Number of reads with deletions</td><td>'+ \
str(stat['reads_with_deletions'])+'</td><td>'+ \
str(100*round(float(stat['reads_with_deletions'])/ \
float(stat['total_aligned_reads']), 3))+'% </td></tr>\n')
handle.write('</table><br><br><br><br>\n')
#add figures
handle.write('<img src="multiple_alignments.png" '+ \
'alt="multiple_alignments"><br><br><br><br>\n')
handle.write('<img src="num_unique_mismatches.png" '+ \
'alt="num_unique_mismatches"><br><br><br><br>\n')
handle.write('<img src="num_multiple_mismatches.png" a'+ \
'lt="num_multiple_mismatches"><center><br><br><br><br>\n\n\n')
handle.write('<style>#one-column-emphasis{font-family:"Lucida Sans Unicode",'+ \
' "Lucida Grande", Sans-Serif;font-size:12px;width:480px;'+ \
'text-align:left;border-collapse:collapse;margin:20px;}'+ \
'#one-column-emphasis th{font-size:14px;font-weight:normal;'+ \
'color:#039;padding:12px 15px;}#one-column-emphasis '+ \
'td{color:#669;border-top:1px solid #e8edff;padding:10px 15px;}'+\
'.oce-first{background:#d0dafd;border-right:10px solid '+ \
'transparent;border-left:10px solid transparent;}'+ \
'#one-column-emphasis tr:hover td{color:#339;'+ \
'background:#eff2ff;}</style></body>\n')
handle.close()
def make_report(stat, output_dir):
plot_mul_alignments(stat, output_dir)
plot_num_unique_mismatches(stat, output_dir)
number_of_multiple_mismatches(stat, output_dir)
create_html(stat, output_dir)
if __name__ == "__main__":
## Import modules
import pysam
import sys
import getopt
import json
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
import pylab
## Check arguments
if len(sys.argv) < 5:
print __doc__
sys.exit(0)
optlist, cmdlist = getopt.getopt(sys.argv[1:], 'hi:o:')
for opt in optlist:
if opt[0] == '-h':
print __doc__; sys.exit(0)
if opt[0] == '-i':
input_filename = opt[1]
if opt[0] == '-o':
output_directory = opt[1]
#extract stats from bam file
stats = extract_stats(input_filename)
#dump stats into a text file
output_stats(stats, output_directory)
#create a report for a single sample
make_report(stats, output_directory)
#dump stats into a json file
with open(output_directory+'stats.json', 'w') as f:
json.dump(stats, f)
|
montilab/Hydra
|
scripts/run_bamqc.py
|
Python
|
apache-2.0
| 16,391
|
[
"pysam"
] |
17688c40de16d31b59850d91bb5e01b722d7fa834f9e58bfeeb37a2251e3b1cd
|
"""
A set of basic transformers for python asts
"""
import os
import sys
import ast
from ctypes import c_long, c_int, c_byte, c_short, c_char_p, c_void_p, c_float
import ctypes
from collections import deque
import ctree
from ctree.c.nodes import Constant, String, SymbolRef, BinaryOp, TernaryOp, \
Return, While, MultiNode, UnaryOp
from ctree.c.nodes import If, CFile, FunctionCall, FunctionDecl, For, Assign, \
ArrayRef, Cast
from ctree.nodes import CtreeNode
from ctree.c.nodes import Lt, Gt, AddAssign
from ctree.c.nodes import Break, Continue, Pass, Array, Literal, And
from ctree.c.nodes import Op
from ctree.visitors import NodeTransformer
from ctree.types import get_common_ctype
# conditional imports
if sys.version_info < (3, 0):
from itertools import izip_longest
else:
from itertools import zip_longest as izip_longest
def get_type(node):
if hasattr(node, 'get_type'):
return type(node.get_type())
elif hasattr(node, 'type'):
return type(node.type)
return c_void_p
class PyCtxScrubber(NodeTransformer):
"""
Removes pesky ctx attributes from Python ast.Name nodes,
yielding much cleaner python asts.
"""
def visit_Name(self, node):
node.ctx = None
return node
class PyBasicConversions(NodeTransformer):
"""
Convert constructs with obvious C analogues.
"""
    def __init__(self, names_dict=None, constants_dict=None):
        # avoid shared mutable default arguments
        self.names_dict = names_dict or {}
        self.constants_dict = constants_dict or {}
PY_OP_TO_CTREE_OP = {
ast.Add: Op.Add,
ast.Mod: Op.Mod,
ast.Mult: Op.Mul,
ast.Sub: Op.Sub,
ast.Div: Op.Div,
ast.Lt: Op.Lt,
ast.Gt: Op.Gt,
ast.LtE: Op.LtE,
ast.GtE: Op.GtE,
ast.BitAnd: Op.BitAnd,
ast.BitOr: Op.BitOr,
ast.Eq: Op.Eq,
ast.NotEq: Op.NotEq,
ast.Not: Op.Not,
ast.And: Op.And,
ast.Or: Op.Or,
ast.BitXor: Op.BitXor,
ast.LShift: Op.BitShL,
ast.RShift: Op.BitShR,
ast.Is: Op.Eq,
ast.IsNot: Op.NotEq,
ast.USub: Op.SubUnary,
ast.UAdd: Op.AddUnary,
ast.FloorDiv: Op.Div,
ast.Invert: Op.BitNot
# TODO list the rest
}
PY_UOP_TO_CTREE_UOP = {
'UAdd': Op.Add,
'USub': Op.Sub,
'Not': Op.Not,
'Invert': Op.BitNot
}
def visit_Num(self, node):
return Constant(node.n)
def visit_Str(self, node):
return String(node.s)
def visit_Name(self, node):
if node.id in self.constants_dict:
return Constant(self.constants_dict[node.id])
if node.id in self.names_dict:
return SymbolRef(self.names_dict[node.id])
return SymbolRef(node.id)
def visit_BinOp(self, node):
lhs = self.visit(node.left)
rhs = self.visit(node.right)
op = self.PY_OP_TO_CTREE_OP.get(type(node.op), type(node.op))()
return BinaryOp(lhs, op, rhs)
    def visit_Return(self, node):
        # bare `return` nodes carry value=None; visiting None would crash
        if getattr(node, 'value', None) is not None:
            return Return(self.visit(node.value))
        else:
            return Return()
def visit_For(self, node):
"""restricted, for now, to range as iterator with long-type args"""
if isinstance(node, ast.For) and \
isinstance(node.iter, ast.Call) and \
isinstance(node.iter.func, ast.Name) and \
node.iter.func.id in ('range', 'xrange'):
Range = node.iter
nArgs = len(Range.args)
if nArgs == 1:
stop = self.visit(Range.args[0])
start, step = Constant(0), Constant(1)
elif nArgs == 2:
start, stop = map(self.visit, Range.args)
step = Constant(1)
elif nArgs == 3:
start, stop, step = map(self.visit, Range.args)
else:
raise Exception(
"Cannot convert a for...range with %d args." % nArgs)
# check no-op conditions.
if all(isinstance(item, Constant) for item in (start, stop, step)):
if step.value == 0:
raise ValueError("range() step argument must not be zero")
elif start.value == stop.value or \
(start.value < stop.value and step.value < 0) or \
(start.value > stop.value and step.value > 0):
return None
if not all(isinstance(item, CtreeNode)
for item in (start, stop, step)):
node.body = list(map(self.visit, node.body))
return node
# TODO allow any expressions castable to Long type
target_types = [c_long]
for el in (stop, start, step):
# typed item to try and guess type off of. Imperfect right now.
if hasattr(el, 'get_type'):
# TODO take the proper class instead of the last; if start,
# end are doubles, but step is long, target is double
t = el.get_type()
assert any(isinstance(t, klass) for klass in [
c_byte, c_int, c_long, c_short
]), "Can only convert ranges with integer/long \
start/stop/step values"
target_types.append(type(t))
target_type = get_common_ctype(target_types)()
target = SymbolRef(node.target.id, target_type)
op = Lt
if hasattr(start, 'value') and hasattr(stop, 'value') and \
start.value > stop.value:
op = Gt
for_loop = For(
Assign(target, start),
op(target.copy(), stop),
AddAssign(target.copy(), step),
[self.visit(stmt) for stmt in node.body],
)
return for_loop
node.body = list(map(self.visit, node.body))
return node
def visit_If(self, node):
if isinstance(node, ast.If):
cond = self.visit(node.test)
then = [self.visit(t) for t in node.body]
elze = [self.visit(t) for t in node.orelse] or None
return If(cond, then, elze)
else:
return self.generic_visit(node)
def visit_IfExp(self, node):
cond = self.visit(node.test)
then = self.visit(node.body)
elze = self.visit(node.orelse)
return TernaryOp(cond, then, elze)
def visit_BoolOp(self, node):
first = self.visit(node.values[0])
second = self.visit(node.values[1])
op = self.PY_OP_TO_CTREE_OP.get(type(node.op),
type(node.op))()
curr = BinaryOp(first, op, second)
for value in node.values[2:]:
curr = BinaryOp(curr, op, self.visit(value))
return curr
def visit_Compare(self, node):
lhs = self.visit(node.left)
op = self.PY_OP_TO_CTREE_OP.get(type(node.ops[0]),
type(node.ops[0]))()
rhs = self.visit(node.comparators[0])
curr = BinaryOp(lhs, op, rhs)
for i in range(1, len(node.ops)):
op = self.PY_OP_TO_CTREE_OP.get(type(node.ops[i]),
type(node.ops[i]))()
rhs = self.visit(node.comparators[i])
lhs = self.visit(node.comparators[i-1])
curr = And(curr, BinaryOp(lhs, op, rhs))
return curr
def visit_Module(self, node):
body = [self.visit(s) for s in node.body]
return CFile("module", body)
def visit_Call(self, node):
args = [self.visit(a) for a in node.args]
fn = self.visit(node.func)
if getattr(node, 'starargs', None) is not None:
node.func = fn
node.args = args
node.starargs = self.visit(node.starargs)
return node
if hasattr(fn, "name") and fn.name == "float":
return Cast(c_float(), args[0])
return FunctionCall(fn, args)
def visit_Expr(self, node):
return self.visit(node.value)
def visit_FunctionDef(self, node):
if ast.get_docstring(node):
node.body.pop(0)
params = [self.visit(p) for p in node.args.args]
defn = [self.visit(s) for s in node.body]
return FunctionDecl(None, node.name, params, defn)
def visit_arg(self, node):
return SymbolRef(node.arg, node.annotation)
def visit_AugAssign(self, node):
op = type(node.op)
target = self.visit(node.target)
value = self.visit(node.value)
# if op is ast.Add:
# return AddAssign(target, value)
# elif op is ast.Sub:
# return SubAssign(target, value)
# elif op is ast.Mult:
# return MulAssign(target, value)
# elif op is ast.Div:
# return DivAssign(target, value)
# elif op is ast.BitXor:
# return BitXorAssign(target, value)
# # TODO: Error?
lookup = {
ast.Add: 'AddAssign', ast.Sub: 'SubAssign', ast.Mult: 'MulAssign',
ast.Div: 'DivAssign', ast.BitAnd: 'BitAndAssign', ast.BitOr:
'BitOrAssign', ast.BitXor: 'BitXorAssign', ast.LShift:
'BitShLAssign', ast.RShift: 'BitShRAssign'
}
if op in lookup:
return getattr(ctree.c.nodes, lookup[op])(target, value)
return node
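    # sketch: an `x += 1` node (ast.AugAssign with ast.Add) maps through the
    # lookup above to AddAssign(SymbolRef('x'), Constant(1)), i.e. C's `x += 1`.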
def targets_to_list(self, targets):
"""parses target into nested lists"""
res = []
for elt in targets:
if not isinstance(elt, (ast.List, ast.Tuple)):
res.append(elt)
elif isinstance(elt, (ast.Tuple, ast.List)):
res.append(self.targets_to_list(elt.elts))
return res
    def value_to_list(self, value):
        """parses value into nested lists for multiple assign"""
        res = []
        if not isinstance(value, (ast.List, ast.Tuple)):
            return value
        for elt in value.elts:
            # test the element, not the enclosing value, when recursing
            if not isinstance(elt, (ast.List, ast.Tuple)):
                res.append(elt)
            else:
                res.append(self.value_to_list(elt))
        return ast.List(elts=res)
def pair_lists(self, targets, values):
res = []
queue = deque((target, values) for target in targets)
sentinel = object()
while queue:
target, value = queue.popleft()
if isinstance(target, list):
# target hasn't been completely unrolled yet
for sub_target, sub_value in izip_longest(
target, value.elts, fillvalue=sentinel):
if sub_target is sentinel or \
sub_value is sentinel:
raise ValueError(
'Incorrect number of values to unpack')
queue.append((sub_target, sub_value))
else:
res.append((target, value))
return res
def parse_pairs(self, node):
targets = self.targets_to_list(node.targets)
values = self.value_to_list(node.value)
return self.pair_lists(targets, values)
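    # sketch (hypothetical source): for `a, (b, c) = 1, (2, 3)` parse_pairs
    # yields [(a, 1), (b, 2), (c, 3)]; visit_Assign below then routes symbol
    # targets through ____temp__ intermediates so swaps like `a, b = b, a`
    # read all right-hand sides before writing any of them.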
def visit_Assign(self, node):
target_value_list = [(self.visit(target), self.visit(value))
for target, value in self.parse_pairs(node)]
if len(target_value_list) == 1:
target, value = target_value_list[0]
return Assign(target, value)
operation_body = []
swap_body = []
for target, value in target_value_list:
if not isinstance(target, SymbolRef):
operation_body.append(Assign(target, value))
elif isinstance(value, Literal) and \
not isinstance(value, SymbolRef):
operation_body.append(Assign(target, value))
else:
new_target = target.copy()
new_target.name = "____temp__" + new_target.name
operation_body.append(Assign(new_target, value))
swap_body.append(Assign(target, new_target.copy()))
return MultiNode(body=operation_body + swap_body)
def visit_Subscript(self, node):
if isinstance(node.slice, ast.Index):
value = self.visit(node.value)
index = self.visit(node.slice.value)
return ArrayRef(value, index)
else:
return node
def visit_While(self, node):
cond = self.visit(node.test)
body = [self.visit(i) for i in node.body]
return While(cond, body)
def visit_Lambda(self, node):
if isinstance(node, ast.Lambda):
def_node = ast.FunctionDef(name="default", args=node.args,
body=node.body, decorator_list=None)
params = [self.visit(p) for p in def_node.args.args]
defn = [Return(self.visit(def_node.body))]
decl_node = FunctionDecl(None, def_node.name, params, defn)
Lifter().visit_FunctionDecl(decl_node)
return decl_node
else:
return node
def visit_Break(self, node):
return Break()
def visit_Continue(self, node):
return Continue()
def visit_Pass(self, node):
return Pass()
def visit_List(self, node):
elts = [self.visit(elt) for elt in node.elts]
types = [get_type(elt) for elt in elts]
array_type = get_common_ctype(types)
return Array(type=ctypes.POINTER(array_type)(), body=elts)
def visit_UnaryOp(self, node):
# If it's already C unary op, recurse only
if isinstance(node, UnaryOp):
node.arg = self.visit(node.arg)
return node
argument = self.visit(node.operand)
op = self.PY_OP_TO_CTREE_OP.get(type(node.op), type(node.op))()
return UnaryOp(op, argument)
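# a minimal usage sketch (assuming the ctree pipeline above; hypothetical input):
#   import ast
#   py_ast = ast.parse("x + 2 * y", mode="eval").body
#   c_ast = PyBasicConversions().visit(py_ast)
# `c_ast` is then a ctree BinaryOp tree that renders as C source.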
class ResolveGeneratedPathRefs(NodeTransformer):
"""
Converts any instances of ctree.nodes.GeneratedPathRef into strings
containing the absolute path of the target file.
"""
def __init__(self, compilation_dir):
self.compilation_dir = compilation_dir
self.count = 0
def visit_GeneratedPathRef(self, node):
self.count += 1
return String(os.path.join(self.compilation_dir,
node.target.get_filename()))
class Lifter(NodeTransformer):
"""
To aid in adding new includes or parameters during tree
traversals, users can store them with arbitrary child nodes and call this
transformation to move them to the correct position.
"""
def __init__(self, lift_params=True, lift_includes=True):
self.lift_params = lift_params
self.lift_includes = lift_includes
def visit_FunctionDecl(self, node):
if self.lift_params:
for child in ast.walk(node):
for param in getattr(child, '_lift_params', []):
if param not in node.params:
node.params.append(param)
# del child._lift_params
return self.generic_visit(node)
def visit_CFile(self, node):
if self.lift_includes:
new_includes = []
for child in ast.walk(node):
for include in getattr(child, '_lift_includes', []):
if include not in new_includes:
new_includes.append(include)
node.body = list(new_includes) + node.body
return self.generic_visit(node)
|
ucb-sejits/ctree
|
ctree/transformations.py
|
Python
|
bsd-2-clause
| 15,638
|
[
"VisIt"
] |
01ded3990ace144a8b1d468abda1ab3c191c65d17281e9cd6c9e1ffa79176ed2
|
#!/usr/bin/env python
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
    if '' not in pathext:
        pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
        return next(iter(which_files(file, mode, path, pathext)))
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
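# usage sketch (hypothetical command names):
#   which('python')              # -> first match on the PATH, or raises IOError
#   list(which_files('python'))  # -> every match, in PATH order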
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
diogocs1/comps
|
web/openerp/tools/which.py
|
Python
|
apache-2.0
| 6,884
|
[
"Brian"
] |
779ce8c96c16e884e8985ce8a3f95622cee9f817110d766489b716c7fa749769
|
import sys
import numpy as np
from ovito import *
from ovito.data import *
from ovito.modifiers import *
from ase.lattice import bulk
from ase.io import read, write
from ase.calculators.singlepoint import SinglePointCalculator
atoms = bulk('Si', cubic=True)
atoms *= (5, 5, 5)
# add some computed properties
atoms.new_array('real', (atoms.positions**2).sum(axis=1))
atoms.new_array('int', np.array([-1]*len(atoms)))
# calculate energy and forces with dummy calculator
forces = -1.0*atoms.positions
spc = SinglePointCalculator(atoms,
forces=forces)
atoms.set_calculator(spc)
# convert from Atoms to DataCollection
data = DataCollection.create_from_ase_atoms(atoms)
# Create a node and insert it into the scene
node = ObjectNode()
node.source = data
dataset.scene_nodes.append(node)
new_data = node.compute()
# Select the new node and adjust viewport cameras to show everything.
dataset.selected_node = node
for vp in dataset.viewports:
vp.zoom_all()
# Do the reverse conversion, after pipeline has been applied
atoms = new_data.to_ase_atoms()
# Dump results to disk
atoms.write('dump.extxyz')
|
srinath-chakravarthy/ovito
|
examples/scripts/ase-interface.py
|
Python
|
gpl-3.0
| 1,114
|
[
"ASE",
"OVITO"
] |
61f34d3a0d96c75fb9f9b7bfec2c65ca40888148b5accbbc02130982bbef4acd
|
import os
from generic import obj
from project_base import Pobj,modes
from project_manager import ProjectManager
from machines import Job,Machine,Supercomputer,get_machine
from structure import Structure,generate_structure,generate_cell
from physical_system import PhysicalSystem,generate_physical_system
from pseudopotential import Pseudopotential,Pseudopotentials
from bundle import bundle
from opium import Opium , OpiumInput , OpiumAnalyzer
from sqd import Sqd , SqdInput , SqdAnalyzer , generate_sqd_input , generate_sqd, hunds_rule_filling
from pwscf import Pwscf , PwscfInput , PwscfAnalyzer , generate_pwscf_input , generate_pwscf
from pw2casino import Pw2casino , Pw2casinoInput , Pw2casinoAnalyzer , generate_pw2casino_input , generate_pw2casino
from pw2qmcpack import Pw2qmcpack , Pw2qmcpackInput , Pw2qmcpackAnalyzer , generate_pw2qmcpack_input , generate_pw2qmcpack
from wfconvert import Wfconvert , WfconvertInput , WfconvertAnalyzer , generate_wfconvert_input , generate_wfconvert
from gamess import Gamess , GamessInput , GamessAnalyzer , generate_gamess_input , generate_gamess, FormattedGroup
from convert4qmc import Convert4qmc, Convert4qmcInput, Convert4qmcAnalyzer, generate_convert4qmc_input, generate_convert4qmc
from qmcpack import Qmcpack , QmcpackInput , QmcpackAnalyzer , generate_qmcpack_input , generate_qmcpack
from vasp import Vasp , VaspInput , VaspAnalyzer , generate_vasp_input , generate_vasp
from qmcpack import loop,linear,cslinear,vmc,dmc
from qmcpack import generate_jastrows,generate_jastrow,generate_jastrow1,generate_jastrow2,generate_jastrow3,generate_opt,generate_opts
from auxiliary import *
#set the machine if known, otherwise user will provide
hostmachine = Machine.get_hostname()
if Machine.exists(hostmachine):
Job.machine = hostmachine
ProjectManager.machine = Machine.get(hostmachine)
#end if
class Settings(Pobj):
singleton = None
project_vars = set([
'machine','account','generate_only','verbose','debug','mode',
'local_directory','remote_directory','runs','results','sleep',
'file_locations','load_images','trace','machine_mode','stages',
'pseudo_dir','skip_submit','interactive_cores','monitor',
'status_only','machine_info',
])
gamess_vars = set('ericfmt mcppath'.split())
all_vars = project_vars | gamess_vars
@staticmethod
def kw_set(vars,source=None):
kw = obj()
        if source is not None:
for n in vars:
if n in source:
kw[n]=source[n]
del source[n]
#end if
#end for
#end if
return kw
    #end def kw_set
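    # sketch of kw_set (hypothetical values): matching keys are *moved* out of
    # `source` into the returned obj, so leftovers in `source` can be detected:
    #   src = {'machine': 'ws4', 'other': 1}
    #   kw = Settings.kw_set(set(['machine']), src)   # kw.machine == 'ws4'; src == {'other': 1}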
def __init__(self):
if Settings.singleton is None:
Settings.singleton = self
else:
self.error('attempted to create a second Settings object\n please just use the original')
#end if
#end def __init__
def error(self,message,header='settings',exit=True,trace=True):
Pobj.error(self,message,header,exit,trace)
#end def error
def __call__(self,**kwargs):
# guard against invalid settings
not_allowed = set(kwargs.keys()) - self.all_vars
if len(not_allowed)>0:
            self.error('unrecognized variables provided.\n  You provided: '+str(list(not_allowed))+'\n  Allowed variables are: '+str(list(self.all_vars)))
#end if
# extract settings based on keyword groups
kw = Settings.kw_set(self.project_vars,kwargs) # project settings
gamess_kw = Settings.kw_set(self.gamess_vars,kwargs) # gamess settings
if len(kwargs)>0:
self.error('some settings keywords have not been accounted for\nleftover keywords: {0}\nthis is a developer error'.format(sorted(kwargs.keys())))
#end if
# transfer project variables to project base class
for name,value in kw.iteritems():
Pobj.__dict__[name] = value
#end for
# process project manager settings
if 'debug' in kw and kw.debug:
Pobj.verbose = True
#end if
if 'mode' in kw:
Pobj.set_mode(kw.mode)
#end if
# process machine settings
if 'machine_info' in kw:
machine_info = Pobj.machine_info
del Pobj.machine_info
if isinstance(machine_info,dict) or isinstance(machine_info,obj):
for machine_name,minfo in machine_info.iteritems():
mname = machine_name.lower()
if Machine.exists(mname):
machine = Machine.get(mname)
machine.incorporate_user_info(minfo)
else:
self.error('machine {0} is unknown\n cannot set machine_info'.format(machine_name))
#end if
#end for
else:
self.error('machine_info must be a dict or obj\n you provided type '+machine_info.__class__.__name__)
#end if
#end if
if 'machine' in kw:
machine_name = kw.machine
if not Machine.exists(machine_name):
Pobj.class_error('machine {0} is unknown'.format(machine_name))
#end if
Job.machine = machine_name
ProjectManager.machine = Machine.get(machine_name)
#del Pobj.machine
if 'account' in kw:
account = Pobj.account
#del Pobj.account
if not isinstance(account,str):
self.error('account for {0} must be a string\n you provided: {1}'.format(machine_name,account))
#end if
ProjectManager.machine.account = account
#end if
if 'machine_mode' in kw:
machine_mode = kw.machine_mode
if machine_mode in Machine.modes:
machine_mode = Machine.modes[machine_mode]
#end if
if machine_mode==Machine.modes.interactive:
if ProjectManager.machine==None:
ProjectManager.class_error('no machine specified for interactive mode')
#end if
if not isinstance(ProjectManager.machine,Supercomputer):
self.error('interactive mode is not supported for machine type '+ProjectManager.machine.__class__.__name__)
#end if
if not 'interactive_cores' in kw:
self.error('interactive mode requested, but interactive_cores not set')
#end if
ProjectManager.machine = ProjectManager.machine.interactive_representation(Pobj.interactive_cores)
del Pobj.interactive_cores
#end if
del Pobj.machine_mode
#end if
#end if
# process simulation settings
if 'local_directory' in kw:
Pobj.file_locations.append(kw.local_directory)
#end if
if 'skip_submit' in kw:
Pobj.skip_submission = Pobj.skip_submit
del Pobj.skip_submit
#end if
if 'file_locations' in kw:
fl = kw.file_locations
if isinstance(fl,str):
Pobj.file_locations.extend([fl])
else:
Pobj.file_locations.extend(list(fl))
#end if
#end if
if not 'pseudo_dir' in kw:
Pobj.pseudopotentials = Pseudopotentials()
else:
pseudo_dir = kw.pseudo_dir
Pobj.file_locations.append(pseudo_dir)
if not os.path.exists(pseudo_dir):
self.error('pseudo_dir "{0}" does not exist'.format(pseudo_dir),trace=False)
#end if
files = os.listdir(pseudo_dir)
ppfiles = []
for f in files:
pf = os.path.join(pseudo_dir,f)
if os.path.isfile(pf):
ppfiles.append(pf)
#end if
#end for
Pobj.pseudopotentials = Pseudopotentials(ppfiles)
#end if
# more simulation/project manager settings processing
mode = Pobj.mode
modes = Pobj.modes
if mode==modes.stages:
stages = Pobj.stages
elif mode==modes.all:
stages = list(Pobj.primary_modes)
else:
stages = [kw.mode]
#end if
allowed_stages = set(Pobj.primary_modes)
if isinstance(stages,str):
stages = [stages]
#end if
if len(stages)==0:
stages = list(Pobj.primary_modes)
#self.error('variable stages must be a list of primary modes.\n Options are '+str(list(allowed_stages)))
elif 'all' in stages:
stages = list(Pobj.primary_modes)
else:
forbidden = set(Pobj.stages)-allowed_stages
if len(forbidden)>0:
self.error('some stages provided are not primary stages.\n You provided '+str(list(forbidden))+'\n Options are '+str(list(allowed_stages)))
#end if
#end if
Pobj.mode = modes.stages
Pobj.stages = stages
Pobj.stages_set = set(Pobj.stages)
# process gamess settings
Gamess.settings(**gamess_kw)
return
#end def __call__
#end class Settings
settings = Settings()
def run_project(*args,**kwargs):
pm = ProjectManager()
pm.add_simulations(*args,**kwargs)
pm.run_project()
#end def run_project
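# A minimal usage sketch (hypothetical machine name, directories, and inputs;
# not part of this module): configure the global settings object once, build
# simulations with the generate_* factories imported above, then hand them to
# run_project.
#
#   settings(
#       machine    = 'ws16',              # hypothetical 16-core workstation
#       pseudo_dir = './pseudopotentials',
#       runs       = './runs',
#       )
#   scf = generate_pwscf(...)             # inputs elided; see generate_pwscf
#   run_project(scf)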
|
habanero-rice/hcpp
|
test/performance-regression/full-apps/qmcpack/nexus/library/project.py
|
Python
|
bsd-3-clause
| 9,752
|
[
"GAMESS",
"QMCPACK",
"VASP"
] |
55a3001b5c70e7ea960edcdb97f960d0cac67ad11caa80ab22ad170a2387adcc
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import MDAnalysis as mda
import numpy as np
from MDAnalysisTests.datafiles import waterPSF, waterDCD
from MDAnalysis.analysis.lineardensity import LinearDensity
from numpy.testing import assert_almost_equal
def test_serial():
universe = mda.Universe(waterPSF, waterDCD)
sel_string = 'all'
selection = universe.select_atoms(sel_string)
xpos = np.array([0., 0., 0., 0.0072334, 0.00473299, 0.,
0., 0., 0., 0.])
ld = LinearDensity(selection, binsize=5).run()
assert_almost_equal(xpos, ld.results['x']['pos'])
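# A possible companion check (sketch, not part of the original test suite):
# LinearDensity's results mapping is keyed by axis, so the access pattern used
# above for 'x' also applies to 'y' and 'z'.
#
# def test_axes_present():
#     universe = mda.Universe(waterPSF, waterDCD)
#     ld = LinearDensity(universe.select_atoms('all'), binsize=5).run()
#     for dim in ('x', 'y', 'z'):
#         assert ld.results[dim]['pos'].shape == ld.results['x']['pos'].shape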
|
MDAnalysis/mdanalysis
|
testsuite/MDAnalysisTests/analysis/test_lineardensity.py
|
Python
|
gpl-2.0
| 1,633
|
[
"MDAnalysis"
] |
488f2b868c30351a32acdff5510ca0056442961693bc98a5e6b5fa2c0afe36a3
|
import sys
import subprocess
from clint.textui import puts, indent, colored
from .base import DeweyCommand
from dewey.util import suppress_stdout_stderr
class Command(DeweyCommand):
def pre_default(self, *args, **kwargs):
pass
def run_command(self, *args, **kwargs):
try:
ps = subprocess.Popen(
"gulp dev",
close_fds=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd="app"
)
while ps.poll() is None:
line = ps.stdout.readline()
if "[BS] Serving files from: frontends/groundcontrol/build/web" in line:
print "Bootstrapping done. Launching browser."
subprocess.check_output("open http://localhost:8111", shell=True,)
sys.stdout.write(line)
sys.stdout.flush()
except KeyboardInterrupt:
print "\n\nShutting down."
# def run_command_oliver(self, *args, **kwargs):
# try:
# ps = subprocess.Popen(
# "npm run watch",
# close_fds=True, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
# )
# while ps.poll() is None:
# line = ps.stdout.readline()
# if "[BS] Serving files from: frontends/groundcontrol/build/web" in line:
# print "Bootstrapping done. Launching browser."
# subprocess.check_output("open http://localhost:8111", shell=True,)
# sys.stdout.write(line)
# sys.stdout.flush()
# except KeyboardInterrupt:
# print "\n\nShutting down."
def post_default(self, *args, **kwargs):
pass
|
buddyup/dewey
|
dewey/commands/up.py
|
Python
|
mit
| 1,768
|
[
"GULP"
] |
b450549d2256ff64dbde27ce791fa1f6d939bcfcac1256ae853ea73ab91a48ab
|
# ============================================================================
#
# Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4.QtCore import Qt
from customeditor import CustomEditor
from camelot.core import constants
from camelot.view.art import Icon
from camelot.view.proxy import ValueLoading
from camelot.view.controls.editors.floateditor import CustomDoubleSpinBox
class ColoredFloatEditor(CustomEditor):
"""Widget for editing a float field, with a calculator"""
def __init__(self,
parent,
precision=2,
reverse=False,
neutral=False,
**kwargs):
CustomEditor.__init__(self, parent)
action = QtGui.QAction(self)
action.setShortcut(Qt.Key_F3)
self.setFocusPolicy(Qt.StrongFocus)
self.spinBox = CustomDoubleSpinBox(parent)
self.spinBox.setDecimals(precision)
self.spinBox.setAlignment(Qt.AlignRight|Qt.AlignVCenter)
self.spinBox.addAction(action)
self.spinBox.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
self.arrow = QtGui.QLabel()
self.arrow.setPixmap(Icon('tango/16x16/actions/go-up.png').getQPixmap())
self.arrow.setFixedHeight(self.get_height())
self.arrow.setAutoFillBackground(False)
self.arrow.setMaximumWidth(19)
self.calculatorButton = QtGui.QToolButton()
icon = Icon('tango/16x16/apps/accessories-calculator.png').getQIcon()
self.calculatorButton.setIcon(icon)
self.calculatorButton.setAutoRaise(True)
self.calculatorButton.setFixedHeight(self.get_height())
self.calculatorButton.clicked.connect(
lambda:self.popupCalculator(self.spinBox.value())
)
action.triggered.connect(
lambda:self.popupCalculator(self.spinBox.value())
)
self.spinBox.editingFinished.connect( self.spinbox_editing_finished )
self.releaseKeyboard()
layout = QtGui.QHBoxLayout()
layout.setMargin(0)
layout.setSpacing(0)
layout.addSpacing(3.5)
layout.addWidget(self.arrow)
layout.addWidget(self.spinBox)
layout.addWidget(self.calculatorButton)
self.reverse = reverse
self.neutral = neutral
self.setFocusProxy(self.spinBox)
self.setLayout(layout)
if not self.reverse:
if not self.neutral:
self.icons = {
-1:Icon('tango/16x16/actions/go-down-red.png').getQPixmap(),
1:Icon('tango/16x16/actions/go-up.png').getQPixmap(),
0:Icon('tango/16x16/actions/zero.png').getQPixmap()
}
else:
self.icons = {
-1:Icon('tango/16x16/actions/go-down-blue.png').getQPixmap(),
1:Icon('tango/16x16/actions/go-up-blue.png').getQPixmap(),
0:Icon('tango/16x16/actions/zero.png').getQPixmap()
}
else:
self.icons = {
1:Icon('tango/16x16/actions/go-down-red.png').getQPixmap(),
-1:Icon('tango/16x16/actions/go-up.png').getQPixmap(),
0:Icon('tango/16x16/actions/zero.png').getQPixmap()
}
def set_field_attributes(self,
editable=True,
background_color=None,
prefix='',
suffix='',
minimum=constants.camelot_minfloat,
maximum=constants.camelot_maxfloat,
single_step=1.0,
**kwargs):
self.set_enabled(editable)
self.set_background_color(background_color)
self.spinBox.setPrefix(u'%s '%(unicode(prefix).lstrip()))
self.spinBox.setSuffix(u' %s'%(unicode(suffix).rstrip()))
self.spinBox.setRange(minimum, maximum)
self.spinBox.setSingleStep(single_step)
def set_enabled(self, editable=True):
self.spinBox.setReadOnly(not editable)
self.spinBox.setEnabled(editable)
self.calculatorButton.setShown(editable)
if editable:
self.spinBox.setButtonSymbols(QtGui.QAbstractSpinBox.UpDownArrows)
else:
self.spinBox.setButtonSymbols(QtGui.QAbstractSpinBox.NoButtons)
def set_value(self, value):
value = CustomEditor.set_value(self, value) or 0.0
self.spinBox.setValue(value)
self.arrow.setPixmap(self.icons[cmp(value,0)])
def get_value(self):
self.spinBox.interpretText()
value = self.spinBox.value()
return CustomEditor.get_value(self) or value
def popupCalculator(self, value):
from camelot.view.controls.calculator import Calculator
calculator = Calculator()
calculator.setValue(value)
calculator.calculation_finished_signal.connect( self.calculation_finished )
calculator.exec_()
def calculation_finished(self, value):
self.spinBox.setValue(float(unicode(value)))
self.editingFinished.emit()
@QtCore.pyqtSlot()
def spinbox_editing_finished(self):
self.editingFinished.emit()
def set_background_color(self, background_color):
if background_color not in (None, ValueLoading):
            selfpalette = self.palette()
sbpalette = self.spinBox.palette()
lepalette = self.spinBox.lineEdit().palette()
for x in [QtGui.QPalette.Active, QtGui.QPalette.Inactive, QtGui.QPalette.Disabled]:
for y in [self.backgroundRole(), QtGui.QPalette.Window, QtGui.QPalette.Base]:
selfpalette.setColor(x, y, background_color)
for y in [self.spinBox.backgroundRole(), QtGui.QPalette.Window, QtGui.QPalette.Base]:
sbpalette.setColor(x, y, background_color)
for y in [self.spinBox.lineEdit().backgroundRole(), QtGui.QPalette.Window, QtGui.QPalette.Base]:
lepalette.setColor(x, y, background_color)
self.setPalette(selfpalette)
self.spinBox.setPalette(sbpalette)
self.spinBox.lineEdit().setPalette(lepalette)
return True
else:
return False
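# A construction sketch (hypothetical values; the Camelot admin layer normally
# drives these calls):
#
#   editor = ColoredFloatEditor(parent=None, precision=2)
#   editor.set_field_attributes(editable=True, suffix='EUR', single_step=0.5)
#   editor.set_value(-3.14)   # value < 0, so the red down arrow is shown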
|
kurtraschke/camelot
|
camelot/view/controls/editors/coloredfloateditor.py
|
Python
|
gpl-2.0
| 7,294
|
[
"VisIt"
] |
320b11284ad0264772bc9ad4fdd739ca2eb6633e4656c37110bbf17ef9db98a8
|
#!/usr/bin/env @PYTHON_EXECUTABLE@
"""
Description: Viewer for Siconos mechanics-IO HDF5 files based on VTK.
"""
from siconos.io.vview import VView, VViewConfig, VViewOptions
from siconos.io.mechanics_hdf5 import MechanicsHdf5
if __name__=='__main__':
## Persistent configuration
config = VViewConfig()
# Load it immediately
config.load_configuration()
# Parse command-line
opts = VViewOptions()
opts.parse()
## Options and config already loaded above
with MechanicsHdf5(io_filename=opts.io_filename, mode='r') as io:
vview = VView(io, opts, config)
vview.run()
# Update configuration and save it
config['window_size'] = vview.renderer_window.GetSize()
config.save_configuration(force=False)
|
siconos/siconos
|
io/swig/io/siconos_vview.py
|
Python
|
apache-2.0
| 762
|
[
"VTK"
] |
125409c2518f8f99210d2d96bc4d867117b6594aa123d3264c79d61af85ed208
|
# $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from rdkit import Chem,Geometry
from rdkit.Chem import AllChem
from rdkit.Chem.Subshape import SubshapeObjects
from rdkit.Chem.Subshape import BuilderUtils
from rdkit.six.moves import cPickle
import time
#-----------------------------------------------------------------------------
class SubshapeCombineOperations(object):
UNION=0
SUM=1
INTERSECT=2
#-----------------------------------------------------------------------------
class SubshapeBuilder(object):
gridDims=(20,15,10)
gridSpacing=0.5
winRad=3.0
nbrCount=7
terminalPtRadScale=0.75
fraction=0.25
stepSize=1.0
featFactory=None
def SampleSubshape(self,subshape1,newSpacing):
ogrid=subshape1.grid
rgrid = Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
newSpacing)
for idx in range(rgrid.GetSize()):
l = rgrid.GetGridPointLoc(idx)
v = ogrid.GetValPoint(l)
rgrid.SetVal(idx,v)
res = SubshapeObjects.ShapeWithSkeleton()
res.grid = rgrid
    return res
def GenerateSubshapeShape(self,cmpd,confId=-1,addSkeleton=True,**kwargs):
shape = SubshapeObjects.ShapeWithSkeleton()
shape.grid=Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
self.gridSpacing)
AllChem.EncodeShape(cmpd,shape.grid,ignoreHs=False,confId=confId)
if addSkeleton:
conf = cmpd.GetConformer(confId)
      self.GenerateSubshapeSkeleton(shape,conf,**kwargs)
return shape
def __call__(self,cmpd,**kwargs):
return self.GenerateSubshapeShape(cmpd,**kwargs)
def GenerateSubshapeSkeleton(self,shape,conf=None,terminalPtsOnly=False,skelFromConf=True):
if conf and skelFromConf:
pts = BuilderUtils.FindTerminalPtsFromConformer(conf,self.winRad,self.nbrCount)
else:
pts = BuilderUtils.FindTerminalPtsFromShape(shape,self.winRad,self.fraction)
pts = BuilderUtils.ClusterTerminalPts(pts,self.winRad,self.terminalPtRadScale)
BuilderUtils.ExpandTerminalPts(shape,pts,self.winRad)
if len(pts)<3:
raise ValueError('only found %d terminals, need at least 3'%len(pts))
if not terminalPtsOnly:
pts = BuilderUtils.AppendSkeletonPoints(shape.grid,pts,self.winRad,self.stepSize)
for i,pt in enumerate(pts):
BuilderUtils.CalculateDirectionsAtPoint(pt,shape.grid,self.winRad)
if conf and self.featFactory:
BuilderUtils.AssignMolFeatsToPoints(pts,conf.GetOwningMol(),self.featFactory,self.winRad)
shape.skelPts=pts
def CombineSubshapes(self,subshape1,subshape2,operation=SubshapeCombineOperations.UNION):
import copy
cs = copy.deepcopy(subshape1)
if operation==SubshapeCombineOperations.UNION:
cs.grid |= subshape2.grid
elif operation==SubshapeCombineOperations.SUM:
cs.grid += subshape2.grid
elif operation==SubshapeCombineOperations.INTERSECT:
cs.grid &= subshape2.grid
else:
raise ValueError('bad combination operation')
return cs
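# A usage sketch for the combine operation (hypothetical molecules; compare
# the __main__ demo below, which builds a single shape):
#
#   builder = SubshapeBuilder()
#   shape1 = builder(mol1)
#   shape2 = builder(mol2)
#   merged = builder.CombineSubshapes(shape1, shape2,
#                                     operation=SubshapeCombineOperations.UNION)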
if __name__=='__main__':
from rdkit.Chem import AllChem,ChemicalFeatures
from rdkit.Chem.PyMol import MolViewer
#cmpd = Chem.MolFromSmiles('CCCc1cc(C(=O)O)ccc1')
#cmpd = Chem.AddHs(cmpd)
if 1:
cmpd = Chem.MolFromSmiles('C1=CC=C1C#CC1=CC=C1')
cmpd = Chem.AddHs(cmpd)
AllChem.EmbedMolecule(cmpd)
AllChem.UFFOptimizeMolecule(cmpd)
AllChem.CanonicalizeMol(cmpd)
print >>file('testmol.mol','w+'),Chem.MolToMolBlock(cmpd)
else:
cmpd = Chem.MolFromMolFile('testmol.mol')
builder=SubshapeBuilder()
if 1:
shape=builder.GenerateSubshapeShape(cmpd)
v = MolViewer()
if 1:
import tempfile
tmpFile = tempfile.mktemp('.grd')
v.server.deleteAll()
Geometry.WriteGridToFile(shape.grid,tmpFile)
time.sleep(1)
v.ShowMol(cmpd,name='testMol',showOnly=True)
v.server.loadSurface(tmpFile,'testGrid','',2.5)
v.server.resetCGO('*')
cPickle.dump(shape,file('subshape.pkl','w+'))
for i,pt in enumerate(shape.skelPts):
v.server.sphere(tuple(pt.location),.5,(1,0,1),'Pt-%d'%i)
if not hasattr(pt,'shapeDirs'): continue
momBeg = pt.location-pt.shapeDirs[0]
momEnd = pt.location+pt.shapeDirs[0]
v.server.cylinder(tuple(momBeg),tuple(momEnd),.1,(1,0,1),'v-%d'%i)
|
soerendip42/rdkit
|
rdkit/Chem/Subshape/SubshapeBuilder.py
|
Python
|
bsd-3-clause
| 4,317
|
[
"PyMOL",
"RDKit"
] |
248c77e7343978e52a07f17c0eb4d21cbc9df54c0c300b22c5a3c834e696befe
|
import json
import random
import string
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.http import JsonResponse
from django.shortcuts import render, redirect
import haikunator
from .models import Room
from services.GroupMembers import *
def about(request):
return render(request, "chat/about.html")
def new_room(request):
"""
Randomly create a new room, and redirect to it.
"""
new_room = None
while not new_room:
with transaction.atomic():
label = haikunator.haikunate()
if Room.objects.filter(label=label).exists():
continue
new_room = Room.objects.create(label=label, actor=request.user)
return redirect(chat_room, label=label)
def chat_room(request, label):
"""
Room view - show the room, with latest messages.
The template for this view has the WebSocket business to send and stream
messages, so see the template for where the magic happens.
"""
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
members = get_all_member_info(room)
# Check if user already in room
    if request.user not in members:
room.members.add(request.user)
members = get_all_member_info(room)
# We want to show the last 50 messages, ordered most-recent-last
messages = reversed(room.messages.order_by('-timestamp')[:50])
return render(request, "chat/room.html", {
'room': room,
'messages': messages,
'members': members,
'actor': room.actor
})
def home(request):
return HttpResponse('You are logged in')
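# A routing sketch (hypothetical urls.py wiring; the label pattern assumes
# haikunator-style words and dashes):
#
#   urlpatterns = [
#       url(r'^about/$', views.about),
#       url(r'^new/$', views.new_room),
#       url(r'^(?P<label>[\w-]+)/$', views.chat_room, name='chat_room'),
#   ]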
|
TranDinhKhang/funkychat
|
chat/views.py
|
Python
|
bsd-3-clause
| 1,898
|
[
"VisIt"
] |
f2f72e65b14798144d28a6068f5f6623d581d9230c2f49238d10c0bc3d2a049f
|
"""Pipeline functionality shared amongst multiple analysis types.
"""
import os
from contextlib import closing, contextmanager
import fileinput
import functools
import tempfile
import pybedtools
import pysam
import toolz as tz
from bcbio import bam, broad, utils
from bcbio.bam import ref
from bcbio.pipeline import datadict as dd
from bcbio.pipeline import config_utils
from bcbio.utils import file_exists, safe_makedir, save_diskspace
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.provenance import do
# ## Split/Combine helpers
def combine_bam(in_files, out_file, config):
"""Parallel target to combine multiple BAM files.
"""
runner = broad.runner_from_path("picard", config)
runner.run_fn("picard_merge", in_files, out_file)
for in_file in in_files:
save_diskspace(in_file, "Merged into {0}".format(out_file), config)
bam.index(out_file, config)
return out_file
def process_bam_by_chromosome(output_ext, file_key, default_targets=None, remove_alts=False):
"""Provide targets to process a BAM file by individual chromosome regions.
output_ext: extension to supply to output files
file_key: the key of the BAM file in the input data map
default_targets: a list of extra chromosome targets to process, beyond those specified
in the BAM file. Useful for retrieval of non-mapped reads.
remove_alts: Do not process alternative alleles.
"""
if default_targets is None:
default_targets = []
def _do_work(data):
ignore_chroms = set(_get_alt_chroms(data) if remove_alts else [])
bam_file = data[file_key]
out_dir = tz.get_in(["dirs", "out"], data, os.path.dirname(bam_file))
out_file = os.path.join(out_dir, "{base}{ext}".format(
base=os.path.splitext(os.path.basename(bam_file))[0],
ext=output_ext))
part_info = []
if not file_exists(out_file):
work_dir = safe_makedir(
"{base}-split".format(base=os.path.splitext(out_file)[0]))
with closing(pysam.Samfile(bam_file, "rb")) as work_bam:
for chr_ref in list(work_bam.references) + default_targets:
if chr_ref not in ignore_chroms:
chr_out = os.path.join(work_dir,
"{base}-{ref}{ext}".format(
base=os.path.splitext(os.path.basename(bam_file))[0],
ref=chr_ref, ext=output_ext))
part_info.append((chr_ref, chr_out))
return out_file, part_info
return _do_work
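# A usage sketch for the closure returned above (hypothetical sample mapping):
#
#   split_fn = process_bam_by_chromosome("-split.bam", "work_bam")
#   out_file, part_info = split_fn({"work_bam": "/path/sample.bam"})
#   # part_info lists (chromosome, per-chromosome output path) pairs, and is
#   # empty when out_file already exists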
def _get_alt_chroms(data):
"""Retrieve alternative contigs as defined in bwa *.alts files.
If no alt files present (when we're not aligning with bwa), work around
with standard set of alts based on hg38 -- anything with HLA, _alt or
_decoy in the name.
"""
alts = []
alt_files = [f for f in tz.get_in(["reference", "bwa", "indexes"], data, []) if f.endswith("alt")]
if alt_files:
for alt_file in alt_files:
with open(alt_file) as in_handle:
for line in in_handle:
if not line.startswith("@"):
alts.append(line.split()[0].strip())
else:
for contig in ref.file_contigs(dd.get_ref_file(data)):
if ("_alt" in contig.name or "_decoy" in contig.name or
contig.name.startswith("HLA-") or ":" in contig.name):
alts.append(contig.name)
return alts
def write_nochr_reads(in_file, out_file, config):
"""Write a BAM file of reads that are not mapped on a reference chromosome.
This is useful for maintaining non-mapped reads in parallel processes
that split processing by chromosome.
"""
if not file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
samtools = config_utils.get_program("samtools", config)
cmd = "{samtools} view -b -f 4 {in_file} > {tx_out_file}"
do.run(cmd.format(**locals()), "Select unmapped reads")
return out_file
def write_noanalysis_reads(in_file, region_file, out_file, config):
"""Write a BAM file of reads in the specified region file that are not analyzed.
We want to get only reads not in analysis regions but also make use of
the BAM index to perform well on large files. The tricky part is avoiding
command line limits. There is a nice discussion on SeqAnswers:
http://seqanswers.com/forums/showthread.php?t=29538
sambamba supports intersection via an input BED file so avoids command line
length issues.
"""
if not file_exists(out_file):
with file_transaction(config, out_file) as tx_out_file:
bedtools = config_utils.get_program("bedtools", config)
sambamba = config_utils.get_program("sambamba", config)
cl = ("{sambamba} view -f bam -L {region_file} {in_file} | "
"{bedtools} intersect -abam - -b {region_file} -f 1.0 -nonamecheck"
"> {tx_out_file}")
do.run(cl.format(**locals()), "Select unanalyzed reads")
return out_file
def subset_bam_by_region(in_file, region, config, out_file_base=None):
"""Subset BAM files based on specified chromosome region.
"""
if out_file_base is not None:
base, ext = os.path.splitext(out_file_base)
else:
base, ext = os.path.splitext(in_file)
out_file = "%s-subset%s%s" % (base, region, ext)
if not file_exists(out_file):
with closing(pysam.Samfile(in_file, "rb")) as in_bam:
target_tid = in_bam.gettid(region)
            assert target_tid >= 0, \
                "Did not find reference region %s in %s" % \
                (region, in_file)
with file_transaction(config, out_file) as tx_out_file:
with closing(pysam.Samfile(tx_out_file, "wb", template=in_bam)) as out_bam:
for read in in_bam:
if read.tid == target_tid:
out_bam.write(read)
return out_file
def subset_bed_by_chrom(in_file, chrom, data, out_dir=None):
"""Subset a BED file to only have items from the specified chromosome.
"""
if out_dir is None:
out_dir = os.path.dirname(in_file)
base, ext = os.path.splitext(os.path.basename(in_file))
out_file = os.path.join(out_dir, "%s-%s%s" % (base, chrom, ext))
if not utils.file_uptodate(out_file, in_file):
with file_transaction(data, out_file) as tx_out_file:
_rewrite_bed_with_chrom(in_file, tx_out_file, chrom)
return out_file
def _rewrite_bed_with_chrom(in_file, out_file, chrom):
with open(in_file) as in_handle:
with open(out_file, "w") as out_handle:
for line in in_handle:
if line.startswith("%s\t" % chrom):
out_handle.write(line)
def _subset_bed_by_region(in_file, out_file, region, do_merge=True):
orig_bed = pybedtools.BedTool(in_file)
region_bed = pybedtools.BedTool("\t".join(str(x) for x in region) + "\n", from_string=True)
if do_merge:
orig_bed.intersect(region_bed, nonamecheck=True).filter(lambda x: len(x) > 1).merge().saveas(out_file)
else:
orig_bed.intersect(region_bed, nonamecheck=True).filter(lambda x: len(x) > 1).saveas(out_file)
def get_lcr_bed(items):
lcr_bed = utils.get_in(items[0], ("genome_resources", "variation", "lcr"))
do_lcr = any([utils.get_in(data, ("config", "algorithm", "remove_lcr"), False)
for data in items])
if do_lcr and lcr_bed and os.path.exists(lcr_bed):
return lcr_bed
def remove_lcr_regions(orig_bed, items):
"""If configured and available, update a BED file to remove low complexity regions.
"""
lcr_bed = get_lcr_bed(items)
if lcr_bed:
        nolcr_bed = "%s-nolcr.bed" % utils.splitext_plus(orig_bed)[0]
with file_transaction(items[0], nolcr_bed) as tx_nolcr_bed:
with bedtools_tmpdir(items[0]):
pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(lcr_bed), nonamecheck=True).\
saveas(tx_nolcr_bed)
# If we have a non-empty file, convert to the LCR subtracted for downstream analysis
if utils.file_exists(nolcr_bed):
orig_bed = nolcr_bed
return orig_bed
def remove_highdepth_regions(in_file, items):
"""Remove high depth regions from a BED file for analyzing a set of calls.
Tries to avoid spurious errors and slow run times in collapsed repeat regions.
Also adds ENCODE blacklist regions which capture additional collapsed repeats
around centromeres.
"""
from bcbio.variation import bedutils
highdepth_beds = filter(lambda x: x is not None,
list(set([tz.get_in(["config", "algorithm", "highdepth_regions"], x) for x in items])))
encode_bed = tz.get_in(["genome_resources", "variation", "encode_blacklist"], items[0])
if encode_bed and os.path.exists(encode_bed):
highdepth_beds.append(encode_bed)
out_file = "%s-glimit%s" % utils.splitext_plus(in_file)
if not utils.file_uptodate(out_file, in_file):
with file_transaction(items[0], out_file) as tx_out_file:
with bedtools_tmpdir(items[0]):
all_file = "%s-all.bed" % utils.splitext_plus(tx_out_file)[0]
if len(highdepth_beds) > 0:
with open(all_file, "w") as out_handle:
for line in fileinput.input(highdepth_beds):
parts = line.split("\t")
out_handle.write("\t".join(parts[:4]).rstrip() + "\n")
if utils.file_exists(all_file):
to_remove = bedutils.sort_merge(all_file, items[0])
cmd = "bedtools subtract -nonamecheck -a {in_file} -b {to_remove} > {tx_out_file}"
do.run(cmd.format(**locals()), "Remove high depth regions")
else:
utils.symlink_plus(in_file, out_file)
return out_file
@contextmanager
def bedtools_tmpdir(data):
with tx_tmpdir(data) as tmpdir:
orig_tmpdir = tempfile.gettempdir()
pybedtools.set_tempdir(tmpdir)
yield
if orig_tmpdir and os.path.exists(orig_tmpdir):
pybedtools.set_tempdir(orig_tmpdir)
else:
tempfile.tempdir = None
def subtract_low_complexity(f):
"""Remove low complexity regions from callable regions if available.
"""
@functools.wraps(f)
def wrapper(variant_regions, region, out_file, items=None, do_merge=True):
region_bed = f(variant_regions, region, out_file, items, do_merge)
if region_bed and isinstance(region_bed, basestring) and os.path.exists(region_bed) and items:
region_bed = remove_lcr_regions(region_bed, items)
return region_bed
return wrapper
@subtract_low_complexity
def subset_variant_regions(variant_regions, region, out_file, items=None, do_merge=True):
"""Return BED file subset by a specified chromosome region.
variant_regions is a BED file, region is a chromosome name or tuple
of (name, start, end) for a genomic region.
"""
if region is None:
return variant_regions
elif variant_regions is None:
return region
elif not isinstance(region, (list, tuple)) and region.find(":") > 0:
raise ValueError("Partial chromosome regions not supported")
else:
merge_text = "-unmerged" if not do_merge else ""
subset_file = "{0}".format(utils.splitext_plus(out_file)[0])
subset_file += "%s-regions.bed" % (merge_text)
if not os.path.exists(subset_file):
with file_transaction(items[0] if items else None, subset_file) as tx_subset_file:
if isinstance(region, (list, tuple)):
_subset_bed_by_region(variant_regions, tx_subset_file, region, do_merge = do_merge)
else:
_rewrite_bed_with_chrom(variant_regions, tx_subset_file, region)
if os.path.getsize(subset_file) == 0:
return region
else:
return subset_file
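# A call sketch for the decorated subset function above (hypothetical paths;
# items omitted, so the wrapper skips low-complexity subtraction):
#
#   bed = subset_variant_regions("/path/regions.bed", ("chr1", 0, 1000000),
#                                "/path/out.bed")
#   # returns a BED file path, or the region itself when the subset is empty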
|
mjafin/bcbio-nextgen
|
bcbio/pipeline/shared.py
|
Python
|
mit
| 12,312
|
[
"BWA",
"pysam"
] |
c4c44ff5e41ffc9f87ff0cf379e8b5dbba01ff4c67d885522e445e51d79adbb5
|
#!/usr/bin/python
import sys, os, argparse, subprocess, shlex, glob
from datetime import datetime
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool as ThreadPool
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Alphabet import IUPAC
################################################################################
# This script finds core genes from a list of genomes and a groups file in
# the format output by OrthoMCL. It aligns the core genes and makes a tree from
# the alignment. It uses Lazarus to run PAML ancestral reconstruction, and it
# concatenates the ancestral gene sequences.
#
# Program Requirements: translatorX, mafft, lazarus, raxml, biopython
# Input: OrthoMCL groups file, list of genomes (outgroup last), nucleotide
# sequences for genes in the genomes
################################################################################
TRANSLATOR_X_PATH = "/opt/PepPrograms/translatorx_vLocal.pl"
LAZARUS_PATH = "/opt/PepPrograms/project-lazarus/lazarus.py"
RAXML_PATH = "/opt/PepPrograms/standard-RAxML/raxmlHPC-PTHREADS-AVX"
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest,
os.path.abspath(os.path.expanduser(values)))
def listdir_fullpath(d):
return [os.path.join(d, f) for f in os.listdir(d)]
def is_dir(dirname):
"""Checks if a path is a directory"""
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def is_file(filename):
"""Checks if a file exists"""
if not os.path.isfile(filename):
msg = "{0} is not a file".format(filename)
raise argparse.ArgumentTypeError(msg)
else:
return filename
def get_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Ancestral reconstruction of\
core genome')
parser.add_argument("groups", help="OrthoMCL groups file", action=FullPaths,
type=is_file)
parser.add_argument("genomes", help="File listing genomes to be included in\
the analysis- outgroup last", action=FullPaths, type=is_file)
parser.add_argument("genes",
help="Directory with .fasta files of nucleotide sequences for genomes",
action=FullPaths, type=is_dir)
parser.add_argument("-t", "--threads",
help="Number of threads to use (default: 2)",
type=int, default=2, choices=range(2, cpu_count()))
return parser.parse_args()
def check_paths():
for i in [TRANSLATOR_X_PATH, LAZARUS_PATH, RAXML_PATH]:
if not os.path.isfile(i):
msg = "{0} does not exist".format(i)
print msg
sys.exit()
def call_with_log(cmd):
"""Calls a system command with the subprocess module. Redirects both stdout
and stderr to a log file"""
cmd = cmd.format(**(kvmap))
logfile = open(wd + current_datetime+".log", "a+")
logfile.write("Executing command: " + cmd + "\n")
logfile.flush()
ret = subprocess.call(shlex.split(cmd), stdout=logfile, stderr=logfile)
if(ret != 0):
print("Pipeline did not complete successfully. \n Command : \n\n" +
cmd + "\n\n returned with non-zero code: " + str(ret))
logfile.close()
def read_groups_file(inFileName):
""" Read in groups file and create dictionary of group name and proteins in
group"""
print "Reading groups file"
inFile = open(inFileName, 'r')
groupsDict = {}
for line in inFile:
line = line.strip()
entries = line.split(':')
groupName = entries[0]
groupProteins = entries[1][1:].split(' ')
groupsDict[groupName] = groupProteins
inFile.close()
print len(groupsDict)
return groupsDict
def get_core_genes(groupsDict, genomes):
""" Gets core genes for genomes in list """
coreGenes = set()
for group in groupsDict:
genomeList = []
proteinList = groupsDict[group]
for protein in proteinList:
ids = protein.split('|')
genomeID = ids[0]
genomeList.append(genomeID)
genomeSet = set(genomeList)
if set(genomes.keys()).issubset(genomeSet):
if len(genomeList) == len(genomeSet):
coreGenes.add(group)
print len(coreGenes)
return coreGenes
def make_unaligned_fasta(dnaDirectory, groupsDict, coreGenes, genomes, og):
""" Reads through files in provided directory to find gene sequences that
match the proteins in the groups dictionary"""
print "Collecting core genes"
def make_fasta(group):
proteins = groupsDict[group]
out = open(group + '/' + group + '.fasta', 'w')
records = []
outgroup_gene = og
ingroup_genes = []
for protein in proteins:
seqID = protein.split('|')[0]
if seqID in genomes:
protein = protein.split('|')[1]
newRec = seqRecordDict[protein]
newRec.description = ""
records.append(newRec)
if og in newRec.id:
outgroup_gene = newRec.id
else:
ingroup_genes.append(newRec.id)
SeqIO.write(records, out, 'fasta')
return (group, ingroup_genes, outgroup_gene)
files = listdir_fullpath(dnaDirectory)
seqRecordDict = {}
seqIDs = []
for f in files:
handle = open(f, 'r')
for record in SeqIO.parse(handle, 'fasta'):
seqRecordDict[record.id] = record
pool = ThreadPool(args.threads)
seqIDs = pool.map(make_fasta, coreGenes)
pool.close()
pool.join()
return seqIDs
def align_gene_sequences(coreGenes):
""" Use MAFFT to align gene sequences"""
print "Aligning core genes"
def run_translatorX(infile):
outfile = "%s/alignment/%s" % (os.path.dirname(infile),
os.path.splitext(os.path.split(infile)[1])[0])
call_with_log(TRANSLATOR_X_PATH + " -i %s -o %s -p F"
% (infile, outfile))
return outfile + ".nt_ali.fasta"
files = [x + "/" + x + ".fasta" for x in coreGenes]
pool = ThreadPool(args.threads)
outfileList = pool.map(run_translatorX, files)
pool.close()
pool.join()
def make_trees(coreGenes):
""" Use RAxML to calculate maximum likelihood phylogeny """
print "Running RAxML"
def run_raxml(coreGene):
alignFile = "%s/alignment/%s.nt_ali.fasta" % (coreGene, coreGene)
outdir = os.path.abspath("%s/tree/" % coreGene)
name = "ml_" + os.path.splitext(os.path.basename(alignFile))[0]
call_with_log(RAXML_PATH + " -T 2 -m GTRGAMMA -# 20 -p 123 -s %s -w %s \
-n %s" % (alignFile, outdir, name))
return name
pool = ThreadPool(args.threads/2)
outNames = pool.map(run_raxml, coreGenes)
pool.close()
pool.join()
def ancestral_reconstruction(outgroup_genes):
""" Use Lazarus wrapper to run paml ancestral reconstruction """
print "Ancestral Reconstruction"
def run_lazarus(outgroup_gene):
coreGene, ingroup, outgroup = outgroup_gene
align = "%s/alignment/%s.nt_ali.fasta" % (coreGene, coreGene)
tree = "%s/tree/RAxML_bestTree.ml_%s.nt_ali" % (coreGene, coreGene)
model = "/opt/PepPrograms/paml4.8/dat/wag.dat"
outdir = os.path.abspath("%s/ancestral/" % coreGene)
call_with_log(LAZARUS_PATH + " --codeml --outputdir %s --verbose 9 \
--alignment %s --tree %s --model %s --asrv 4 --gapcorrect --getanc --ingroup %s\
--outgroup %s" % (outdir, align, tree, model, "[%s]" % (",".join(ingroup)),
"[%s]" % (outgroup)))
pool = ThreadPool(args.threads)
pool.map(run_lazarus, outgroup_genes)
pool.close()
pool.join()
def concatenate_genes(coreGenes):
""" Using Biopython, concatenate ancestral genes into one file"""
print "Concatenating ancestral reconstructions"
    def parse_lazarus_output(coreGene):
        # line 14 of the lazarus output holds the ancestral ML sequence
        with open("%s/ancestral/ancestor.out.txt" % coreGene, "r") as ancRecFile:
            for i, line in enumerate(ancRecFile):
                if i == 13:
                    return SeqRecord(Seq(line.strip(), IUPAC.ambiguous_dna),
                                     id=coreGene, description="")
pool = ThreadPool(args.threads)
recs = pool.map(parse_lazarus_output, coreGenes)
SeqIO.write(recs, "ancestralGenes.fa", "fasta")
check_paths()
args = get_args()
current_datetime = datetime.today().strftime("%d-%m-%Y-%H%M")
wd = os.getcwd() + "/"
kvmap = {'projectname':'coreAlignment'}
genomes = {}
orderedGenomes = []
with open(args.genomes, "r") as inFile:
for line in inFile:
genomes[line.strip()[-4:]] = line.strip()
orderedGenomes.append(line.strip())
og = orderedGenomes[-1]
groupsDict = read_groups_file(args.groups)
coreGenes = get_core_genes(groupsDict, genomes)
for n in coreGenes:
try:
os.mkdir(n)
os.mkdir(n + "/alignment")
os.mkdir(n + "/tree")
os.mkdir(n + "/ancestral")
except OSError as e:
if e.errno != os.errno.EEXIST:
raise
outgroup_genes = make_unaligned_fasta(args.genes, groupsDict, coreGenes,
genomes, og)
align_gene_sequences(coreGenes)
make_trees(coreGenes)
ancestral_reconstruction(outgroup_genes)
concatenate_genes(coreGenes)
|
tatumdmortimer/popgen-stats
|
ancestralReconstruction.py
|
Python
|
mit
| 9,433
|
[
"Biopython"
] |
548b8564f9ffd04120d0fd0d67648c511c0e40abd5c85f7a39eba4b946ba46bd
|
'''
Path: magpy.lib.magpy_formats
Part of package: stream (read/write)
Type: Input tester, call to read library
PURPOSE:
Tests which format a file has using the read library.
CONTAINS:
isFormat: (Func) Runs through datatypes in library to find fitting type.
readFormat: (Func) When format is found, reads data file into DataStream.
writeFormat: (Func) Writes DataStream object to given format.
DEPENDENCIES:
magpy.lib...
.format_gsm19
.format_didd
.format_gdas
.format_lemi
.format_pos1
#.format_env05
.format_cr800
.format_covjson
.format_iono
.format_iaga02
.format_wdc
.format_magpy
.format_noaa
.format_latex
.format_wik
.format_wic
.format_sfs
.format_bdv
.format_dtu
.format_gfz
.format_imf
.format_rcs
.format_json
.format_pha
CALLED BY:
magpy.stream.read()
magpy.stream.write()
'''
from __future__ import print_function
from magpy.stream import *
import logging
logger = logging.getLogger(__name__)
# IMPORT INSTRUMENT SPECIFIC DATA FORMATS:
from magpy.lib.format_gsm19 import *
from magpy.lib.format_didd import *
from magpy.lib.format_gdas import *
from magpy.lib.format_lemi import *
from magpy.lib.format_pos1 import *
from magpy.lib.format_qspin import *
#from magpy.lib.format_env05 import *
from magpy.lib.format_cr800 import *
from magpy.lib.format_iono import *
# IMPORT GENERAL PURPOSE FORMATS:
from magpy.lib.format_iaga02 import *
from magpy.lib.format_wdc import *
from magpy.lib.format_magpy import *
from magpy.lib.format_noaa import *
from magpy.lib.format_nc import isNETCDF, readNETCDF
from magpy.lib.format_latex import *
from magpy.lib.format_covjson import *
from magpy.lib.format_json import *
# IMPORT OBSERVATORY/GROUP SPECIFIC FORMATS:
from magpy.lib.format_wik import *
from magpy.lib.format_wic import *
from magpy.lib.format_sfs import *
from magpy.lib.format_bdv import *
from magpy.lib.format_dtu import *
from magpy.lib.format_gfz import *
from magpy.lib.format_gfztmp import *
from magpy.lib.format_neic import *
from magpy.lib.format_rcs import *
from magpy.lib.format_pha import *
from magpy.lib.format_basiccsv import *
from magpy.lib.format_imf import *
try:
from magpy.lib.format_autodif_fread import *
except:
logging.warning("magpy-formats: Format package autodif-F not available")
try:
## Overriding:
## -> format_imf ImagCDF method in case of cdflib available
## -> format_magpy PYCDF method in case of cdflib available
from magpy.lib.format_imagcdf import *
from magpy.lib.format_magpycdf import *
from magpy.lib.format_acecdf import *
# please note: magpycdf and acecdf replace the earlier combined method in magpy
except:
logging.warning("magpy-formats: cdflib not available")
IAFMETA = {'StationInstitution':'word', 'StationName':'word', 'StationIAGAcode':'word',
           'DataAcquisitionLatitude':'word', 'DataAcquisitionLongitude':'word',
           'DataElevation':'word', 'DataFormat':'word', 'DataComponents':'word',
           'DataSensorOrientation':'word', 'DataDigitalSampling':'word',
           'DataSamplingFilter':'word', 'Data Type':'word', 'DataPublicationLevel':'word',
           'DataConversion':'word', 'StationK9':'word', 'DataQuality':'word',
           'SensorType':'word', 'StationStreet':'word', 'StationCity':'word',
           'StationPostalCode':'word', 'StationCountry':'word', 'StationWebInfo':'word',
           'StationEmail':'word'}
IAFBINMETA = {'StationInstitution':'word', 'DataAcquisitionLatitude':'word',
              'DataAcquisitionLongitude':'word', 'DataElevation':'word',
              'DataComponents':'word', 'DataSensorOrientation':'word',
              'DataDigitalSampling':'word', 'DataConversion':'word', 'StationK9':'word',
              'DataQuality':'word', 'SensorType':'word', 'StationID':'word'}
IAGAMETA = {'StationInstitution':'word', 'StationName':'word', 'StationIAGAcode':'word',
            'DataAcquisitionLatitude':'word', 'DataAcquisitionLongitude':'word',
            'DataElevation':'word', 'DataFormat':'word', 'DataComponents':'word',
            'DataSensorOrientation':'word', 'DataDigitalSampling':'word',
            'DataSamplingFilter':'word', 'DataPublicationLevel':'word'}
IMAGCDFMETA = {'StationInstitution':'word', 'DataPublicationLevel':'number',
               'DataStandardLevel':'word', 'StationIAGAcode':'word', 'StationName':'word',
               'DataReferences':'word', 'DataTerms':'word',
               'DataAcquisitionLatitude':'word', 'DataAcquisitionLongitude':'word',
               'DataElevation':'word', 'DataComponents':'word',
               'DataSensorOrientation':'word'}
def isFormat(filename, format_type):
if (format_type == "IAGA"):
if (isIAGA(filename)):
return True
elif (format_type == "WDC"):
if (isWDC(filename)):
return True
elif (format_type == "DIDD"):
if (isDIDD(filename)):
return True
elif (format_type == "OPT"):
if (isOPT(filename)):
return True
elif (format_type == "PMAG1"): # Data from the ELSEC820 System
if (isPMAG1(filename)):
return True
elif (format_type == "PMAG2"): # Data from the ELSEC820 System via Cobenzl RCS
if (isPMAG2(filename)):
return True
elif (format_type == "GDASA1"): # Data from the Conrad Observatory GDAS System
if (isGDASA1(filename)):
return True
elif (format_type == "GDASB1"): # Data from the Conrad Observatory GDAS System
if (isGDASB1(filename)):
return True
elif (format_type == "DTU1"): # ASCII Data from the DTU's FGE systems
if (isDTU1(filename)):
return True
elif (format_type == "PYSTR"):
if (isPYSTR(filename)):
return True
elif (format_type == "PYASCII"):
if (isPYASCII(filename)):
return True
elif (format_type == "PYBIN"):
if (isPYBIN(filename)):
return True
elif (format_type == "COVJSON"):
if (isCOVJSON(filename)):
return True
elif (format_type == "JSON"):
if (isJSON(filename)):
return True
elif (format_type == "RMRCS"): # Data from the Conrad Observatory RCS System
if (isRMRCS(filename)):
return True
elif (format_type == "RCS"): # Direct data from the Conrad Observatory RCS System
if (isRCS(filename)):
return True
elif (format_type == "METEO"): # Conrad Observatory RCS System - METEO files
if (isMETEO(filename)):
return True
elif (format_type == "LNM"): # Conrad Observatory LaserNiederschlagsMonitor - LNM Telegram 5 files
if (isLNM(filename)):
return True
elif (format_type == "GRAVSG"): # Data from the Conrad Observatory SG gravity system
if (isGRAVSG(filename)):
return True
elif (format_type == "IWT"): # Data from the Conrad Observatory tiltmeter system
if (isIWT(filename)):
return True
elif (format_type == "LIPPGRAV"): # Data from the Lippmann tiltmeter system
if (isLIPPGRAV(filename)):
return True
elif (format_type == "CR800"): # Data from the CR800 datalogger
if (isCR800(filename)):
return True
elif (format_type == "IONO"): # Data from the IM806 Ionometer
if (isIONO(filename)):
return True
elif (format_type == "RADON"): # Data from the CR800 datalogger
if (isRADON(filename)):
return True
elif (format_type == "CS"):
if (isCS(filename)):
return True
elif (format_type == "GSM19"): # Data from the GEM GSM 19 Overhauzer sensor
if (isGSM19(filename)):
return True
elif (format_type == "LEMIHF"): # High frequency Lemi data (10 Hz)
if (isLEMIHF(filename)):
return True
elif (format_type == "LEMIBIN"): # Binary Lemi data (10 Hz)
if (isLEMIBIN(filename)):
return True
elif (format_type == "LEMIBIN1"): # Binary Lemi data (10 Hz)
if (isLEMIBIN1(filename)):
return True
elif (format_type == "POS1"): # Binary POS1 data (0.2 Hz)
if (isPOS1(filename)):
return True
elif (format_type == "POS1TXT"): # Text POS1 data (0.2 Hz)
if (isPOS1TXT(filename)):
return True
elif (format_type == "PMB"): # POS PMB data
if (isPOSPMB(filename)):
return True
elif (format_type == "IAF"): # Intermagnet Archive Format
if (isIAF(filename)):
return True
elif (format_type == "IYFV"): # Intermagnet Yearly mean Format
if (isIYFV(filename)):
return True
elif (format_type == "DKA"): # Intermagnet K-value Format
if (isDKA(filename)):
return True
elif (format_type == "ACECDF"):
if (isACECDF(filename)):
return True
elif (format_type == "PYCDF"):
if (isPYCDF(filename)):
return True
elif (format_type == "IMAGCDF"): # Intermagnet CDF Format
if (isIMAGCDF(filename)):
return True
elif (format_type == "IMF"): # Intermagnet v1.22,v1.23 data (60 sec)
try:
if (isIMF(filename)):
return True
except:
pass
elif (format_type == "BLV"): # Intermagnet IBFV2.00
try:
if (isBLV(filename)):
return True
except:
pass
elif (format_type == "AUTODIF_FREAD"): # Text AUTODIF F for baseline (0.2 Hz, from POS1)
try:
if (isAUTODIF_FREAD(filename)):
return True
except:
pass
#elif (format_type == "ENV05"): # Binary Environmental data (1 Hz)
# if (isENV05(filename)):
# return True
elif (format_type == "SFDMI"): # San Fernando DMI(FGE) format
if (isSFDMI(filename)):
return True
elif (format_type == "SFGSM"): # San Fernando GSM format
if (isSFGSM(filename)):
return True
elif (format_type == "BDV1"): # Budkov format
if (isBDV1(filename)):
return True
elif (format_type == "GFZKP"): # GFZ Kp
if (isGFZKP(filename)):
return True
elif (format_type == "NOAAACE"): # NOAA ACE Satellite data
if (isNOAAACE(filename)):
return True
elif (format_type == "NETCDF"): # NetCDF format, NOAA DSCOVR satellite data
if (isNETCDF(filename)):
return True
elif (format_type == "NEIC"): # NEIC USGS data
if (isNEIC(filename)):
return True
elif (format_type == "PHA"): # Potentially Hazardous Objects (This research has made use of data and/or services provided by the International Astronomical Union's Minor Planet Center.)
if (isPHA(filename)):
return True
elif (format_type == "USBLOG"): # Data from the USB temperature logger
if (isUSBLOG(filename)):
return True
elif (format_type == "QSPIN"): # Data from the USB temperature logger
if (isQSPIN(filename)):
return True
elif (format_type == "GFZTMP"): # Data from the USB temperature logger
if (isGFZTMP(filename)):
return True
elif (format_type == "CSV"): # Basic CSV data
if (isCSV(filename)):
return True
elif (format_type in ["PYNC", "AUTODIF", "SERMUL", "SERSIN", "LATEX"]): # Not yet supported
return False
elif (format_type == "UNKOWN"): # Unkown
return False
else:
logger.warning("isFormat: Could not identify data format for file {}. Is {} a valid type?".format(filename, format_type))
return False
def readFormat(filename, format_type, headonly=False, **kwargs):
empty = DataStream()
if (format_type == "IAGA"):
return readIAGA(filename, headonly, **kwargs)
elif (format_type == "WDC"):
return readWDC(filename, headonly, **kwargs)
elif (format_type == "IMF"):
return readIMF(filename, headonly, **kwargs)
elif (format_type == "IAF"):
return readIAF(filename, headonly, **kwargs)
elif (format_type == "BLV"): # Intermagnet IBFV2.00
return readBLV(filename, headonly, **kwargs)
elif (format_type == "IYFV"): # Intermagnet IYVF1.01
return readIYFV(filename, headonly, **kwargs)
elif (format_type == "DKA"): # Intermagnet DKA
return readDKA(filename, headonly, **kwargs)
elif (format_type == "DIDD"):
return readDIDD(filename, headonly, **kwargs)
elif (format_type == "GDASA1"):
return readGDASA1(filename, headonly, **kwargs)
elif (format_type == "GDASB1"):
return readGDASB1(filename, headonly, **kwargs)
elif (format_type == "RMRCS"):
return readRMRCS(filename, headonly, **kwargs)
elif (format_type == "RCS"):
return readRCS(filename, headonly, **kwargs)
elif (format_type == "METEO"):
return readMETEO(filename, headonly, **kwargs)
elif (format_type == "LNM"):
return readLNM(filename, headonly, **kwargs)
elif (format_type == "PYSTR"):
return readPYSTR(filename, headonly, **kwargs)
elif (format_type == "PYASCII"):
return readPYASCII(filename, headonly, **kwargs)
elif (format_type == "IMAGCDF"):
return readIMAGCDF(filename, headonly, **kwargs)
elif (format_type == "ACECDF"): # cdf ACE
return readACECDF(filename, headonly, **kwargs)
elif (format_type == "PYCDF"):
return readPYCDF(filename, headonly, **kwargs)
elif (format_type == "PYBIN"):
return readPYBIN(filename, headonly, **kwargs)
elif (format_type == "JSON"):
return readJSON(filename, headonly, **kwargs)
elif (format_type == "GSM19"):
return readGSM19(filename, headonly, **kwargs)
elif (format_type == "LEMIHF"):
return readLEMIHF(filename, headonly, **kwargs)
elif (format_type == "LEMIBIN"):
return readLEMIBIN(filename, headonly, **kwargs)
elif (format_type == "LEMIBIN1"):
return readLEMIBIN1(filename, headonly, **kwargs)
elif (format_type == "POS1"):
return readPOS1(filename, headonly, **kwargs)
elif (format_type == "POS1TXT"):
return readPOS1TXT(filename, headonly, **kwargs)
elif (format_type == "PMB"):
return readPOSPMB(filename, headonly, **kwargs)
elif (format_type == "QSPIN"):
return readQSPIN(filename, headonly, **kwargs)
elif (format_type == "AUTODIF_FREAD"):
return readAUTODIF_FREAD(filename, headonly, **kwargs)
elif (format_type == "COVJSON"):
return readCOVJSON(filename, headonly, **kwargs)
#elif (format_type == "ENV05"):
# return readENV05(filename, headonly, **kwargs)
elif (format_type == "USBLOG"):
return readUSBLOG(filename, headonly, **kwargs)
elif (format_type == "GRAVSG"):
return readGRAVSG(filename, headonly, **kwargs)
elif (format_type == "IWT"):
return readIWT(filename, headonly, **kwargs)
elif (format_type == "LIPPGRAV"):
return readLIPPGRAV(filename, headonly, **kwargs)
elif (format_type == "CR800"):
return readCR800(filename, headonly, **kwargs)
elif (format_type == "IONO"):
return readIONO(filename, headonly, **kwargs)
elif (format_type == "RADON"):
return readRADON(filename, headonly, **kwargs)
elif (format_type == "CS"):
return readCS(filename, headonly, **kwargs)
# Observatory specific
elif (format_type == "OPT"):
return readOPT(filename, headonly, **kwargs)
elif (format_type == "PMAG1"):
return readPMAG1(filename, headonly, **kwargs)
elif (format_type == "PMAG2"):
return readPMAG2(filename, headonly, **kwargs)
elif (format_type == "DTU1"):
return readDTU1(filename, headonly, **kwargs)
elif (format_type == "SFDMI"):
return readSFDMI(filename, headonly, **kwargs)
elif (format_type == "SFGSM"):
return readSFGSM(filename, headonly, **kwargs)
elif (format_type == "BDV1"):
return readBDV1(filename, headonly, **kwargs)
elif (format_type == "GFZKP"):
return readGFZKP(filename, headonly, **kwargs)
elif (format_type == "GFZTMP"):
return readGFZTMP(filename, headonly, **kwargs)
elif (format_type == "NOAAACE"):
return readNOAAACE(filename, headonly, **kwargs)
elif (format_type == "NETCDF"):
return readNETCDF(filename, headonly, **kwargs)
elif (format_type == "NEIC"):
return readNEIC(filename, headonly, **kwargs)
elif (format_type == "PHA"):
return readPHA(filename, headonly, **kwargs)
elif (format_type == "CSV"):
return readCSV(filename, headonly, **kwargs)
else:
logger.info("No valid format found ({}). Returning empty stream.".format(format_type))
return DataStream(empty,empty.header)
def writeFormat(datastream, filename, format_type, **kwargs):
"""
calls the format specific write functions
if the selceted dir is not existing, it is created
"""
directory = os.path.dirname(filename)
if not os.path.exists(directory):
if not filename.startswith('StringIO'):
os.makedirs(os.path.normpath(directory))
if (format_type == "IAGA"):
return writeIAGA(datastream, filename, **kwargs)
elif (format_type == "WDC"):
return writeWDC(datastream, filename, **kwargs)
elif (format_type == "IMF"):
return writeIMF(datastream, filename, **kwargs)
elif (format_type == "IAF"):
return writeIAF(datastream, filename, **kwargs)
elif (format_type == "IMAGCDF"):
return writeIMAGCDF(datastream, filename, **kwargs)
elif (format_type == "BLV"):
return writeBLV(datastream, filename, **kwargs)
elif (format_type == "IYFV"):
return writeIYFV(datastream, filename, **kwargs)
elif (format_type == "DKA"):
return writeDKA(datastream, filename, **kwargs)
elif (format_type == "DIDD"):
return writeDIDD(datastream, filename, **kwargs)
elif (format_type == "PYSTR"):
return writePYSTR(datastream, filename, **kwargs)
elif (format_type == "PYASCII"):
return writePYASCII(datastream, filename, **kwargs)
elif (format_type == "PYCDF"):
return writePYCDF(datastream, filename, **kwargs)
elif (format_type == "COVJSON"):
return writeCOVJSON(datastream, filename, **kwargs)
elif (format_type == "AUTODIF_FREAD"):
return writeAUTODIF_FREAD(datastream, filename, **kwargs)
elif (format_type == "CR800"):
return writeCR800(datastream, filename, **kwargs)
elif (format_type == "CSV"):
return writeCSV(datastream, filename, **kwargs)
elif (format_type == "LATEX"):
return writeLATEX(datastream, filename, **kwargs)
else:
logging.warning("writeFormat: Writing not succesful - format not recognized")
|
geomagpy/magpy
|
magpy/lib/magpy_formats.py
|
Python
|
bsd-3-clause
| 19,144
|
[
"NetCDF"
] |
1257ccaf3d7900208a6e9aab21cbaa5ad5b08bbe0dbf64b2765bf247269691a7
|
from .utils import PyKEArgumentHelpFormatter
import numpy as np
from astropy.io import fits as pyfits
from matplotlib import pyplot as plt
from tqdm import tqdm
from . import kepio, kepmsg, kepkey, kepplot, kepstat, kepfunc
__all__ = ['keppixseries']
def keppixseries(infile, outfile=None, plotfile=None, plottype='global',
filterlc=False, function='boxcar', cutoff=1.0, overwrite=False,
verbose=False, logfile='keppixseries.log'):
"""
keppixseries -- individual time series photometry for all pixels within a
target mask
keppixseries plots a light curve for each individual pixel in a target
mask. Light curves are extracted from a target pixel file obtained from the
Kepler data archive at MAST. If required, the data can be fed through a
boxcar, gaussian or sinc function high bandpass filter in order to remove
low frequency signal from the data. keppixseries is a diagnostic tool for
identifying source contaminants in the background or foreground of the
target. It can be employed to identify pixels for inclusion or exclusion
when re-extracting a Kepler light curve from target pixel files.
Parameters
----------
infile : str
The name of a MAST standard format FITS file containing Kepler Target
Pixel data within the first data extension.
outfile : str
The name of the output FITS file. This file has two data extensions.
The first called 'PIXELSERIES' contains a table with columns of
barycenter-corrected time, barycenter time correction, cadence number,
cadence quality flag and a series of photometric light curves, one for
each pixel within the target mask. Each pixel is labeled COLx_ROWy,
where :math:`x` is the pixel column number and :math:`y` is the pixel
row number on the CCD module/output. The second extension contains the
mask definition map copied directly from the input target pixel file.
plotfile : str
Name of an optional diagnostic output plot file containing the results
of keppixseries. An example is provided in Figure 1. Typically this is
a PNG format file. If no diagnostic file is required, plotfile can be
'None'. The plot will be generated regardless of the value of this
field, but the plot will not be saved to a file if ``plotfile='None'``.
plottype : str
keppixseries can plot light curves of three types.
The choice is made using this argument. The options are:
* local - All individual pixel light curves are scaled separately to
provide the most dynamic range for each pixel.
* global - All pixel light curves are scaled between zero and the
maximum flux attained by the brightest pixel in the mask. This option
provides the relative contribution to the archived light curve by each
pixel.
        * full - All pixel light curves are scaled between zero and the
maximum flux attained by that pixel. This provides the fraction of
variability within each individual pixel.
filterlc : bool
If True, the light curve for each pixel will be treated by a high
band-pass filter to remove long-term trends from e.g. differential
velocity aberration.
function : str
The functional form of the high pass-band filter:
* boxcar
* gauss
* sinc
cutoff : float
The frequency of the high pass-band cutoff in units of :math:`days^{-1}`.
overwrite : bool
Overwrite the output file?
verbose : bool
Print informative messages and warnings to the shell and logfile?
    logfile : str
Name of the logfile containing error and warning messages.
Examples
--------
    .. code-block:: bash
$ keppixseries kplr008256049-2010174085026_lpd-targ.fits.gz
.. image:: ../_static/images/api/keppixseries.png
:align: center
"""
if outfile is None:
outfile = infile.split('.')[0] + "-{}.fits".format(__all__[0])
# log the call
hashline = '--------------------------------------------------------------'
kepmsg.log(logfile, hashline, verbose)
call = ('KEPPIXSERIES -- '
+ ' infile={}'.format(infile)
+ ' outfile={}'.format(outfile)
+ ' plotfile={}'.format(plotfile)
+ ' plottype={}'.format(plottype)
+ ' filterlc={}'.format(filterlc)
+ ' function={}'.format(function)
+ ' cutoff={}'.format(cutoff)
+ ' overwrite={}'.format(overwrite)
+ ' verbose={}'.format(verbose)
+ ' logfile={}'.format(logfile))
kepmsg.log(logfile, call+'\n', verbose)
# start time
kepmsg.clock('KEPPIXSERIES started at', logfile, verbose)
# overwrite output file
if overwrite:
kepio.overwrite(outfile, logfile, verbose)
if kepio.fileexists(outfile):
errmsg = ('ERROR -- KEPPIXSERIES: {} exists. Use --overwrite'
.format(outfile))
kepmsg.err(logfile, errmsg, verbose)
# open TPF FITS file
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, barytime = \
kepio.readTPF(infile, 'TIME', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, tcorr = \
kepio.readTPF(infile, 'TIMECORR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, cadno = \
kepio.readTPF(infile, 'CADENCENO', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, fluxpixels = \
kepio.readTPF(infile, 'FLUX', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, errpixels = \
kepio.readTPF(infile, 'FLUX_ERR', logfile, verbose)
kepid, channel, skygroup, module, output, quarter, season, \
ra, dec, column, row, kepmag, xdim, ydim, qual = \
kepio.readTPF(infile, 'QUALITY', logfile, verbose)
    # read mask definition data from TPF file
maskimg, pixcoord1, pixcoord2 = kepio.readMaskDefinition(infile, logfile,
verbose)
# print target data
print('')
print(' KepID: {}'.format(kepid))
print(' RA (J2000): {}'.format(ra))
print('Dec (J2000): {}'.format(dec))
print(' KepMag: {}'.format(kepmag))
print(' SkyGroup: {}'.format(skygroup))
print(' Season: {}'.format(season))
print(' Channel: {}'.format(channel))
print(' Module: {}'.format(module))
print(' Output: {}'.format(output))
print('')
# how many quality = 0 rows?
npts = 0
nrows = len(fluxpixels)
for i in range(nrows):
if (qual[i] == 0 and np.isfinite(barytime[i])
and np.isfinite(fluxpixels[i, ydim * xdim // 2])):
npts += 1
time = np.empty((npts))
timecorr = np.empty((npts))
cadenceno = np.empty((npts))
quality = np.empty((npts))
pixseries = np.empty((ydim, xdim, npts))
errseries = np.empty((ydim, xdim, npts))
# construct output light curves
nptsx = 0
for i in tqdm(range(ydim)):
for j in range(xdim):
npts = 0
for k in range(nrows):
if (qual[k] == 0 and np.isfinite(barytime[k])
and np.isfinite(fluxpixels[k, int(ydim*xdim/2)])):
time[npts] = barytime[k]
timecorr[npts] = tcorr[k]
cadenceno[npts] = cadno[k]
quality[npts] = qual[k]
pixseries[i, j, npts] = fluxpixels[k, nptsx]
errseries[i, j, npts] = errpixels[k, nptsx]
npts += 1
nptsx += 1
# define data sampling
if filterlc:
tpf = pyfits.open(infile)
cadence = kepkey.cadence(tpf[1], infile, logfile, verbose)
tr = 1.0 / (cadence / 86400)
timescale = 1.0 / (cutoff / tr)
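        # cadence is the sampling interval in seconds, so tr is the number
        # of cadences per day and timescale is the cutoff frequency
        # expressed as a width in cadences.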
# define convolution function
if function == 'boxcar':
filtfunc = np.ones(int(np.ceil(timescale)))
elif function == 'gauss':
timescale /= 2
dx = np.ceil(timescale * 10 + 1)
            filtfunc = kepfunc.gauss([1.0, dx / 2 - 1.0, timescale],
                                     np.linspace(0, dx - 1, dx))
elif function == 'sinc':
dx = np.ceil(timescale * 12 + 1)
fx = np.linspace(0, dx - 1, dx)
fx = fx - dx / 2 + 0.5
fx /= timescale
filtfunc = np.sinc(fx)
filtfunc /= np.sum(filtfunc)
# pad time series at both ends with noise model
for i in range(ydim):
for j in range(xdim):
ave, sigma = (np.mean(pixseries[i, j, :len(filtfunc)]),
np.std(pixseries[i, j, :len(filtfunc)]))
padded = np.append(kepstat.randarray(np.ones(len(filtfunc)) * ave,
np.ones(len(filtfunc)) * sigma), pixseries[i, j, :])
ave, sigma = (np.mean(pixseries[i, j, -len(filtfunc):]),
np.std(pixseries[i, j, -len(filtfunc):]))
padded = np.append(padded,
kepstat.randarray(np.ones(len(filtfunc)) * ave,
np.ones(len(filtfunc)) * sigma))
# convolve data
convolved = np.convolve(padded, filtfunc, 'same')
# remove padding from the output array
outdata = convolved[len(filtfunc): -len(filtfunc)]
# subtract low frequencies
outmedian = np.median(outdata)
pixseries[i, j, :] = pixseries[i, j, :] - outdata + outmedian
# construct output file
print("Writing output file {}...".format(outfile))
if ydim * xdim < 1000:
instruct = pyfits.open(infile, 'readonly')
kepkey.history(call, instruct[0], outfile, logfile, verbose)
hdulist = pyfits.HDUList(instruct[0])
cols = []
cols.append(pyfits.Column(name='TIME', format='D',
unit='BJD - 2454833', disp='D12.7',
array=time))
cols.append(pyfits.Column(name='TIMECORR', format='E', unit='d',
disp='E13.6', array=timecorr))
cols.append(pyfits.Column(name='CADENCENO', format='J', disp='I10',
array=cadenceno))
cols.append(pyfits.Column(name='QUALITY', format='J', array=quality))
for i in range(ydim):
for j in range(xdim):
                colname = 'COL{}_ROW{}'.format(j + column, i + row)
cols.append(pyfits.Column(name=colname, format='E',
disp='E13.6',
array=pixseries[i, j, :]))
hdu1 = pyfits.BinTableHDU.from_columns(pyfits.ColDefs(cols))
        try:
            hdu1.header['INHERIT'] = (True, 'inherit the primary header')
        except Exception:
            pass
        try:
            hdu1.header['EXTNAME'] = ('PIXELSERIES', 'name of extension')
        except Exception:
            pass
        # propagate the remaining standard keywords (and their comments)
        # from the input table header, skipping any that are missing
        for kw, comment in [
                ('EXTVER', 'extension version number (not format version)'),
                ('TELESCOP', 'telescope'),
                ('INSTRUME', 'detector type'),
                ('OBJECT', 'string version of KEPLERID'),
                ('KEPLERID', 'unique Kepler target identifier'),
                ('RADESYS', 'reference frame of celestial coordinates'),
                ('RA_OBJ', '[deg] right ascension from KIC'),
                ('DEC_OBJ', '[deg] declination from KIC'),
                ('EQUINOX', 'equinox of celestial coordinate system'),
                ('TIMEREF', 'barycentric correction applied to times'),
                ('TASSIGN', 'where time is assigned'),
                ('TIMESYS', 'time system is barycentric JD'),
                ('BJDREFI', 'integer part of BJD reference date'),
                ('BJDREFF', 'fraction of the day in BJD reference date'),
                ('TIMEUNIT', 'time unit for TIME, TSTART and TSTOP'),
                ('TSTART', 'observation start time in BJD-BJDREF'),
                ('TSTOP', 'observation stop time in BJD-BJDREF'),
                ('LC_START', 'mid point of first cadence in MJD'),
                ('LC_END', 'mid point of last cadence in MJD'),
                ('TELAPSE', '[d] TSTOP - TSTART'),
                ('LIVETIME', '[d] TELAPSE multiplied by DEADC'),
                ('EXPOSURE', '[d] time on source'),
                ('DEADC', 'deadtime correction'),
                ('TIMEPIXR', 'bin time beginning=0 middle=0.5 end=1'),
                ('TIERRELA', '[d] relative time error'),
                ('TIERABSO', '[d] absolute time error'),
                ('INT_TIME', '[s] photon accumulation time per frame'),
                ('READTIME', '[s] readout time per frame'),
                ('FRAMETIM', '[s] frame time (INT_TIME + READTIME)'),
                ('NUM_FRM', 'number of frames per time stamp'),
                ('TIMEDEL', '[d] time resolution of data'),
                ('DATE-OBS', 'TSTART as UTC calendar date'),
                ('DATE-END', 'TSTOP as UTC calendar date'),
                ('BACKAPP', 'background is subtracted'),
                ('DEADAPP', 'deadtime applied'),
                ('VIGNAPP', 'vignetting or collimator correction applied'),
                ('GAIN', '[electrons/count] channel gain'),
                ('READNOIS', '[electrons] read noise'),
                ('NREADOUT', 'number of read per cadence'),
                ('TIMSLICE', 'time-slice readout sequence section'),
                ('MEANBLCK', '[count] FSW mean black level')]:
            try:
                hdu1.header[kw] = (instruct[1].header[kw], comment)
            except Exception:
                pass
hdulist.append(hdu1)
hdulist.writeto(outfile)
kepkey.new('EXTNAME', 'APERTURE', 'name of extension', instruct[2],
outfile, logfile, verbose)
pyfits.append(outfile, instruct[2].data, instruct[2].header)
instruct.close()
else:
        warnmsg = ('WARNING -- KEPPIXSERIES: output FITS file requires > 999 '
                   'columns. Non-compliant with FITS convention.')
kepmsg.warn(logfile, warnmsg, verbose)
# plot pixel array
fmin = 1.0e33
    fmax = -1.0e33
plt.figure()
plt.clf()
dx = 0.93 / xdim
dy = 0.94 / ydim
ax = plt.axes([0.06, 0.05, 0.93, 0.94])
plt.gca().xaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().yaxis.set_major_formatter(plt.ScalarFormatter(useOffset=False))
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(integer=True))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(integer=True))
plt.xlim(np.min(pixcoord1) - 0.5, np.max(pixcoord1) + 0.5)
plt.ylim(np.min(pixcoord2) - 0.5, np.max(pixcoord2) + 0.5)
plt.xlabel('time', {'color' : 'k'})
plt.ylabel('arbitrary flux', {'color' : 'k'})
for i in range(ydim):
for j in range(xdim):
tmin = np.amin(time)
tmax = np.amax(time)
            try:
                np.isfinite(np.amin(pixseries[i, j, :]))
                np.isfinite(np.amax(pixseries[i, j, :]))
                fmin = np.amin(pixseries[i, j, :])
                fmax = np.amax(pixseries[i, j, :])
            except:
                pass
xmin = tmin - (tmax - tmin) / 40
xmax = tmax + (tmax - tmin) / 40
ymin = fmin - (fmax - fmin) / 20
ymax = fmax + (fmax - fmin) / 20
if kepstat.bitInBitmap(maskimg[i, j], 2):
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
facecolor='lightslategray')
elif maskimg[i, j] == 0:
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy],
facecolor='black')
else:
plt.axes([0.06 + float(j) * dx, 0.05 + i * dy, dx, dy])
if j == int(xdim / 2) and i == 0:
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
elif j == 0 and i == int(ydim / 2):
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
else:
plt.setp(plt.gca(), xticklabels=[], yticklabels=[])
ptime = time * 1.0
ptime = np.insert(ptime, [0], ptime[0])
ptime = np.append(ptime, ptime[-1])
pflux = pixseries[i, j, :] * 1.0
pflux = np.insert(pflux, [0], -1000.0)
pflux = np.append(pflux, -1000.0)
            plt.plot(time, pixseries[i, j, :], color='#0000ff',
                     linestyle='-', linewidth=0.5)
            if not kepstat.bitInBitmap(maskimg[i, j], 2):
                plt.fill(ptime, pflux, fc='lightslategray', linewidth=0.0,
                         alpha=1.0)
            plt.fill(ptime, pflux, fc='#FFF380', linewidth=0.0, alpha=1.0)
if 'loc' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if 'glob' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(1.0e-10, np.nanmax(pixseries) * 1.05)
if 'full' in plottype:
plt.xlim(xmin, xmax)
plt.ylim(1.0e-10, ymax * 1.05)
# render plot
    plt.show()
    # per the docstring, only save the plot if a real file name was given
    if plotfile is not None and str(plotfile).lower() != 'none':
        plt.savefig(plotfile)
# stop time
kepmsg.clock('KEPPIXSERIES ended at', logfile, verbose)
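# Standalone sketch of the high-pass filtering used above (illustrative
# only, not part of the PyKE API): pad a series with Gaussian noise matched
# to its ends, smooth with a normalized boxcar kernel, and subtract the
# smooth component while preserving the median level.
def _boxcar_highpass_sketch(flux, width):
    kern = np.ones(int(width)) / float(width)
    ave, sig = np.mean(flux[:len(kern)]), np.std(flux[:len(kern)])
    padded = np.append(np.random.normal(ave, sig, len(kern)), flux)
    ave, sig = np.mean(flux[-len(kern):]), np.std(flux[-len(kern):])
    padded = np.append(padded, np.random.normal(ave, sig, len(kern)))
    smooth = np.convolve(padded, kern, 'same')[len(kern):-len(kern)]
    return flux - smooth + np.median(smooth)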
def keppixseries_main():
import argparse
parser = argparse.ArgumentParser(
description=('Individual time series photometry for all pixels'
' within a target mask'),
formatter_class=PyKEArgumentHelpFormatter)
parser.add_argument('infile', help='Name of input file', type=str)
parser.add_argument('--outfile',
help=('Name of FITS file to output.'
' If None, outfile is infile-keppixseries.'),
default=None)
parser.add_argument('--plotfile', default='None',
help='name of output PNG plot file', type=str)
parser.add_argument('--plottype', default='global', help='Plotting type',
type=str, choices=['local','global','full'])
parser.add_argument('--filterlc', action='store_true',
help='High-pass Filter data?')
parser.add_argument('--function', default='boxcar', help='Type of filter',
type=str, choices=['boxcar','gauss','sinc'])
parser.add_argument('--cutoff', default=1.0,
help='Characteristic frequency cutoff of filter [1/days]',
type=float)
parser.add_argument('--overwrite', action='store_true',
help='Overwrite output file?')
parser.add_argument('--verbose', action='store_true',
help='Write to a log file?')
parser.add_argument('--logfile', '-l', help='Name of ascii log file',
default='keppixseries.log', dest='logfile', type=str)
args = parser.parse_args()
keppixseries(args.infile, args.outfile, args.plotfile, args.plottype,
args.filterlc, args.function, args.cutoff, args.overwrite,
args.verbose, args.logfile)
|
gully/PyKE
|
pyke/keppixseries.py
|
Python
|
mit
| 24,628
|
[
"Gaussian"
] |
947c31d8d9d2dc06acd08933e10014234518b8ae1ba623fd8c2eb31af030f5b1
|
import collect_array as ca
import collect_id as ci
import collect_loop as cl
import collect_device as cd
class GenReverseIdx(object):
def __init__(self):
self.ReverseIdx = dict()
self.ReverseIdx[0] = 1
self.ReverseIdx[1] = 0
def get_reverse_idx(ast):
gen_reverse_idx = GenReverseIdx()
return gen_reverse_idx.ReverseIdx
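# Tiny illustrative check (not part of the original module): the reverse
# index table simply swaps the two parallel grid dimensions, 0 <-> 1.
def _reverse_idx_demo():
    table = GenReverseIdx().ReverseIdx
    return table[0] == 1 and table[1] == 0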
class GenHostArrayData(object):
def __init__(self):
super(GenHostArrayData, self).__init__()
self.HstId = dict()
self.TransposableHstId = list()
self.Mem = dict()
def collect(self, ast):
arrays_ids = ca.GlobalArrayIds()
arrays_ids.visit(ast)
for n in arrays_ids.ids:
self.HstId[n] = 'hst_ptr' + n
self.Mem[n] = 'hst_ptr' + n + '_mem_size'
transposable_array_ids = ca.get_transposable_array_ids(ast)
for n in transposable_array_ids:
self.HstId[n] = n
self.TransposableHstId.append(n)
def get_mem_names(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.Mem
def get_host_ids(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.HstId
def gen_transposable_host_ids(ast):
host_array_data = GenHostArrayData()
host_array_data.collect(ast)
return host_array_data.TransposableHstId
def get_kernel_args(ast):
gen_kernel_args = GenKernelArgs()
gen_kernel_args.collect(ast)
return gen_kernel_args.kernel_args
class GenArrayDimNames(object):
def __init__(self):
self.num_array_dims = dict()
self.ArrayIdToDimName = dict()
def collect(self, ast):
num_array_dim = ca.NumArrayDim(ast)
num_array_dim.visit(ast)
self.num_array_dims = num_array_dim.numSubscripts
for array_name, num_dims in num_array_dim.numSubscripts.items():
tmp = list()
for i in xrange(num_dims):
tmp.append('hst_ptr' + array_name + '_dim' + str(i + 1))
self.ArrayIdToDimName[array_name] = tmp
stencil_array_id_to_dim_name = ca.LocalMemArrayIdToDimName()
stencil_array_id_to_dim_name.visit(ast)
for key, value in stencil_array_id_to_dim_name.ArrayIdToDimName.iteritems():
self.ArrayIdToDimName[key] = value
def get_array_id_to_dim_name(ast):
gen_array_dim_names = GenArrayDimNames()
gen_array_dim_names.collect(ast)
return gen_array_dim_names.ArrayIdToDimName
class GenIdxToDim(object):
def __init__(self):
self.IdxToDim = dict()
def collect(self, ast, par_dim=2):
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for i, n in enumerate(reversed(grid_indices)):
self.IdxToDim[i] = n
class GenKernelArgs(object):
def __init__(self):
self.kernel_args = dict()
def collect(self, ast):
arrays_ids = ca.GlobalArrayIds()
arrays_ids.visit(ast)
array_ids = arrays_ids.ids
# print self.ArrayIds
nonarray_ids = ci.GlobalNonArrayIds()
nonarray_ids.visit(ast)
non_array_ids = nonarray_ids.ids
mytype_ids = ci.GlobalTypeIds()
mytype_ids.visit(ast)
types = mytype_ids.types
gen_removed_ids = GenRemovedIds()
gen_removed_ids.collect(ast)
removed_ids = gen_removed_ids.removed_ids
kernel_arg_defines = ci.get_kernel_arg_defines(ast)
arg_ids = non_array_ids.union(array_ids) - removed_ids - kernel_arg_defines
gen_array_dimnames = GenArrayDimNames()
gen_array_dimnames.collect(ast)
num_array_dims = gen_array_dimnames.num_array_dims
arrayid_to_dimname = gen_array_dimnames.ArrayIdToDimName
for n in arg_ids:
tmplist = {n}
try:
if num_array_dims[n] == 2:
tmplist.add(arrayid_to_dimname[n][0])
except KeyError:
pass
for m in tmplist - kernel_arg_defines:
self.kernel_args[m] = types[m]
class GenRemovedIds(object):
def __init__(self):
self.removed_ids = set()
def collect(self, ast):
grid_indices = cl.get_grid_indices(ast)
col_loop_limit = cl.LoopLimit()
col_loop_limit.visit(ast)
upper_limit = col_loop_limit.upper_limit
upper_limits = set(upper_limit[i] for i in grid_indices)
my_kernel = cd.get_kernel(ast)
ids_still_in_kernel = ci.Ids()
ids_still_in_kernel.visit(my_kernel)
self.removed_ids = upper_limits - ids_still_in_kernel.ids
def get_removed_ids(ast):
gen_removed_ids = GenRemovedIds()
gen_removed_ids.collect(ast)
return gen_removed_ids.removed_ids
class GenLocalArrayIdx(object):
def __init__(self):
self.IndexToLocalVar = dict()
def collect(self, ast):
par_dim = cl.get_par_dim(ast)
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for var in grid_indices:
self.IndexToLocalVar[var] = 'l' + var
def get_local_array_idx(ast):
gen_local_array_idx = GenLocalArrayIdx()
gen_local_array_idx.collect(ast)
return gen_local_array_idx.IndexToLocalVar
class GenIdxToThreadId(object):
def __init__(self):
self.IndexToThreadId = dict()
def collect(self, ast):
par_dim = cl.get_par_dim(ast)
col_li = cl.LoopIndices(par_dim)
col_li.visit(ast)
grid_indices = col_li.grid_indices
for i, n in enumerate(reversed(grid_indices)):
self.IndexToThreadId[n] = 'get_global_id(' + str(i) + ')'
def gen_idx_to_dim(ast):
par_dim = cl.get_par_dim(ast)
gi_to_dim = GenIdxToDim()
gi_to_dim.collect(ast, par_dim)
return gi_to_dim.IdxToDim
|
dikujepsen/OpenTran
|
v2.0/framework/Matmul/collect_gen.py
|
Python
|
mit
| 5,879
|
[
"VisIt"
] |
8eb651e4d7733e8f8867c47017883f1910b5a0343704162a0537a30700d3ed50
|
#!/usr/bin/env python
# (C) Copyright 2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import time
import cdsapi
c = cdsapi.Client(debug=True, wait_until_complete=False)
r = c.retrieve(
"reanalysis-era5-single-levels",
{
"variable": "2t",
"product_type": "reanalysis",
"date": "2015-12-01",
"time": "14:00",
"format": "netcdf",
},
)
sleep = 30
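# Poll the queued request until the CDS backend reaches a terminal state;
# reply["state"] cycles through "queued"/"running" before ending up as
# "completed" or "failed".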
while True:
r.update()
reply = r.reply
r.info("Request ID: %s, state: %s" % (reply["request_id"], reply["state"]))
if reply["state"] == "completed":
break
elif reply["state"] in ("queued", "running"):
r.info("Request ID: %s, sleep: %s", reply["request_id"], sleep)
time.sleep(sleep)
elif reply["state"] in ("failed",):
r.error("Message: %s", reply["error"].get("message"))
r.error("Reason: %s", reply["error"].get("reason"))
for n in (
reply.get("error", {}).get("context", {}).get("traceback", "").split("\n")
):
if n.strip() == "":
break
r.error(" %s", n)
raise Exception(
"%s. %s." % (reply["error"].get("message"), reply["error"].get("reason"))
)
r.download("test.nc")
|
ecmwf/cdsapi
|
examples/example-era5-update.py
|
Python
|
apache-2.0
| 1,550
|
[
"NetCDF"
] |
db1fd69705d143bc484e66d06c15053a8e47f43c64652a54e858c63d167b9caa
|
r"""
This module is a ParaViewWeb server application.
The following command line illustrates how to use it::
$ pvpython .../pv_web_data_prober.py --data-dir /.../path-to-your-data-directory
--data-dir
Path used to list that directory on the server and let the client choose a
file to load. You may also specify multiple directories, each with a name
that should be displayed as the top-level name of the directory in the UI.
If this parameter takes the form: "name1=path1|name2=path2|...",
then we will treat this as the case where multiple data directories are
required. In this case, each top-level directory will be given the name
associated with the directory in the argument.
Any ParaViewWeb executable script comes with a set of standard arguments that can be overridden if need be::
--port 8080
Port number on which the HTTP server will listen.
--content /path-to-web-content/
Directory that you want to serve as static web content.
By default, this variable is empty which means that we rely on another
server to deliver the static content and the current process only
focuses on the WebSocket connectivity of clients.
--authKey vtkweb-secret
Secret key that should be provided by the client to allow it to make any
WebSocket communication. The client will assume if none is given that the
server expects "vtkweb-secret" as secret key.
"""
# Application to probe datasets.
import sys
import os
import os.path
# import paraview modules.
from paraview import simple, servermanager, vtk
from paraview.web import wamp as pv_wamp
from paraview.web import protocols as pv_protocols
from vtk.web import server
from vtkWebCorePython import *
# import annotations
from autobahn.wamp import register as exportRpc
from twisted.python import log
import logging
try:
import argparse
except ImportError:
# since Python 2.6 and earlier don't have argparse, we simply provide
# the source for the same as _argparse and we use it instead.
from vtk.util import _argparse as argparse
# =============================================================================
# Create custom Data Prober class to handle clients requests
# =============================================================================
class _DataProber(pv_wamp.PVServerProtocol):
"""
    DataProber extends paraview.web.PVServerProtocol to add an API for
    loading datasets and probing them.
"""
DataPath = "."
PipelineObjects = []
Database = ""
Widget = None
View = None
authKey = "vtkweb-secret"
def initialize(self):
global directoryToList
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
# Update authentication key to use
self.updateSecret(_DataProber.authKey)
@classmethod
def setupApplication(cls):
"""Setups the default application state."""
# read data directory.
root = { "name": "ROOT", "dirs" : [], "files" : []}
directory_map = {}
directory_map[_DataProber.DataPath] = root
for path, dirs, files in os.walk(_DataProber.DataPath):
element = directory_map[path]
            for name in dirs:
                item = { "name": name, "dirs" : [], "files" : []}
                directory_map[os.path.join(path, name)] = item
                element["dirs"].append(item)
            element["files"] = []
for name in files:
relpath = os.path.relpath(os.path.join(path, name),
_DataProber.DataPath)
item = { "name" : name, "itemValue" : relpath}
element["files"].append(item)
cls.Database = root
cls.View = simple.CreateRenderView()
simple.Render()
# setup animation scene
scene = simple.GetAnimationScene()
simple.GetTimeTrack()
scene.PlayMode = "Snap To TimeSteps"
@classmethod
def endInteractionCallback(cls, self):
def callback(caller, event):
caller.GetProperty("Point1WorldPosition").Copy(
caller.GetProperty("Point1WorldPositionInfo"))
caller.GetProperty("Point2WorldPosition").Copy(
caller.GetProperty("Point2WorldPositionInfo"))
self.publish("vtk.event.probe.data.changed", True)
print 'publish callback'
return callback
def update3DWidget(self):
cls = self.__class__
if not cls.Widget:
widget = simple.servermanager.rendering.LineWidgetRepresentation()
widget.Point1WorldPosition = [-1, -1, -1]
widget.Point2WorldPosition = [1, 1, 1]
cls.View.Representations.append(widget)
widget.Enabled = 1
cls.Widget = widget
widget.SMProxy.AddObserver(vtk.vtkCommand.EndInteractionEvent,
cls.endInteractionCallback(self))
if cls.PipelineObjects:
# compute bounds for all pipeline objects.
total_bounds = [ vtk.VTK_DOUBLE_MAX, vtk.VTK_DOUBLE_MIN,
vtk.VTK_DOUBLE_MAX, vtk.VTK_DOUBLE_MIN,
vtk.VTK_DOUBLE_MAX, vtk.VTK_DOUBLE_MIN]
for item in cls.PipelineObjects:
reader = item["Reader"]
probe = item["Probe"]
bounds = reader.GetDataInformation().GetBounds()
if vtk.vtkMath.AreBoundsInitialized(bounds):
if total_bounds[0] > bounds[0]:
total_bounds[0] = bounds[0]
if total_bounds[1] < bounds[1]:
total_bounds[1] = bounds[1]
if total_bounds[2] > bounds[2]:
total_bounds[2] = bounds[2]
if total_bounds[3] < bounds[3]:
total_bounds[3] = bounds[3]
if total_bounds[4] > bounds[4]:
total_bounds[4] = bounds[4]
if total_bounds[5] < bounds[5]:
total_bounds[5] = bounds[5]
            if total_bounds[0] <= total_bounds[1]:
                cls.Widget.Point1WorldPosition = [total_bounds[0],
                                                  total_bounds[2],
                                                  total_bounds[4]]
                cls.Widget.Point2WorldPosition = [total_bounds[1],
                                                  total_bounds[3],
                                                  total_bounds[5]]
minpos = cls.Widget.Point1WorldPosition
maxpos = cls.Widget.Point2WorldPosition
return (minpos[0], maxpos[0], minpos[1], maxpos[1], minpos[2], maxpos[2])
@classmethod
def toHTML(cls, element):
if element.has_key("itemValue"):
return '<li itemValue="%s">%s</li>' % (element["itemValue"],
element["name"])
if element["name"] != "ROOT":
text = "<li>"
text += element["name"]
text += "<ul>"
else:
text = ""
for item in element["dirs"]:
text += cls.toHTML(item)
for item in element["files"]:
text += cls.toHTML(item)
if element["name"] != "ROOT":
text += "</ul></li>"
return text
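    # toHTML() above renders the Database tree as nested <ul>/<li> markup;
    # leaf files carry an itemValue attribute holding their path relative
    # to DataPath, which the client passes back to loadData().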
# RpcName: loadData => pv.data.prober.load.data
@exportRpc("pv.data.prober.load.data")
def loadData(self, datafile):
"""Load a data file. The argument is a path relative to the DataPath
pointing to the dataset to load.
Returns True if the dataset was loaded successfully, otherwise returns
False.
If the dataset is loaded, this methods setups the visualization
pipelines for interactive probing all loaded datasets.
"""
datafile = os.path.join(_DataProber.DataPath, datafile)
log.msg("Loading data-file", datafile, logLevel=logging.DEBUG)
reader = simple.OpenDataFile(datafile)
if not reader:
return False
rep = simple.Show(reader, Representation="Wireframe")
probe = simple.PlotOverLine(Source = "High Resolution Line Source")
item = {}
item["Reader"] = reader
item["ReaderRepresentation"] = rep
item["Probe"] = probe
item["name"] = os.path.split(datafile)[1]
        _DataProber.PipelineObjects.append(item)
        return True
# RpcName: loadDatasets => pv.data.prober.load.dataset
@exportRpc("pv.data.prober.load.dataset")
def loadDatasets(self, datafiles):
# initially, we'll only support loading 1 dataset.
for item in _DataProber.PipelineObjects:
simple.Delete(item["Probe"])
simple.Delete(item["ReaderRepresentation"])
simple.Delete(item["Reader"])
_DataProber.PipelineObjects = []
for path in datafiles:
self.loadData(path)
bounds = self.update3DWidget()
self.resetCameraWithBounds(bounds)
simple.Render()
return True
# RpcName: getProbeData => pv.data.prober.probe.data
@exportRpc("pv.data.prober.probe.data")
def getProbeData(self):
"""Returns probe-data from all readers. The returned datastructure has
the following syntax.
[
{
"name" : "<name>",
"headers" : [ "foo", "time", "bar" ],
"data" : [ [3, "2009-11-04", 1], [...], ...]
},
{ ... }, ...
]
"""
retVal = []
for item in _DataProber.PipelineObjects:
name = item["name"]
probe = item["Probe"]
probe.Source.Point1 = _DataProber.Widget.Point1WorldPosition
probe.Source.Point2 = _DataProber.Widget.Point2WorldPosition
print "Probing ", probe.Source.Point1, probe.Source.Point2
simple.UpdatePipeline(time=_DataProber.View.ViewTime, proxy=probe)
# fetch probe result from root node.
do = simple.servermanager.Fetch(probe, 0)
data = vtkWebUtilities.WriteAttributesToJavaScript(
vtk.vtkDataObject.POINT, do)
headers = vtkWebUtilities.WriteAttributeHeadersToJavaScript(
vtk.vtkDataObject.POINT, do)
# process the strings returned by vtkPVWebUtilities to generate
# Python objects
nan = "_nan_"
data = eval(data)
headers = eval(headers)
retVal.append({ "name": name,
"headers": headers,
"data" : data })
return retVal
def resetCameraWithBounds(self, bounds):
if vtk.vtkMath.AreBoundsInitialized(bounds):
_DataProber.View.SMProxy.ResetCamera(bounds)
_DataProber.View.CenterOfRotation = [
(bounds[0] + bounds[1]) * 0.5,
(bounds[2] + bounds[3]) * 0.5,
(bounds[4] + bounds[5]) * 0.5]
# RpcName: getDatabase => pv.data.prober.database.json
@exportRpc("pv.data.prober.database.json")
def getDatabase(self):
return _DataProber.Database
# RpcName: getDatabaseAsHTML => pv.data.prober.database.html
@exportRpc("pv.data.prober.database.html")
def getDatabaseAsHTML(self):
return _DataProber.toHTML(_DataProber.Database)
# RpcName: goToNext => pv.data.prober.time.next
@exportRpc("pv.data.prober.time.next")
def goToNext(self):
oldTime = self.View.ViewTime
simple.GetAnimationScene().GoToNext()
if oldTime != self.View.ViewTime:
self.publish("vtk.event.probe.data.changed", True)
print 'publish a'
return True
return False
# RpcName: goToPrev => pv.data.prober.time.previous
@exportRpc("pv.data.prober.time.previous")
def goToPrev(self):
oldTime = self.View.ViewTime
simple.GetAnimationScene().GoToPrevious()
if oldTime != self.View.ViewTime:
self.publish("vtk.event.probe.data.changed", True)
print 'publish b'
return True
return False
# =============================================================================
# Main: Parse args and start server
# =============================================================================
if __name__ == "__main__":
# Create argument parser
parser = argparse.ArgumentParser(description="ParaView Web data.prober")
# Add default arguments
server.add_arguments(parser)
# Add local arguments
parser.add_argument("--data-dir", help="path to data directory", dest="path")
    # Extract arguments
args = parser.parse_args()
# Configure our current application
_DataProber.DataPath = args.path
_DataProber.setupApplication()
_DataProber.authKey = args.authKey
# Start server
server.start_webserver(options=args, protocol=_DataProber)
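    # Hypothetical launch, following the module docstring above:
    #   $ pvpython pv_web_data_prober.py --data-dir /path/to/data --port 8080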
|
HopeFOAM/HopeFOAM
|
ThirdParty-0.1/ParaView-5.0.1/Web/Applications/DataProber/server/pv_web_data_prober.py
|
Python
|
gpl-3.0
| 13,167
|
[
"ParaView",
"VTK"
] |
8bdfb69a99c21164fa58fa20a38999f07e08be632d7637a6b3c978e7d9dc4110
|
from __future__ import division
from __future__ import print_function
#This module contains several functions which calculate
#observational quantities affected by cosmology
from builtins import range
from builtins import object
from past.utils import old_div
import bpz_tools
import useful
import numpy
import glob
import os
import sys
f_z_sed = bpz_tools.f_z_sed
f_z_sed_AB = bpz_tools.f_z_sed_AB
equal = numpy.equal
log10 = numpy.log10
# numpy and helper-module aliases used throughout this module
sqrt = numpy.sqrt
argmin = numpy.argmin
trapz = numpy.trapz
ABtoVega = bpz_tools.ABtoVega
filter_center = bpz_tools.filter_center
put_data = useful.put_data
cho = 2.99e3 # c/H_0 in Mpc
ht = 9.7776e9 # hubble time in h^-1 yr
#Get the ABflux files in stock
ab_db = []
ab_dir = bpz_tools.ab_dir
print("## AB_DIR: %s" % ab_dir, file=sys.stderr)
ab_db = glob.glob(ab_dir + '*.AB')
for i in range(len(ab_db)):
ab_db[i] = os.path.basename(ab_db[i])
ab_db[i] = ab_db[i][:-3]
#K-corrections and the like
def kcor(z, sed, filter):
"""K-correction in a giver filter for the spectrum SED at redshift z
( m=M+5log(D_L/10pc)+K(z) )"""
fo = f_z_sed(sed, filter)
if type(z) == type(1.): z = numpy.array([z])
k = 2.5 * log10((1. + z) * fo / f_z_sed(sed, filter, z))
if len(k) == 1: return k[0]
else: return k
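# Illustrative sketch (not part of the original module): apparent magnitude
# from an absolute one via m = M + DM(z) + K(z), using dist_mod() (defined
# below) and kcor() above. The numbers are made up for demonstration.
def _apparent_mag_sketch(M=-21.0, z=0.5, sed='El_cww', filt='B_Johnson',
                         cosmology=(0.3, 0.7, 0.7)):
    return M + dist_mod(z, cosmology) + kcor(z, sed, filt)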
def reobs(sed,
m=0.,
z_0=0.,
oldfilter='I_LRIS',
z_new=0.,
newfilter='V_LRIS',
cosmology=(0.3, 0.7, .7),
madau='yes'):
"""Arguments: sed,m,z_0,oldfilter,z_new,newfilter,cosmology
Takes a galaxy with m at redshift z_0 in oldfilter,
SED=sed and produces its new magnitude in newfilter at z_new.
Takes into account cosmological dimming and intergalactic Madau absorption
The tuple cosmology=(omega,lambda,hubble_constant)
"""
if sed[-4:] == '.sed': sed = sed[:-4]
#single_z=type(z_new)==type(z_0)
single_z = z_new.__class__.__name__[0:3] == z_0.__class__.__name__[0:3]
if single_z:
if z_0 == z_new and oldfilter == newfilter: return m
z_new = numpy.array([z_new])
#Calculate fnew
model = '.'.join([sed, newfilter, 'AB'])
model_path = os.path.join(ab_dir, model)
#Check whether there are already AB files
if madau == 'yes':
if model[:-3] in ab_db:
zo, f_mod_0 = useful.get_data(model_path, (0, 1))
fnew = z_new * 0.
for i in range(len(z_new)):
fnew[i] = useful.match_resol(zo, f_mod_0, z_new[i])
else:
fnew = f_z_sed_AB(sed, newfilter, z_new, 'nu')
else:
fnew = f_z_sed(sed, newfilter, z_new, units='nu', madau=madau)
fnew = numpy.where(
equal(fnew, 0.), 99.,
fnew) # if the new flux is 0, returns 99. (code non-detection)
#Calculate f_old
model = '.'.join([sed, oldfilter, 'AB'])
model_path = os.path.join(ab_dir, model)
#Check whether there are already AB files
if madau == 'yes':
if model[:-3] in ab_db:
zo, f_mod_0 = useful.get_data(model_path, (0, 1))
f_old = useful.match_resol(zo, f_mod_0, z_0)
else:
f_old = f_z_sed_AB(sed, oldfilter, numpy.array([z_0]), units='nu')
else:
f_old = f_z_sed(sed, oldfilter, numpy.array([z_0]), units='nu')
k = 2.5 * log10((old_div((1. + z_new), fnew)) * (old_div(f_old,
(1. + z_0))))
if single_z and z_0 == z_new[0]:
m_obs = m + k
return m_obs[0]
#Distance modulus
dist = dist_mod(z_new, cosmology) - dist_mod(z_0, cosmology)
m_obs = m + dist + k
if single_z: return m_obs[0]
else: return m_obs
def color_z(sed,
filter_new,
filter_old,
z=numpy.arange(0., 1.5, 0.5),
calibration='AB',
file=None):
"""
Calculates the color filter_new-filter_old at the redshift vector z.
It can return the color in Vega or AB calibrations
Usage:
gr=color_z('El_cww','g_WFC','r_WFC',z=numpy.arange(0.,2.,0.1),'Vega')
It also works with scalars, e.g.
gr=color_z('El_cww','g_WFC','r_WFC',1.2)
"""
try:
n = len(z)
except:
z = numpy.array([z])
n = 1
color = z * 0.
for i in range(len(z)):
color[i] = reobs(sed, 0., z[i], filter_old, z[i], filter_new)
if calibration == 'Vega':
color += ABtoVega(0., filter_new) - ABtoVega(0., filter_old)
if file == None:
if n == 1: color = color[0]
return color
else:
put_data(file, (z, color),
header='z %s-%s(%s) ' %
(filter_new, filter_old, calibration))
def m_abs(m, z, sed, filter, cosmology=(0.3, .7, .7), filter2=None):
"""Arguments: m,z,sed,filter,cosmology,filter2
If filter2 is used, returns the absolute magnitude
in a different filter"""
#print z,sed,filter
mabs = m - dist_mod(z, cosmology) - kcor(z, sed, filter)
if filter2 != None:
#We add the color filter2-filter at z=0
mabs = mabs + reobs(sed, 0., 0., filter, 0., filter2)
return mabs
#Include something that converts absolute magnitudes into solar luminosities!!
#def luminosity(m,filter):
# Basically: find the absolute magnitude of the Sun in the corresponding
# filter (m_sun) and normalize
# return m-dist_mod(z,cosmo[0],cosmo[1],cosmo[2])
#COSMOLOGICAL DISTANCES
def dl_lambda(z, omega=.3, h=1.):
"""Aproximation for the luminosity distance
for flat cosmologies with cosmological constant
ApJSS, Ue Li Pen 120:4950, 1999"""
if omega < 0.2:
raise """omega less than 0.2: outside
parameter range for the aproximation"""
if h > 1. or h < .4:
print("Wrong value for h", h)
sys.exit()
def eta(a, om):
s = (old_div((1. - om), om))**(old_div(1., 3.))
return 2.*numpy.sqrt(s**3+1.)*\
(a**(-4)-0.1540*s/a**3+0.4304*s**2/a**2+
0.19097*s**3/a+0.066941*s**4)**(old_div(-1.,8.))
return 2.9979*1e5/(h*100.)*(1.+z)*\
(eta(1.,omega)-eta(old_div(1.,(1.+z)),omega))
def dl_nolambda(z, omega=.3, h=.7):
"""Luminosity distance for a lambda=0
universe"""
cosa = sqrt(1. + omega * z)
return 2.9979 * 1e5 / (h * 100.) * z * (1. + cosa + z) /\
(1. + cosa + omega * z / 2.)
def dl(z, cosmology=(.3, .7, .7)):
omega, l, h = cosmology
if l > 0.:
        if l + omega != 1.: raise ValueError('lambda>0 but not a flat cosmology!')
        return dl_lambda(z, omega, h)
    if l == 0: return dl_nolambda(z, omega, h)
    if l < 0: raise ValueError('lambda<0!!')
def da(z, cosmology=(.3, .7, .7)):
return old_div(dl(z, cosmology), (1. + z)**2)
######New distance definitions. Hogg 1999, astro-ph/9905116############Not tested!
def dh(cosmology=(0.3, 0.7, 0.7)):
return old_div(3000., cosmology[2])
def omega_k(cosmology=(0.3, 0.7, 0.7)):
return 1. - cosmology[0] - cosmology[1]
def e(z, cosmology=(0.3, 0.7, 0.7)):
o_k = omega_k(cosmology)
o_m = cosmology[0]
o_l = cosmology[1]
return sqrt(o_m * (1. + z)**3 + o_k * (1. + z)**2 + o_l)
def lookback_time(z, cosmology):
zp = numpy.arange(0., z, min([0.001, old_div(z, 100.)]))
return 9.78e9 / cosmology[2] * trapz(1. / (1 + zp) / e(zp, cosmology), zp)
#def dc(z,cosmology=(0.3,0.7,0.7)):
# dz=0.000001
# xz=arange(0.,z+dz,dz)
# ez=e(xz,cosmology)
# return dh(cosmology)*trapz(ez,xz)
#def dm(z,cosmology=(0.3,0.7,0.7)):
# o_k=omegak(cosmology)
# if o_k>0:
# return dh(cosmology)/sqrt(o_k)*sinh(sqrt(o_k)*dc(z,cosmology)/dh(cosmology))
# elif o_k==0:
# return dc(z,cosmology)
# else:
# o_k=abs(o_k)
# return dh(cosmology)/sqrt(o_k)*sin(sqrt(o_k)*dc(z,cosmology)/dh(cosmology))
#def da_h(z,cosmology=(0.3,0.7,0.7)):
# return dm(z,cosmology)/(1.+z)
#def dl_h(z,cosmology=(0.3,0.7,0.7)):
# return dm(z,cosmology)*(1.+z)
######################## Hogg 1999 ########################################
class vc(object):
def __init__(self,
z=0.57,
sed='Sbc_cww',
m=20.,
em=0.02,
filter='B_Johnson',
cosmo=(0.3, 0.7, 0.7),
vc_filter='B_Johnson'):
"""Generates velocity dispersion and error for a galaxy using TF or Faber--Jackson
Inputs: redshift, magnitude, error, filter, spectral type, cosmo and filter for TF in the rest frame
(FB Jackson always uses BJ as the rest frame filter)
Usage:
cosa=vc(0.55,'El_cww',20.,0.02,'I_Cousins',(0.3,0.7,0.7))
cosa=vc(0.55,'Scd_cww',20.,0.02,'I_Cousins',(0.3,0.7,0.7),'H_Johnson')
Uses the closest rest frame filter by default
Assumes that the input magnitudes are AB
"""
self.sed = sed
#Everything has to be properly transformed from AB to Vega!!
#Info about TF
#Pierce and Tully 1992
#vc=158.1*10.**(-(mabs+constant_TF)/slope_TF)
#It actually only works for Sbc galaxies
#For bluer stuff it is better to use the reddest filter, I_Cousins
#H_Johnson gives weird results, it may be due to template problems
self.filters_TF = ['B_Johnson', 'R_Cousins', 'I_Cousins', 'H_Johnson']
self.centers_TF = [4477.8, 6648.33, 8086.4, 16509.64]
self.slope_TF = [7.48, 8.23, 8.72, 9.50]
self.constant_TF = [19.55, 20.46, 20.94, 21.67]
self.error_TF = [0.14, 0.10, 0.10, 0.08]
#Info about FB
# Kochanek 1994, ApJ
# sigma_*=(225+-22.5)*(L/L_*)**(.24+-0.03)
# the error is approximate Kochanek 1996, magnitude is BJ
# with M_B (BJ) = -19.9+5*log10(h)
# Using L/L*=10.**[-0.4[M-M_*]]
# sigma_*=225.*10.(-(mabs+19.9)/10.42)
if sed == 'El_cww':
self.m_abs = ABtoVega(m_abs(m, z, sed, filter, cosmo, 'BJ'), 'BJ')
self.v_c = 225. * 10.**(.4 * (
-self.m_abs - (19.9 - 5. * log10(cosmo[2]))) * 0.24)
self.e_v_c = (old_div(25., 225.)) * self.v_c
self.filter_v_c = 'BJ'
else:
if sed == 'Sbc_cww' or sed == 'Scd_cww':
fc = filter_center(filter)
#Look for the closest filter
k = argmin(abs(numpy.array(self.centers_TF) - old_div(fc, (
1. + z))))
elif sed == 'Im_cww' or sed == 'SB2_kin' or sed == 'SB3_kin':
k = 2 #Use I_Cousins
self.m_abs = ABtoVega(
m_abs(m, z, sed, filter, cosmo, self.filters_TF[k]),
self.filters_TF[k])
self.v_c = 158.1 * 10.**(old_div(
-(self.m_abs + self.constant_TF[k]), self.slope_TF[k]))
self.e_v_c = self.v_c * 2.3 / self.slope_TF[k] * sqrt(
em**2 + self.error_TF[k]**2)
self.filter_v_c = self.filters_TF[k]
def dist_mod(z, cosmology=(0.3, .7, .7)):
"""Usage: dist_mod(z,cosmology)"""
return 25. + 5. * log10(dl(z, cosmology))
def angular_size(length, z, cosmology=(0.3, 0.7, .7)):
"""Usage: angular_size(length,z,cosmology)"""
return length / da(z, cosmology) / 3.141592654 * 180. * 3600.
def physical_size(angle, z, cosmology=(0.3, 0.7, .7)):
"""Usage: physical_size(angle,z,cosmology)
Units: arcseconds, Mpc"""
return angle / 360. / 60. / 60. * 2. * 3.141592654 * da(z, cosmology)
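# Quick consistency sketch (illustrative only): angular_size() and
# physical_size() invert each other at any redshift.
def _size_roundtrip_sketch(length_mpc=0.05, z=1.0, cosmo=(0.3, 0.7, 0.7)):
    theta = angular_size(length_mpc, z, cosmo)  # arcseconds
    return physical_size(theta, z, cosmo)       # ~= length_mpc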
def lookback_time_open(z, omega=0.3, h=0.7):
"""Usage: lookback_time_open(z,omega,h)
Units: Myr Approximation from Peacock"""
omega_z = omega * (1. + z) / (1. + omega * z)
h_z = h * (1. + z) * sqrt(1. + omega * z)
t = h_z * (1. + old_div(omega_z**.6, 2.))
return ht / t / 1e9
def test():
test = 'reobs'
Testing(test)
z1, z2, f1, f2, t, c = 0.2, 0.8, 'V_LRIS', 'I_LRIS', 'El_cww', (0.3, 0.7,
0.7)
dr = reobs(t, 0., z1, f1, z2, f2, c)
ds = (reobs(t, 0., 0., f1, 0., f2, c) + 5. * log10(old_div(
dl(z2, c), dl(z1, c))) + (kcor(z2, t, f2) - kcor(z1, t, f1)))
print('dr,ds')
print(dr, ds)
ask('More?')
#The reobs function has been tested indirectly by using
#bpz to estimate redshifts of objects whose colors were generated
#by reobs, the agreement is perfect.
#The rest of the cosmological functions can be tested
#by comparing them with plots in the original references
pass
test = 'Distance modulus'
Testing(test)
print('Compare with Peebles Physical Cosmology, page 329')
print('Values of Omega are 0.2,0.5,1.')
z = numpy.arange(0.0001, 10., .01)
omega = [0.2, 0.5, 1.]
d = []
dlambda = []
p1 = FramedPlot()
p1.title = 'Lambda = 0'
p1.xrange = -0.2, 10.
p1.yrange = 41., 52.
p2 = FramedPlot()
p2.title = 'Flat universes'
p2.xrange = -0.2, 10.
p2.yrange = 41., 52.
for i in range(len(omega)):
d.append(dist_mod(z, (omega[i], 0., 1.)))
dlambda.append(dist_mod(z, (omega[i], 1. - omega[i], 1.)))
p1.add(Curve(z, d[i]))
p2.add(Curve(z, dlambda[i]))
p1.show()
p2.show()
print()
print()
print()
ask('More?')
test = 'Cosmological distances'
Testing(test)
z = numpy.arange(0., 4., .01)
da1 = old_div(da(z, (1., 0., 1.)), cho)
da2 = old_div(da(z, (.3, 0., 1.)), cho)
da3 = old_div(da(z, (.3, 0.7, 1.)), cho)
p = FramedPlot()
p.add(Curve(z, da1))
p.add(Curve(z, da3))
p.add(Curve(z, da2, style='dashed'))
p.yrange = 0., 1.
p.show()
print("Compare with Cosmological Physics, page 93")
print()
print()
print()
ask('More?')
test = 'K-corrections'
Testing(test)
print('Compare with Physical cosmology, page 331')
z = numpy.arange(0., 1.5, .01)
p = FramedPlot()
p.xrange = 0., 1.5
p.yrange = -1., 5.
for tipo in ['El_cww', 'Sbc_cww', 'Scd_cww']:
k = kcor(z, tipo, 'B_Johnson')
p.add(Curve(z, k))
p.show()
print()
print()
print()
if __name__ == '__main__':
test()
else:
pass
|
boada/planckClusters
|
MOSAICpipe/pipe_utils/cosmology.py
|
Python
|
mit
| 13,917
|
[
"Galaxy"
] |
b07bbc3a5ea97ce197e39fe9899a5bf380430de76a28dbee436eef79a92688e6
|
""" This is a test of the ProxyDB
It supposes that the DB is present and installed in DIRAC
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=invalid-name,wrong-import-position,protected-access
import os
import re
import sys
import stat
import shutil
import tempfile
# TODO: This should be modernised to use subprocess(32)
try:
import commands
except ImportError:
# Python 3's subprocess module contains a compatibility layer
import subprocess as commands
import unittest
from diraccfg import CFG
from DIRAC.Core.Base.Script import parseCommandLine
parseCommandLine()
import DIRAC
from DIRAC import gLogger, gConfig, S_OK, S_ERROR
from DIRAC.Core.Security.X509Chain import X509Chain # pylint: disable=import-error
from DIRAC.FrameworkSystem.DB.ProxyDB import ProxyDB
from DIRAC.Resources.ProxyProvider.DIRACCAProxyProvider import DIRACCAProxyProvider
certsPath = os.path.join(os.path.dirname(DIRAC.__file__), "Core/Security/test/certs")
ca = DIRACCAProxyProvider()
ca.setParameters(
{"CertFile": os.path.join(certsPath, "ca/ca.cert.pem"), "KeyFile": os.path.join(certsPath, "ca/ca.key.pem")}
)
diracTestCACFG = """
Resources
{
ProxyProviders
{
DIRAC_CA
{
ProviderType = DIRACCA
CertFile = %s
KeyFile = %s
Supplied = C, O, OU, CN
Optional = emailAddress
DNOrder = C, O, OU, CN, emailAddress
OU = None
C = DN
O = DIRACCA
}
}
}
""" % (
os.path.join(certsPath, "ca/ca.cert.pem"),
os.path.join(certsPath, "ca/ca.key.pem"),
)
userCFG = """
Registry
{
Users
{
# In dirac_user group
user_ca
{
DN = /C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org
DNProperties
{
DN.1
{
DN = /C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org
ProxyProviders = DIRAC_CA
Groups = dirac_user
}
}
}
user
{
DN = /C=CC/O=DN/O=DIRAC/CN=user
DNProperties
{
DN.1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user
ProxyProviders =
Groups = dirac_user
}
}
}
user_1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_1
DNProperties
{
DN.1
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_1
ProxyProviders =
Groups = dirac_user
}
}
}
user_2
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_2
}
user_3
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_3
}
# Not in dirac_user group
user_4
{
DN = /C=CC/O=DN/O=DIRAC/CN=user_4
}
}
Groups
{
group_1
{
Users = user_ca, user, user_1, user_2, user_3
VO = vo_1
}
group_2
{
Users = user_4
enableToDownload = False
}
}
VO
{
vo_1
{
VOMSName = vo_1
VOMSServers
{
}
}
}
}
"""
db = ProxyDB()
class ProxyDBTestCase(unittest.TestCase):
@classmethod
def createProxy(self, userName, group, time, vo=None, role=None):
"""Create user proxy
:param str userName: user name
:param str group: group name
:param int time: proxy expired time
:param str vo: VOMS VO name
:param str role: VOMS Role
:return: S_OK(tuple)/S_ERROR() -- contain proxy as and as string
"""
userCertFile = os.path.join(self.userDir, userName + ".cert.pem")
userKeyFile = os.path.join(self.userDir, userName + ".key.pem")
self.proxyPath = os.path.join(self.userDir, userName + ".pem")
if not vo:
chain = X509Chain()
# Load user cert and key
retVal = chain.loadChainFromFile(userCertFile)
if not retVal["OK"]:
gLogger.warn(retVal["Message"])
return S_ERROR("Can't load %s" % userCertFile)
retVal = chain.loadKeyFromFile(userKeyFile)
if not retVal["OK"]:
gLogger.warn(retVal["Message"])
if "bad decrypt" in retVal["Message"]:
return S_ERROR("Bad passphrase")
return S_ERROR("Can't load %s" % userKeyFile)
result = chain.generateProxyToFile(self.proxyPath, time * 3600, diracGroup=group)
if not result["OK"]:
return result
else:
cmd = "voms-proxy-fake --cert %s --key %s -q" % (userCertFile, userKeyFile)
cmd += " -hostcert %s -hostkey %s" % (self.hostCert, self.hostKey)
cmd += " -uri fakeserver.cern.ch:15000"
cmd += ' -voms "%s"' % vo
cmd += ' -fqan "/%s/Role=%s/Capability=NULL"' % (vo, role)
cmd += " -hours %s -out %s -rfc" % (time, self.proxyPath)
status, output = commands.getstatusoutput(cmd)
if status:
return S_ERROR(output)
chain = X509Chain()
result = chain.loadProxyFromFile(self.proxyPath)
if not result["OK"]:
return result
result = chain.generateProxyToString(12 * 3600, diracGroup=group)
if not result["OK"]:
return result
return S_OK((chain, result["Value"]))
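    # Two creation paths above: without a VO the proxy is built locally from
    # the user's cert/key via X509Chain; with a VO the test shells out to
    # voms-proxy-fake to embed the VOMS FQAN, then re-reads the result.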
@classmethod
def setUpClass(cls):
cls.failed = False
# Add configuration
cfg = CFG()
cfg.loadFromBuffer(diracTestCACFG)
gConfig.loadCFG(cfg)
cfg.loadFromBuffer(userCFG)
gConfig.loadCFG(cfg)
# Prepare CA
lines = []
cfgDict = {}
cls.caPath = os.path.join(certsPath, "ca")
cls.caConfigFile = os.path.join(cls.caPath, "openssl_config_ca.cnf")
# Save original configuration file
shutil.copyfile(cls.caConfigFile, cls.caConfigFile + "bak")
# Parse
fields = ["dir", "database", "serial", "new_certs_dir", "private_key", "certificate"]
with open(cls.caConfigFile, "r") as caCFG:
for line in caCFG:
if re.findall("=", re.sub(r"#.*", "", line)):
field = re.sub(r"#.*", "", line).replace(" ", "").rstrip().split("=")[0]
line = "dir = %s #PUT THE RIGHT DIR HERE!\n" % (cls.caPath) if field == "dir" else line
val = re.sub(r"#.*", "", line).replace(" ", "").rstrip().split("=")[1]
if field in fields:
for i in fields:
if cfgDict.get(i):
val = val.replace("$%s" % i, cfgDict[i])
cfgDict[field] = val
if not cfgDict[field]:
cls.failed = "%s have empty value in %s" % (field, cls.caConfigFile)
lines.append(line)
with open(cls.caConfigFile, "w") as caCFG:
caCFG.writelines(lines)
for field in fields:
if field not in cfgDict.keys():
cls.failed = "%s value is absent in %s" % (field, cls.caConfigFile)
cls.hostCert = os.path.join(certsPath, "host/hostcert.pem")
cls.hostKey = os.path.join(certsPath, "host/hostkey.pem")
cls.caCert = cfgDict["certificate"]
cls.caKey = cfgDict["private_key"]
os.chmod(cls.caKey, stat.S_IREAD)
# Check directory for new certificates
cls.newCertDir = cfgDict["new_certs_dir"]
if not os.path.exists(cls.newCertDir):
os.makedirs(cls.newCertDir)
for f in os.listdir(cls.newCertDir):
os.remove(os.path.join(cls.newCertDir, f))
# Empty the certificate database
cls.index = cfgDict["database"]
with open(cls.index, "w") as indx:
indx.write("")
# Write down serial
cls.serial = cfgDict["serial"]
with open(cls.serial, "w") as serialFile:
serialFile.write("1000")
        # Create temporary directory for user certificates
cls.userDir = tempfile.mkdtemp(dir=certsPath)
# Create user certificates
for userName in ["no_user", "user", "user_1", "user_2", "user_3"]:
userConf = """[ req ]
default_bits = 4096
encrypt_key = yes
distinguished_name = req_dn
prompt = no
req_extensions = v3_req
[ req_dn ]
C = CC
O = DN
0.O = DIRAC
CN = %s
[ v3_req ]
# Extensions for client certificates (`man x509v3_config`).
nsComment = "OpenSSL Generated Client Certificate"
keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth
""" % (
userName
)
userConfFile = os.path.join(cls.userDir, userName + ".cnf")
userReqFile = os.path.join(cls.userDir, userName + ".req")
userKeyFile = os.path.join(cls.userDir, userName + ".key.pem")
userCertFile = os.path.join(cls.userDir, userName + ".cert.pem")
with open(userConfFile, "w") as f:
f.write(userConf)
status, output = commands.getstatusoutput("openssl genrsa -out %s" % userKeyFile)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
os.chmod(userKeyFile, stat.S_IREAD)
status, output = commands.getstatusoutput(
"openssl req -config %s -key %s -new -out %s" % (userConfFile, userKeyFile, userReqFile)
)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
cmd = "openssl ca -config %s -extensions usr_cert -batch -days 375 -in %s -out %s"
cmd = cmd % (cls.caConfigFile, userReqFile, userCertFile)
status, output = commands.getstatusoutput(cmd)
if status:
gLogger.error(output)
exit()
gLogger.debug(output)
# Result
status, output = commands.getstatusoutput("ls -al %s" % cls.userDir)
if status:
gLogger.error(output)
exit()
gLogger.debug("User certificates:\n", output)
def setUp(self):
gLogger.debug("\n")
if self.failed:
self.fail(self.failed)
db._update('DELETE FROM ProxyDB_Proxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")')
db._update(
'DELETE FROM ProxyDB_CleanProxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")'
)
def tearDown(self):
db._update('DELETE FROM ProxyDB_Proxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")')
db._update(
'DELETE FROM ProxyDB_CleanProxies WHERE UserName IN ("user_ca", "user", "user_1", "user_2", "user_3")'
)
@classmethod
def tearDownClass(cls):
shutil.move(cls.caConfigFile + "bak", cls.caConfigFile)
if os.path.exists(cls.newCertDir):
for f in os.listdir(cls.newCertDir):
os.remove(os.path.join(cls.newCertDir, f))
for f in os.listdir(cls.caPath):
if re.match("%s..*" % cls.index, f) or f.endswith(".old"):
os.remove(os.path.join(cls.caPath, f))
if os.path.exists(cls.userDir):
shutil.rmtree(cls.userDir)
# Empty the certificate database
with open(cls.index, "w") as index:
index.write("")
# Write down serial
with open(cls.serial, "w") as serialFile:
serialFile.write("1000")
class testDB(ProxyDBTestCase):
def test_connectDB(self):
"""Try to connect to the ProxyDB"""
res = db._connect()
self.assertTrue(res["OK"])
def test_getUsers(self):
"""Test 'getUsers' - try to get users from DB"""
field = '("%%s", "/C=CC/O=DN/O=DIRAC/CN=%%s", %%s "PEM", TIMESTAMPADD(SECOND, %%s, UTC_TIMESTAMP()))%s' % ""
# Fill table for test
gLogger.info("\n* Fill tables for test..")
for table, values, fields in [
(
"ProxyDB_Proxies",
[field % ("user", "user", '"group_1",', "800"), field % ("user_2", "user_2", '"group_1",', "-1")],
"(UserName, UserDN, UserGroup, Pem, ExpirationTime)",
),
(
"ProxyDB_CleanProxies",
[field % ("user_3", "user_3", "", "43200")],
"(UserName, UserDN, Pem, ExpirationTime)",
),
]:
result = db._update("INSERT INTO %s%s VALUES %s ;" % (table, fields, ", ".join(values)))
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Testing 'getUsers'
gLogger.info("\n* Run `purgeExpiredProxies()`..")
for user, exp, expect, log in [
(False, 0, ["user", "user_2", "user_3"], "\n* Without arguments"),
(False, 1200, ["user_3"], "* Request proxy live time"),
("user_2", 0, ["user_2"], "* Request user name"),
("no_user", 0, [], "* Request not exist user name"),
]:
gLogger.info("%s.." % log)
result = db.getUsers(validSecondsLeft=exp, userMask=user)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
usersList = []
for line in result["Value"]:
if line["Name"] in ["user", "user_2", "user_3"]:
usersList.append(line["Name"])
self.assertEqual(set(expect), set(usersList), str(usersList) + ", when expected " + str(expect))
def test_purgeExpiredProxies(self):
"""Test 'purgeExpiredProxies' - try to purge expired proxies"""
# Purge existed proxies
gLogger.info("\n* First cleaning..")
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", "PEM", '
cmd += "TIMESTAMPADD(SECOND, -1, UTC_TIMESTAMP()));"
result = db._query(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
cmd = "SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE ExpirationTime < UTC_TIMESTAMP()"
self.assertTrue(bool(db._query(cmd)["Value"][0][0] > 0))
result = db.purgeExpiredProxies()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(result["Value"] > 0, "Must be more then null")
self.assertFalse(bool(db._query(cmd)["Value"][0][0] > 0), "Must be null")
def test_getRemoveProxy(self):
"""Testing get, store proxy"""
gLogger.info("\n* Check that DB is clean..")
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1" "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "In DB present proxies.")
gLogger.info("* Check posible crashes when get proxy..")
# Make record with not valid proxy, valid group, user and short expired time
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", "PEM", '
cmd += "TIMESTAMPADD(SECOND, 1800, UTC_TIMESTAMP()));"
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        # Try incorrect getProxy requests
for dn, group, reqtime, log in [
(
"/C=CC/O=DN/O=DIRAC/CN=user",
"group_1",
9999,
"No proxy provider, set request time, not valid proxy in ProxyDB_Proxies",
),
("/C=CC/O=DN/O=DIRAC/CN=user", "group_1", 0, "Not valid proxy in ProxyDB_Proxies"),
("/C=CC/O=DN/O=DIRAC/CN=no_user", "no_valid_group", 0, "User not exist, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user", "no_valid_group", 0, "Group not valid, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user", "group_1", 0, "No proxy provider for user, proxy not in DB tables"),
("/C=CC/O=DN/O=DIRAC/CN=user_4", "group_2", 0, "Group has option enableToDownload = False in CS"),
]:
gLogger.info("== > %s:" % log)
result = db.getProxy(dn, group, reqtime)
self.assertFalse(result["OK"], "Must be fail.")
gLogger.info("Msg: %s" % result["Message"])
        # In the last case the method found a proxy and must delete it as not valid
cmd = 'SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE UserName="user"'
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 0), "GetProxy method didn't delete the last proxy.")
gLogger.info("* Check that DB is clean..")
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "Proxies are present in the DB.")
gLogger.info("* Generate proxy on the fly..")
result = db.getProxy("/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", "group_1", 1800)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
gLogger.info("* Check that ProxyDB_CleanProxy contain generated proxy..")
result = db.getProxiesContent({"UserName": "user_ca"}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 1), "There must be exactly one generated proxy.")
for table, count in [("ProxyDB_Proxies", 0), ("ProxyDB_CleanProxies", 1)]:
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="user_ca"' % table
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == count),
table + " must " + (count and "contain proxy" or "be empty"),
)
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy(
"/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", proxyProvider="DIRAC_CA"
)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "Proxies are present in the DB.")
gLogger.info("* Upload proxy..")
for user, dn, group, vo, time, res, log in [
("user", "/C=CC/O=DN/O=DIRAC/CN=user", "group_1", False, 12, False, "With group extension"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, "vo_1", 12, False, "With voms extension"),
("user_1", "/C=CC/O=DN/O=DIRAC/CN=user_1", False, "vo_1", 12, False, "With voms extension"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, False, 0, False, "Expired proxy"),
("no_user", "/C=CC/O=DN/O=DIRAC/CN=no_user", False, False, 12, False, "Not exist user"),
("user", "/C=CC/O=DN/O=DIRAC/CN=user", False, False, 12, True, "Valid proxy"),
]:
# Clean tables with proxies
for table in ["ProxyDB_Proxies", "ProxyDB_CleanProxies"]:
result = db._update('DELETE FROM %s WHERE UserName = "user"' % table)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db._update('DELETE FROM %s WHERE UserName = "user_1"' % table)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
gLogger.info("== > %s:" % log)
result = self.createProxy(user, group, time, vo=vo)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
chain = result["Value"][0]
# Assert VOMSProxy
if vo:
self.assertTrue(bool(chain.isVOMS().get("Value")), "Cannot create proxy with VOMS extension")
result = db.generateDelegationRequest(chain, dn)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
resDict = result["Value"]
result = chain.generateChainFromRequestString(resDict["request"], time * 3500)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
if not chain.isVOMS().get("Value") and vo:
gLogger.info("voms-proxy-fake command not working as expected, so proxy have no VOMS extention")
res = not res
result = db.completeDelegation(resDict["id"], dn, result["Value"])
text = "Must be ended %s%s" % (
"successful" if res else "with error",
": %s" % result.get("Message", "Error message is absent."),
)
self.assertEqual(result["OK"], res, text)
if not res:
gLogger.info("Msg: %s" % (result["Message"]))
cmd = 'SELECT COUNT( * ) FROM ProxyDB_Proxies WHERE UserName="%s"' % user
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == 0),
"ProxyDB_Proxies must " + ("contain proxy" if res else "be empty"),
)
cmd = 'SELECT COUNT( * ) FROM ProxyDB_CleanProxies WHERE UserName="%s"' % user
self.assertTrue(
bool(db._query(cmd)["Value"][0][0] == (1 if res else 0)),
"ProxyDB_CleanProxies must " + ("contain proxy" if res else "be empty"),
)
        # The last test must leave a proxy in the DB
        gLogger.info("* Check that ProxyDB_CleanProxy contains the generated proxy..")
result = db.getProxiesContent({"UserName": "user"}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 1), "There must be exactly one generated proxy.")
cmd = 'SELECT COUNT( * ) FROM ProxyDB_CleanProxies WHERE UserName="user"'
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 1), "ProxyDB_CleanProxies must contain proxy")
gLogger.info("* Get proxy that store only in ProxyDB_CleanProxies..")
# Try to get proxy that was stored to ProxyDB_CleanProxies in previous step
for res, group, reqtime, log in [
(False, "group_1", 24 * 3600, "Request time more that in stored proxy"),
(False, "group_2", 0, "Request group not contain user"),
(True, "group_1", 0, "Request time less that in stored proxy"),
]:
gLogger.info("== > %s:" % log)
result = db.getProxy("/C=CC/O=DN/O=DIRAC/CN=user", group, reqtime)
text = "Must be ended %s%s" % (
res and "successful" or "with error",
": %s" % result.get("Message", "Error message is absent."),
)
self.assertEqual(result["OK"], res, text)
if res:
chain = result["Value"][0]
self.assertTrue(chain.isValidProxy()["OK"], "\n" + result.get("Message", "Error message is absent."))
result = chain.getDIRACGroup()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertEqual("group_1", result["Value"], "Group must be group_1, not " + result["Value"])
else:
gLogger.info("Msg: %s" % (result["Message"]))
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy("/C=CC/O=DN/O=DIRAC/CN=user", proxyProvider="Certificate")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "Proxies are present in the DB.")
        gLogger.info("* Get proxy when it is stored only in ProxyDB_Proxies..")
        # Make a record with a proxy that contains a group; set dn/group explicitly
        # instead of relying on variables leaking from the loops above
        dn, group = "/C=CC/O=DN/O=DIRAC/CN=user", "group_1"
        result = ca._forceGenerateProxyForDN(dn, 12 * 3600, group=group)
        self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        proxyStr = result["Value"][1]
        cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
        cmd += '("user", "%s", "%s", "%s", TIMESTAMPADD(SECOND, 43200, UTC_TIMESTAMP()))' % (dn, group, proxyStr)
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Try to get it
result = db.getProxy(dn, group, 1800)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Check that proxy contain group
chain = result["Value"][0]
self.assertTrue(chain.isValidProxy()["OK"], "\n" + result.get("Message", "Error message is absent."))
result = chain.getDIRACGroup()
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
self.assertEqual("group_1", result["Value"], "Group must be group_1, not " + result["Value"])
gLogger.info("* Check that DB is clean..")
result = db.deleteProxy("/C=CC/O=DN/O=DIRAC/CN=user")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
result = db.getProxiesContent({"UserName": ["user_ca", "user", "user_1", "user_2", "user_3"]}, {})
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
        self.assertTrue(bool(int(result["Value"]["TotalRecords"]) == 0), "Proxies are present in the DB.")
gLogger.info("* Get VOMS proxy..")
for vomsuser in ["user", "user_1"]:
# Create proxy with VOMS extension
result = self.createProxy(vomsuser, "group_1", 12, vo="vo_1", role="role_2")
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
chain, proxyStr = result["Value"]
# Assert VOMSProxy
self.assertTrue(bool(chain.isVOMS().get("Value")), "Cannot create proxy with VOMS extension")
cmd = "INSERT INTO ProxyDB_Proxies(UserName, UserDN, UserGroup, Pem, ExpirationTime) VALUES "
cmd += '("%s", "/C=CC/O=DN/O=DIRAC/CN=%s", "group_1", "%s", ' % (vomsuser, vomsuser, proxyStr)
cmd += "TIMESTAMPADD(SECOND, 43200, UTC_TIMESTAMP()))"
result = db._update(cmd)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
# Try to get proxy with VOMS extension
for dn, group, role, time, log in [
("/C=CC/O=DN/O=DIRAC/CN=user_4", "group_2", False, 9999, "Not exist VO for current group"),
(
"/C=CC/O=DN/O=DIRAC/CN=user",
"group_1",
"role_1",
9999,
"Stored proxy already have different VOMS extension",
),
(
"/C=CC/O=DN/O=DIRAC/CN=user_1",
"group_1",
"role_1",
9999,
"Stored proxy already have different VOMS extension",
),
(
"/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org",
"group_1",
"role_1",
9999,
"Not correct VO configuration",
),
]:
gLogger.info("== > %s(DN: %s):" % (log, dn))
if not any([dn, group, role, time, log]):
gLogger.info(
"voms-proxy-fake command not working as expected, proxy have no VOMS extention, go to the next.."
)
continue
result = db.getVOMSProxy(dn, group, time, role)
self.assertFalse(result["OK"], "Must be fail.")
gLogger.info("Msg: %s" % result["Message"])
# Check stored proxies
for table, user, count in [("ProxyDB_Proxies", "user", 1), ("ProxyDB_CleanProxies", "user_ca", 1)]:
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="%s"' % (table, user)
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == count))
gLogger.info("* Delete proxies..")
for dn, table in [
("/C=CC/O=DN/O=DIRAC/CN=user", "ProxyDB_Proxies"),
("/C=DN/O=DIRACCA/OU=None/CN=user_ca/emailAddress=user_ca@diracgrid.org", "ProxyDB_CleanProxies"),
]:
result = db.deleteProxy(dn)
self.assertTrue(result["OK"], "\n" + result.get("Message", "Error message is absent."))
cmd = 'SELECT COUNT( * ) FROM %s WHERE UserName="user_ca"' % table
self.assertTrue(bool(db._query(cmd)["Value"][0][0] == 0))
if __name__ == "__main__":
suite = unittest.defaultTestLoader.loadTestsFromTestCase(ProxyDBTestCase)
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(testDB))
testResult = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not testResult.wasSuccessful())
|
ic-hep/DIRAC
|
tests/Integration/Framework/Test_ProxyDB.py
|
Python
|
gpl-3.0
| 29,906
|
[
"DIRAC"
] |
7de2b8ac1855b14fc448783c4d609292b523a4c35e73fd82b6f2a26a67a468b5
|
from setuptools import setup, find_packages
import imp
version = imp.load_source('librosa.version', 'librosa/version.py')
setup(
name='librosa',
version=version.version,
description='Python module for audio and music processing',
author='Brian McFee',
author_email='brian.mcfee@nyu.edu',
url='http://github.com/bmcfee/librosa',
download_url='http://github.com/bmcfee/librosa/releases',
packages=find_packages(),
package_data={'': ['example_data/*']},
long_description="""A python module for audio and music processing.""",
classifiers=[
"License :: OSI Approved :: ISC License (ISCL)",
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Multimedia :: Sound/Audio :: Analysis",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
],
keywords='audio music sound',
license='ISC',
install_requires=[
'audioread',
'numpy >= 1.8.0',
'scipy >= 0.13.0',
'scikit-learn >= 0.14.0',
'matplotlib',
'joblib',
'decorator',
'six',
],
extras_require={
'resample': 'scikits.samplerate>=0.3',
'docs': ['numpydoc', 'seaborn', 'sphinx_rtd_theme']
}
)
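# A hedged usage sketch of the extras_require entries above (run from a
# source checkout; the extras names are exactly the keys declared here):
#   pip install .             # core install with install_requires only
#   pip install .[docs]       # additionally pulls numpydoc, seaborn, sphinx_rtd_theme
#   pip install .[resample]   # additionally pulls scikits.samplerate>=0.3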
|
yunque/librosa
|
setup.py
|
Python
|
isc
| 1,429
|
[
"Brian"
] |
ee009285fa814cbe7a200dbe838812799eeaa98f05689144527d1744649a1682
|
import vtk

def VtkDefineIdFilter(ugrid):
    # vtkIdFilter generates scalars from the point/cell identifiers
    idFilter = vtk.vtkIdFilter()
    idFilter.SetInput(ugrid)
    idFilter.CellIdsOff()
    idFilter.PointIdsOff()
    return idFilter
|
lcpt/xc
|
python_modules/postprocess/xcVtk/vtk_define_id_filter.py
|
Python
|
gpl-3.0
| 215
|
[
"VTK"
] |
ca021d22977d8a8941b896327d1f7913a8e9ce800b92f306780410ff4f25e6bc
|
# coding: utf-8
#
# Copyright 2015 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from celery import Celery
from kombu import Exchange, Queue
import config
app = Celery('pypln_workers', backend='mongodb',
broker='amqp://', include=['pypln.backend.workers'])
app.conf.update(
BROKER_URL=config.BROKER_URL,
CELERY_RESULT_BACKEND=config.CELERY_RESULT_BACKEND,
CELERY_QUEUES=(Queue(config.CELERY_QUEUE_NAME,
Exchange(config.CELERY_QUEUE_NAME),
routing_key=config.CELERY_QUEUE_NAME),),
CELERY_DEFAULT_QUEUE=config.CELERY_DEFAULT_QUEUE,
)
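# A hedged usage sketch (the module path mirrors this file; the queue name is
# whatever config.CELERY_QUEUE_NAME resolves to in your deployment):
#   celery -A pypln.backend.celery_app worker -Q <CELERY_QUEUE_NAME>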
|
flavioamieiro/pypln.backend
|
pypln/backend/celery_app.py
|
Python
|
gpl-3.0
| 1,245
|
[
"NAMD"
] |
42bfbe3ec41b0656cbdbec890d9dd2d657155ec08081e69c7f69e47998a689e8
|
from __future__ import print_function
from confirmation.models import Confirmation
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template import loader
from zerver.decorator import statsd_increment, uses_mandrill
from zerver.models import Recipient, ScheduledJob, UserMessage, \
Stream, get_display_recipient, get_user_profile_by_email, \
get_user_profile_by_id, receives_offline_notifications, \
get_context_for_message
import datetime
import re
import subprocess
import ujson
import urllib
from collections import defaultdict
def unsubscribe_token(user_profile):
# Leverage the Django confirmations framework to generate and track unique
# unsubscription tokens.
return Confirmation.objects.get_link_for_object(user_profile).split("/")[-1]
def one_click_unsubscribe_link(user_profile, endpoint):
"""
Generate a unique link that a logged-out user can visit to unsubscribe from
Zulip e-mails without having to first log in.
"""
token = unsubscribe_token(user_profile)
base_url = "https://" + settings.EXTERNAL_HOST
resource_path = "accounts/unsubscribe/%s/%s" % (endpoint, token)
return "%s/%s" % (base_url.rstrip("/"), resource_path)
def hashchange_encode(string):
# Do the same encoding operation as hashchange.encodeHashComponent on the
# frontend.
# `safe` has a default value of "/", but we want those encoded, too.
return urllib.quote(
string.encode("utf-8"), safe="").replace(".", "%2E").replace("%", ".")
def pm_narrow_url(participants):
participants.sort()
base_url = "https://%s/#narrow/pm-with/" % (settings.EXTERNAL_HOST,)
return base_url + hashchange_encode(",".join(participants))
def stream_narrow_url(stream):
base_url = "https://%s/#narrow/stream/" % (settings.EXTERNAL_HOST,)
return base_url + hashchange_encode(stream)
def topic_narrow_url(stream, topic):
base_url = "https://%s/#narrow/stream/" % (settings.EXTERNAL_HOST,)
return "%s%s/topic/%s" % (base_url, hashchange_encode(stream),
hashchange_encode(topic))
def build_message_list(user_profile, messages):
"""
Builds the message list object for the missed message email template.
The messages are collapsed into per-recipient and per-sender blocks, like
our web interface
"""
messages_to_render = []
def sender_string(message):
sender = ''
if message.recipient.type in (Recipient.STREAM, Recipient.HUDDLE):
sender = message.sender.full_name
return sender
def relative_to_full_url(content):
# URLs for uploaded content are of the form
# "/user_uploads/abc.png". Make them full paths.
#
# There's a small chance of colliding with non-Zulip URLs containing
# "/user_uploads/", but we don't have much information about the
# structure of the URL to leverage.
content = re.sub(
r"/user_uploads/(\S*)",
settings.EXTERNAL_HOST + r"/user_uploads/\1", content)
        # Our proxying of user-uploaded images seems to break inline images in HTML
# emails, so scrub the image but leave the link.
content = re.sub(
r"<img src=(\S+)/user_uploads/(\S+)>", "", content)
# URLs for emoji are of the form
# "static/third/gemoji/images/emoji/snowflake.png".
content = re.sub(
r"static/third/gemoji/images/emoji/",
settings.EXTERNAL_HOST + r"/static/third/gemoji/images/emoji/",
content)
return content
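    # A hedged example of the first rewrite above (the EXTERNAL_HOST value is hypothetical):
    #   "see /user_uploads/abc.png"  ->  "see zulip.example.com/user_uploads/abc.png"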
def fix_plaintext_image_urls(content):
# Replace image URLs in plaintext content of the form
# [image name](image url)
# with a simple hyperlink.
return re.sub(r"\[(\S*)\]\((\S*)\)", r"\2", content)
def fix_emoji_sizes(html):
return html.replace(' class="emoji"', ' height="20px"')
def build_message_payload(message):
plain = message.content
plain = fix_plaintext_image_urls(plain)
plain = relative_to_full_url(plain)
html = message.rendered_content
html = relative_to_full_url(html)
html = fix_emoji_sizes(html)
return {'plain': plain, 'html': html}
def build_sender_payload(message):
sender = sender_string(message)
return {'sender': sender,
'content': [build_message_payload(message)]}
def message_header(user_profile, message):
disp_recipient = get_display_recipient(message.recipient)
if message.recipient.type == Recipient.PERSONAL:
header = "You and %s" % (message.sender.full_name)
html_link = pm_narrow_url([message.sender.email])
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
elif message.recipient.type == Recipient.HUDDLE:
other_recipients = [r['full_name'] for r in disp_recipient
if r['email'] != user_profile.email]
header = "You and %s" % (", ".join(other_recipients),)
html_link = pm_narrow_url([r["email"] for r in disp_recipient
if r["email"] != user_profile.email])
header_html = "<a style='color: #ffffff;' href='%s'>%s</a>" % (html_link, header)
else:
header = "%s > %s" % (disp_recipient, message.subject)
stream_link = stream_narrow_url(disp_recipient)
topic_link = topic_narrow_url(disp_recipient, message.subject)
header_html = "<a href='%s'>%s</a> > <a href='%s'>%s</a>" % (
stream_link, disp_recipient, topic_link, message.subject)
return {"plain": header,
"html": header_html,
"stream_message": message.recipient.type_name() == "stream"}
# # Collapse message list to
# [
# {
# "header": {
# "plain":"header",
# "html":"htmlheader"
# }
# "senders":[
# {
# "sender":"sender_name",
# "content":[
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# {
# "plain":"content",
# "html":"htmlcontent"
# }
# ]
# }
# ]
# },
# ]
messages.sort(key=lambda message: message.pub_date)
for message in messages:
header = message_header(user_profile, message)
# If we want to collapse into the previous recipient block
if len(messages_to_render) > 0 and messages_to_render[-1]['header'] == header:
sender = sender_string(message)
sender_block = messages_to_render[-1]['senders']
# Same message sender, collapse again
if sender_block[-1]['sender'] == sender:
sender_block[-1]['content'].append(build_message_payload(message))
else:
# Start a new sender block
sender_block.append(build_sender_payload(message))
else:
# New recipient and sender block
recipient_block = {'header': header,
'senders': [build_sender_payload(message)]}
messages_to_render.append(recipient_block)
return messages_to_render
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events_reply_in_zulip(user_profile, missed_messages, message_count):
"""
Send a reminder email to a user if she's missed some PMs by being offline.
The email will have its reply to address set to a limited used email
address that will send a zulip message to the correct recipient. This
allows the user to respond to missed PMs, huddles, and @-mentions directly
from the email.
`user_profile` is the user to send the reminder to
    `missed_messages` is a list of Message objects to remind about; they should
    all have the same recipient and subject
"""
    # The user has disabled missed-message emails
if not user_profile.enable_offline_email_notifications:
return
recipients = set((msg.recipient_id, msg.subject) for msg in missed_messages)
if len(recipients) != 1:
raise ValueError(
'All missed_messages must have the same recipient and subject %r' %
recipients
)
template_payload = {
'name': user_profile.full_name,
'messages': build_message_list(user_profile, missed_messages),
'message_count': message_count,
'url': 'https://%s' % (settings.EXTERNAL_HOST,),
'reply_warning': False,
'external_host': settings.EXTERNAL_HOST,
'mention': missed_messages[0].recipient.type == Recipient.STREAM,
'reply_to_zulip': True,
}
headers = {}
from zerver.lib.email_mirror import create_missed_message_address
address = create_missed_message_address(user_profile, missed_messages[0])
headers['Reply-To'] = address
senders = set(m.sender.full_name for m in missed_messages)
sender_str = ", ".join(senders)
plural_messages = 's' if len(missed_messages) > 1 else ''
subject = "Missed Zulip%s from %s" % (plural_messages, sender_str)
from_email = "%s (via Zulip) <%s>" % (sender_str, settings.NOREPLY_EMAIL_ADDRESS)
text_content = loader.render_to_string('zerver/missed_message_email.txt', template_payload)
html_content = loader.render_to_string('zerver/missed_message_email_html.txt', template_payload)
msg = EmailMultiAlternatives(subject, text_content, from_email, [user_profile.email],
headers = headers)
msg.attach_alternative(html_content, "text/html")
msg.send()
user_profile.last_reminder = datetime.datetime.now()
user_profile.save(update_fields=['last_reminder'])
@statsd_increment("missed_message_reminders")
def do_send_missedmessage_events(user_profile, missed_messages, message_count):
"""
Send a reminder email and/or push notifications to a user if she's missed some PMs by being offline
`user_profile` is the user to send the reminder to
`missed_messages` is a list of Message objects to remind about
"""
    # The user has disabled missed-message emails
if not user_profile.enable_offline_email_notifications:
return
senders = set(m.sender.full_name for m in missed_messages)
sender_str = ", ".join(senders)
plural_messages = 's' if len(missed_messages) > 1 else ''
template_payload = {'name': user_profile.full_name,
'messages': build_message_list(user_profile, missed_messages),
'message_count': message_count,
'url': 'https://%s' % (settings.EXTERNAL_HOST,),
'reply_warning': False,
'external_host': settings.EXTERNAL_HOST}
headers = {}
if all(msg.recipient.type in (Recipient.HUDDLE, Recipient.PERSONAL)
for msg in missed_messages):
# If we have one huddle, set a reply-to to all of the members
# of the huddle except the user herself
disp_recipients = [", ".join(recipient['email']
for recipient in get_display_recipient(mesg.recipient)
if recipient['email'] != user_profile.email)
for mesg in missed_messages]
if all(msg.recipient.type == Recipient.HUDDLE for msg in missed_messages) and \
len(set(disp_recipients)) == 1:
headers['Reply-To'] = disp_recipients[0]
elif len(senders) == 1:
headers['Reply-To'] = missed_messages[0].sender.email
else:
template_payload['reply_warning'] = True
else:
# There are some @-mentions mixed in with personals
template_payload['mention'] = True
template_payload['reply_warning'] = True
headers['Reply-To'] = "Nobody <%s>" % (settings.NOREPLY_EMAIL_ADDRESS,)
# Give users a one-click unsubscribe link they can use to stop getting
# missed message emails without having to log in first.
unsubscribe_link = one_click_unsubscribe_link(user_profile, "missed_messages")
template_payload["unsubscribe_link"] = unsubscribe_link
subject = "Missed Zulip%s from %s" % (plural_messages, sender_str)
from_email = "%s (via Zulip) <%s>" % (sender_str, settings.NOREPLY_EMAIL_ADDRESS)
text_content = loader.render_to_string('zerver/missed_message_email.txt', template_payload)
html_content = loader.render_to_string('zerver/missed_message_email_html.txt', template_payload)
msg = EmailMultiAlternatives(subject, text_content, from_email, [user_profile.email],
headers = headers)
msg.attach_alternative(html_content, "text/html")
msg.send()
user_profile.last_reminder = datetime.datetime.now()
user_profile.save(update_fields=['last_reminder'])
def handle_missedmessage_emails(user_profile_id, missed_email_events):
message_ids = [event.get('message_id') for event in missed_email_events]
user_profile = get_user_profile_by_id(user_profile_id)
if not receives_offline_notifications(user_profile):
return
messages = [um.message for um in UserMessage.objects.filter(user_profile=user_profile,
message__id__in=message_ids,
flags=~UserMessage.flags.read)]
if not messages:
return
messages_by_recipient_subject = defaultdict(list)
for msg in messages:
messages_by_recipient_subject[(msg.recipient_id, msg.subject)].append(msg)
    message_count_by_recipient_subject = {
recipient_subject: len(msgs)
for recipient_subject, msgs in messages_by_recipient_subject.items()
}
for msg_list in messages_by_recipient_subject.values():
msg = min(msg_list, key=lambda msg: msg.pub_date)
if msg.recipient.type == Recipient.STREAM:
msg_list.extend(get_context_for_message(msg))
# Send an email per recipient subject pair
if user_profile.realm.domain == 'zulip.com':
for recipient_subject, msg_list in messages_by_recipient_subject.items():
unique_messages = {m.id: m for m in msg_list}
do_send_missedmessage_events_reply_in_zulip(
user_profile,
unique_messages.values(),
                message_count_by_recipient_subject[recipient_subject],
)
else:
all_messages = [
msg_
for msg_list in messages_by_recipient_subject.values()
for msg_ in msg_list
]
unique_messages = {m.id: m for m in all_messages}
do_send_missedmessage_events(
user_profile,
unique_messages.values(),
len(messages),
)
@uses_mandrill
def clear_followup_emails_queue(email, mail_client=None):
"""
Clear out queued emails (from Mandrill's queue) that would otherwise
    be sent to a specific email address.
    `email` is a string representing the recipient email
"""
# SMTP mail delivery implementation
if not mail_client:
items = ScheduledJob.objects.filter(type=ScheduledJob.EMAIL, filter_string__iexact = email)
items.delete()
return
    # Mandrill implementation
    for scheduled_email in mail_client.messages.list_scheduled(to=email):
        result = mail_client.messages.cancel_scheduled(id=scheduled_email["_id"])
if result.get("status") == "error":
print(result.get("name"), result.get("error"))
return
def log_digest_event(msg):
import logging
logging.basicConfig(filename=settings.DIGEST_LOG_PATH, level=logging.INFO)
logging.info(msg)
@uses_mandrill
def send_future_email(recipients, email_html, email_text, subject,
delay=datetime.timedelta(0), sender=None,
tags=[], mail_client=None):
"""
Sends email via Mandrill, with optional delay
'mail_client' is filled in by the decorator
"""
# When sending real emails while testing locally, don't accidentally send
# emails to non-zulip.com users.
if settings.DEVELOPMENT and \
settings.EMAIL_BACKEND != 'django.core.mail.backends.console.EmailBackend':
for recipient in recipients:
email = recipient.get("email")
if get_user_profile_by_email(email).realm.domain != "zulip.com":
raise ValueError("digest: refusing to send emails to non-zulip.com users.")
# message = {"from_email": "othello@zulip.com",
# "from_name": "Othello",
# "html": "<p>hello</p> there",
# "tags": ["signup-reminders"],
# "to": [{'email':"acrefoot@zulip.com", 'name': "thingamajig"}]
# }
# SMTP mail delivery implementation
if not mail_client:
if sender is None:
            # This will likely be overridden by settings.DEFAULT_FROM_EMAIL
sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
for recipient in recipients:
email_fields = {'email_html': email_html,
'email_subject': subject,
'email_text': email_text,
'recipient_email': recipient.get('email'),
'recipient_name': recipient.get('name'),
'sender_email': sender['email'],
'sender_name': sender['name']}
ScheduledJob.objects.create(type=ScheduledJob.EMAIL, filter_string=recipient.get('email'),
data=ujson.dumps(email_fields),
scheduled_timestamp=datetime.datetime.utcnow() + delay)
return
# Mandrill implementation
if sender is None:
sender = {'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}
message = {'from_email': sender['email'],
'from_name': sender['name'],
'to': recipients,
'subject': subject,
'html': email_html,
'text': email_text,
'tags': tags,
}
    # Ignore any delay smaller than one minute, because it's cheaper just to send those immediately
if not isinstance(delay, datetime.timedelta):
raise TypeError("specified delay is of the wrong type: %s" % (type(delay),))
if delay < datetime.timedelta(minutes=1):
results = mail_client.messages.send(message=message, async=False, ip_pool="Main Pool")
else:
send_time = (datetime.datetime.utcnow() + delay).__format__("%Y-%m-%d %H:%M:%S")
results = mail_client.messages.send(message=message, async=False, ip_pool="Main Pool", send_at=send_time)
problems = [result for result in results if (result['status'] in ('rejected', 'invalid'))]
if problems:
for problem in problems:
if problem["status"] == "rejected":
if problem["reject_reason"] == "hard-bounce":
# A hard bounce means the address doesn't exist or the
# recipient mail server is completely blocking
# delivery. Don't try to send further emails.
if "digest-emails" in tags:
from zerver.lib.actions import do_change_enable_digest_emails
bounce_email = problem["email"]
user_profile = get_user_profile_by_email(bounce_email)
do_change_enable_digest_emails(user_profile, False)
log_digest_event("%s\nTurned off digest emails for %s" % (
str(problems), bounce_email))
continue
elif problem["reject_reason"] == "soft-bounce":
# A soft bounce is temporary; let it try to resolve itself.
continue
raise Exception(
"While sending email (%s), encountered problems with these recipients: %r"
% (subject, problems))
return
def send_local_email_template_with_delay(recipients, template_prefix,
template_payload, delay,
tags=[], sender={'email': settings.NOREPLY_EMAIL_ADDRESS, 'name': 'Zulip'}):
html_content = loader.render_to_string(template_prefix + ".html", template_payload)
text_content = loader.render_to_string(template_prefix + ".text", template_payload)
subject = loader.render_to_string(template_prefix + ".subject", template_payload).strip()
return send_future_email(recipients,
html_content,
text_content,
subject,
delay=delay,
sender=sender,
tags=tags)
def enqueue_welcome_emails(email, name):
sender = {'email': 'wdaher@zulip.com', 'name': 'Waseem Daher'}
if settings.VOYAGER:
sender = {'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'}
user_profile = get_user_profile_by_email(email)
unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
template_payload = {'name': name,
'not_voyager': not settings.VOYAGER,
'external_host': settings.EXTERNAL_HOST,
'unsubscribe_link': unsubscribe_link}
#Send day 1 email
send_local_email_template_with_delay([{'email': email, 'name': name}],
"zerver/emails/followup/day1",
template_payload,
datetime.timedelta(hours=1),
tags=["followup-emails"],
sender=sender)
#Send day 2 email
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(hours=24)
# 11 AM EDT
tomorrow_morning = datetime.datetime(tomorrow.year, tomorrow.month, tomorrow.day, 15, 0)
assert(datetime.datetime.utcnow() < tomorrow_morning)
send_local_email_template_with_delay([{'email': email, 'name': name}],
"zerver/emails/followup/day2",
template_payload,
tomorrow_morning - datetime.datetime.utcnow(),
tags=["followup-emails"],
sender=sender)
def convert_html_to_markdown(html):
# On Linux, the tool installs as html2markdown, and there's a command called
# html2text that does something totally different. On OSX, the tool installs
# as html2text.
commands = ["html2markdown", "html2text"]
    for command in commands:
        try:
            # A body width of 0 means do not try to wrap the text for us.
            p = subprocess.Popen(
                [command, "--body-width=0"], stdout=subprocess.PIPE,
                stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
            break
        except OSError:
            continue
    else:
        raise OSError("Could not find any of the commands: %s" % (commands,))
markdown = p.communicate(input=html.encode("utf-8"))[0].strip()
    # We want images to get linked and inline previewed, but html2text will turn
    # them into links of the form `![](http://foo.com/image.png?sig)`, which is
    # ugly. Run a regex over the resulting description, turning links of the
    # form `![](http://foo.com/image.png?sig)` into
    # `[image.png](http://foo.com/image.png)`.
return re.sub(r"!\[\]\((\S*)/(\S*)\?(\S*)\)",
r"[\2](\1/\2)", markdown).decode("utf-8")
|
atomic-labs/zulip
|
zerver/lib/notifications.py
|
Python
|
apache-2.0
| 24,261
|
[
"VisIt"
] |
bca66e01b045ef355f103223627ce94c7cf2c16f4500c5def8e2b2949d4a88be
|
#!/usr/local/bin/env python
# =============================================================================================
# MODULE DOCSTRING
# =============================================================================================
"""
Run YANK self tests after installation.
"""
# =============================================================================================
# MODULE IMPORTS
# =============================================================================================
import doctest
import pkgutil
import subprocess
import re
from .. import version
from . import platforms
import simtk.openmm as mm
# =============================================================================================
# COMMAND-LINE INTERFACE
# =============================================================================================
usage = """
YANK selftest
Usage:
yank selftest [-d | --doctests] [-n | --nosetests] [--verbosity=#] [(--skip (platforms | openeye))]...
Description:
Run the YANK selftests to check that functions are behaving as expected and if external licenses are available
General Options:
-d, --doctests Run module doctests
-n, --nosetests Run the nosetests (slow) on YANK
--verbosity=# Optional verbosity level of nosetests OR verbose doctests [default: 1]
Does nothing without -n or -d
--skip Skips a named selftests (platforms OR openeye) for speed
May be specified multiple times, once per TEST
"""
# =============================================================================================
# COMMAND DISPATCH
# =============================================================================================
def dispatch(args):
# Determine verbosity in advance
# TODO: Figure out how to get -v back in to command and allow -vv and -vvv
# nosetests: -v == --verbosity=2
# Assuming that verbosity = 1 (or no -v) is no verbosity for doctests
# verbosity = max(args['-v'] + 1, int(args['--verbosity']))
verbosity = int(args['--verbosity'])
# Header
print("\n")
print("YANK Selftest")
print("-------------")
# Yank Version
print("Yank Version %s \n" % version.version)
# OpenMM Platforms
if not (args['platforms'] > 0): # Don't need to check for --skip since invalid without argument
platforms.dispatch(None)
# Errors
platform_errors = mm.Platform.getPluginLoadFailures()
if len(platform_errors) > 0: # This check only required to make header
print("************************************************")
print("\nWarning! There were OpenMM Platform Load Errors!")
print("************************************************")
for e in platform_errors:
print(e)
print("************************************************")
print("************************************************")
else:
print("Skipped OpenMM Platform Test")
# Space out tests
print("\n")
# OpenEye checks
if not (args['openeye'] > 0):
try:
import openeye
import openeye.examples.openeye_tests as OETests
print("OpenEye version %s Found! Checking install..." % openeye.__version__)
OETests.run_test_suite()
        except Exception:
print("Valid OpenEye install not found")
print("Not required, but please check install if you expected it")
else:
print("Skipped OpenEye Tests")
print("\n")
# NVIDIA-SMI calls
print("Checking GPU Computed Mode (if present)...")
try:
nvidia_output = subprocess.check_output('nvidia-smi -q -d COMPUTE', shell=True)
except subprocess.CalledProcessError as e:
print("nvidia-smi had an issue, could not find CUDA cards, however this may be expected on your system.")
else:
n_cards = 0
card_modes = []
split_nvidia_output = nvidia_output.split('\n')
for line in split_nvidia_output:
match = re.search('(?:Compute[^:]*:\s+)(\w+)', line)
if match:
n_cards += 1
card_modes.append(match.group(1))
if n_cards == 0:
print("nvidia-smi returned 'Compute' search, but no cards matched query pattern.\n"
"Please run `nvidia-smi` yourself to confirm the Compute Mode is in shared/Default")
else:
print("Found {} NVIDIA GPUs in the following modes: [".format(n_cards) + ', '.join(card_modes) + "]\n"
"These should all be in shared/Default mode for YANK to use them")
# Run nosetests
# Note: These will not run during standard nosetests because they must be explicitly called
# i.e. no infinite nosetests loop
if args['--nosetests']:
# Clear some lines
print("\n")
# Alert User
print("******************************************")
print("Nosetests invoked! This will take a while!")
print("******************************************")
import nose
try: # Check for timer install
result = nose.run(argv=['yank', '--nocapture', '--verbosity=%d' % verbosity, '--with-timer', '-a', '!slow'])
        except Exception:
result = nose.run(argv=['yank', '--nocapture', '--verbosity=%d' % verbosity, '-a', '!slow'])
print("\n")
# Doctests
if args['--doctests']:
# Alert User
print("*****************************************")
print("Doctests invoked! This will take a while!")
print("*****************************************")
# Run tests on main module.
import yank # NOT "from .. import yank" since we want to run on the whole module
if verbosity > 1:
verbose = True
else:
verbose = False
(failure_count, test_count) = doctest.testmod(yank, verbose=verbose)
# Run tests on all submodules.
package = yank
prefix = package.__name__ + "."
for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
module = __import__(modname, fromlist="dummy")
(module_failure_count, module_test_count) = doctest.testmod(module, verbose=verbose)
failure_count += module_failure_count
test_count += module_test_count
# Report results.
if failure_count == 0:
print("All doctests pass.")
else:
print("WARNING: There were %d doctest failures." % failure_count)
print("\n")
# Helpful end test
print("YANK Selftest complete.\nThank you for using YANK!\n")
return True
|
andrrizzi/yank
|
Yank/commands/selftest.py
|
Python
|
mit
| 6,775
|
[
"OpenMM"
] |
6311194f12dd3dd1c5b7f74d2ff3403b35755313a11eb71e857a9313e0f1e2e6
|
"""
Canned Views using PySAL and Matplotlib
"""
__author__ = "Marynia Kolak <marynia.kolak@gmail.com>"
import pandas as pd
import numpy as np
import pysal as ps
import matplotlib.pyplot as plt
__all__ = ['mplot']
def mplot(m, xlabel='', ylabel='', title='', custom=(7,7)):
"""
Produce basic Moran Plot
Parameters
----------
m : pysal.Moran instance
values of Moran's I Global Autocorrelation Statistic
xlabel : str
label for x axis
ylabel : str
label for y axis
title : str
title of plot
custom : tuple
dimensions of figure size
Returns
-------
fig : Matplotlib Figure instance
Moran scatterplot figure
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import pysal as ps
>>> from pysal.contrib.pdio import read_files
>>> from pysal.contrib.viz.plot import mplot
>>> link = ps.examples.get_path('columbus.shp')
>>> db = read_files(link)
>>> y = db['HOVAL'].values
>>> w = ps.queen_from_shapefile(link)
>>> w.transform = 'R'
>>> m = ps.Moran(y, w)
>>> mplot(m, xlabel='Response', ylabel='Spatial Lag',
... title='Moran Scatterplot', custom=(7,7))
>>> plt.show()
"""
lag = ps.lag_spatial(m.w, m.z)
fit = ps.spreg.OLS(m.z[:, None], lag[:,None])
# Customize plot
fig = plt.figure(figsize=custom)
ax = fig.add_subplot(111)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
fig.suptitle(title)
ax.scatter(m.z, lag, s=60, color='k', alpha=.6)
ax.plot(lag, fit.predy, color='r')
ax.axvline(0, alpha=0.5)
ax.axhline(0, alpha=0.5)
return fig
|
ljwolf/pysal
|
pysal/contrib/viz/plot.py
|
Python
|
bsd-3-clause
| 1,683
|
[
"COLUMBUS"
] |
c3a7a1c407ffab8e5fb8207fb201b8c1c4d2f9aaf9681a8ea87bf2e65c04ed7b
|
'''
Created on Aug 27, 2014
@author: David Zwicker <dzwicker@seas.harvard.edu>
Provides a dictionary with default parameters for the mouse tracking.
This can also be seen as some kind of documentation of the available
parameters.
'''
from __future__ import division
from collections import namedtuple, defaultdict
import os.path
import warnings
import numpy as np
# enum of different units that we use
class UNIT(object):
FACTOR = 1
FRACTION = 2
FOLDER = 3
SUBFOLDER = 4
COLOR = 5
BOOLEAN = 6
INTEGER = 7
STRING = 8
LIST = 9
LENGTH_PIXEL = 11
LENGTH_CM = 12
AREA_PIXEL = 15
TIME_FRAMES = 20
RATE_FRAMES = 21
TIME_SECONDS = 22
SPEED_PIXEL_FRAME = 30
SPEED_CM_SEC = 31
DEPRECATED = 100
# create dictionary with parser functions; attach it to the UNIT class so that
# the `UNIT.parser[...]` assignments below work
UNIT.parser = defaultdict(lambda: lambda val: val)
UNIT.parser[UNIT.FACTOR] = float
UNIT.parser[UNIT.FRACTION] = float
UNIT.parser[UNIT.BOOLEAN] = bool
UNIT.parser[UNIT.INTEGER] = int
UNIT.parser[UNIT.LIST] = list
UNIT.parser[UNIT.LENGTH_PIXEL] = float
UNIT.parser[UNIT.LENGTH_CM] = float
UNIT.parser[UNIT.AREA_PIXEL] = float
UNIT.parser[UNIT.TIME_FRAMES] = float
UNIT.parser[UNIT.RATE_FRAMES] = float
UNIT.parser[UNIT.SPEED_PIXEL_FRAME] = float
UNIT.parser[UNIT.SPEED_CM_SEC] = float
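# A minimal usage sketch of the parser mapping above (values are hypothetical):
#   UNIT.parser[UNIT.INTEGER]("42")  ->  42
#   UNIT.parser[UNIT.COLOR]("5")     ->  "5"  (no explicit entry, identity fallback)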
# define a class that holds information about parameters
Parameter = namedtuple('Parameter',
['key', 'default_value', 'unit', 'description'])
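# A minimal usage sketch (the Parameter instance here is hypothetical):
#   p = Parameter('video/blur_radius', 3, UNIT.LENGTH_PIXEL, 'Radius of the blur filter')
#   p.key -> 'video/blur_radius';  UNIT.parser[p.unit](p.default_value) -> 3.0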
# define all parameters that we support with associated information
PARAMETER_LIST = [
# Basic parameters
Parameter('python_paths', ['__video_analysis_path__', '__project_path__'],
UNIT.LIST,
'List of paths that will be appended to the python path.'),
Parameter('base_folder', '.', UNIT.FOLDER,
'Base folder in which all files are kept'),
Parameter('factor_length', 1, UNIT.DEPRECATED, #UNIT.FACTOR,
              'A factor by which all length scales will be scaled. '
              'Deprecated since 2014-12-20. Instead, `scale_length` should be '
              'used, which is processed once when the parameters are loaded'),
Parameter('use_threads', True, UNIT.BOOLEAN,
'Determines whether multithreading is used in analyzing the '
'videos. Generally, multithreading should speed up the analysis, '
'but this is not always the case, especially for small videos, '
'where the thread overhead is large.'),
# Video input
Parameter('video/filename_pattern', 'raw_video/*.MTS', UNIT.SUBFOLDER,
'Filename pattern used to look for videos'),
Parameter('video/folder_temporary', None, UNIT.SUBFOLDER,
'Folder in which the video should be stored temporarily, e.g. to '
'speed up the analysis.'),
Parameter('video/initial_adaptation_frames', 100, UNIT.TIME_FRAMES,
'Number of initial frames to skip during analysis'),
Parameter('video/blur_method', 'gaussian', UNIT.STRING,
'The method to be used for reducing noise in the video. The '
'supported methods are `mean`, `gaussian`, `bilateral`, in '
'increasing complexity, i.e. decreasing speed.'),
Parameter('video/blur_radius', 3, UNIT.LENGTH_PIXEL,
'Radius of the blur filter to remove noise'),
Parameter('video/blur_sigma_color', 0, UNIT.COLOR,
'Standard deviation in color space of the bilateral filter'),
Parameter('video/frames', None, None,
'Frames of the video which are analyzed [start and end index '
'should be given]'),
Parameter('video/frames_skip', 0, UNIT.TIME_FRAMES,
'Number of frames that are skipped before starting the '
'analysis. This value is only considered if `video/frames` '
'is None.'),
Parameter('video/rotation', 0, UNIT.INTEGER,
"Specifies how much the video will be rotated in "
"counter-clockwise direction. The value specified will be "
"multiplied by 90 degrees to specify the amount of rotation."),
Parameter('video/cropping_rect', None, None,
"Rectangle to which the video is cropped. This can be either "
"four numbers [left, top, width, height] or some string like "
"'upper left', 'lower right', etc."),
# Parameters of the video reading class
Parameter('video/video_parameters/reopen_delay', 0, UNIT.TIME_SECONDS,
"Delay in seconds before a video is reopened. This can prevent "
"some problems with filesystems"),
Parameter('video/video_parameters/video_info_method', 'ffprobe',
UNIT.STRING,
"Determines how video information, like the total number of "
"frames are determined. Possible values are `header` and "
"`ffprobe`. Note that the header information might be inaccurate "
"but using ffprobe requires iterating through the video once."),
Parameter('video/video_parameters/ffprobe_cache',
os.path.expanduser('~/.videos.sqlite'), UNIT.STRING,
'File where video information obtained from ffprobe will be '
'stored to prevent multiple runs of ffprobe on the same video'),
Parameter('video/video_parameters/seek_method', 'auto', UNIT.STRING,
"Method used for seeking in videos. Can be any of ['exact', "
"'keyframe', 'auto']. If 'auto', the method is determined based "
"on the ffmpeg version."),
Parameter('video/video_parameters/seek_max_frames', 100, UNIT.INTEGER,
"The maximal number of frames that will be seeked by simply "
"iterating the video. If larger jumps are desired, the video "
"will be reopened."),
Parameter('video/video_parameters/seek_offset', 1, UNIT.TIME_SECONDS,
"The time the rough seek is placed before the target in order to "
"make sure a keyframe is hit. This is only used if 'keyframe' is "
"chosen as a 'seek_method'"),
# Logging
Parameter('logging/enabled', True, UNIT.BOOLEAN,
'Flag indicating whether logging is enabled'),
Parameter('logging/folder', 'logging/', UNIT.SUBFOLDER,
'Folder to which the log file is written'),
Parameter('logging/level_stderr', 'INFO', None,
'Level of messages to log to stderr [standard python logging '
'levels]'),
Parameter('logging/level_file', 'INFO', None,
'Level of messages to log to file if folder is set '
'[standard python logging levels]'),
# Debug
Parameter('debug/output', [], UNIT.LIST,
"List of identifiers determining what debug output is produced. "
"Supported identifiers include 'video', 'explored_area', "
"'background', 'difference', 'cage_estimate', 'predug', "
"'ground_estimate', 'explored_area_mask'."),
Parameter('debug/use_multiprocessing', True, UNIT.BOOLEAN,
'Flag indicating whether multiprocessing should be used to read '
'and display videos'),
Parameter('debug/folder', 'debug/', UNIT.SUBFOLDER,
'Folder to which debug videos are written'),
Parameter('debug/window_position', None, None,
'Position (x, y) of the top-left corner of the debug window'),
# Output
Parameter('output/folder', 'results/', UNIT.SUBFOLDER,
'Folder to which the YAML and HDF5 result files are written'),
Parameter('output/video/folder', 'results/', UNIT.SUBFOLDER,
'Folder to which the result video is written'),
Parameter('output/video/folder_underground', 'results/underground_video/',
UNIT.SUBFOLDER,
'Folder to which the underground video is written'),
Parameter('output/output_period', 1, UNIT.TIME_FRAMES,
'How often are frames written to the output file or shown on the '
'screen'),
Parameter('output/hdf5_compression', 'gzip', None,
'Compression algorithm to be used for the HDF5 data. Possible '
'options might be None, "gzip", "lzf", and "szip".'),
# Output video
Parameter('output/video/enabled', True, UNIT.BOOLEAN,
'Flag determining whether the final video should be produced'),
Parameter('output/video/extension', '.mov', None,
'File extension used for debug videos'),
Parameter('output/video/codec', 'libx264', None,
'ffmpeg video codec used for debug videos'),
Parameter('output/video/bitrate', '2000k', None,
'Bitrate used for debug videos'),
Parameter('output/video/period', 100, UNIT.TIME_FRAMES,
'How often are frames written to the output file'),
Parameter('output/video/mouse_trail_length', 1000, UNIT.TIME_FRAMES,
'The length of the trail indicating the mouse position in the '
'past'),
Parameter('output/video/underground_video_length', 30*60*20, #< 20 min
UNIT.TIME_FRAMES,
'Typical length of the underground video in number of frames'),
Parameter('output/video/underground_display_item', '{time} [{frame}]',
UNIT.STRING,
'The data that is displayed in the underground video. '
'Placeholders like `{time}` and `{frame}` are replaced by the '
'data of the current frame.'),
Parameter('output/video/crop_border_buffer', 5, UNIT.LENGTH_CM,
'Size by which the cropping rectangle is extended when creating '
'a cropped movie.'),
Parameter('output/video/crop_frame_compression', 1, UNIT.FACTOR,
'Factor that determines how many frames are dropped to compress '
'the cropped video. For example, a factor of 2 drops every other '
'frame.'),
Parameter('output/video/crop_time_duration', -1, UNIT.TIME_SECONDS,
'Number of seconds the output video should last at maximum. '
'Negative numbers indicate that the video is not cut and thus '
'all available data is used.'),
Parameter('output/video/crop_bitrate', '1000k', None,
'Bitrate used for cropped video videos. If None is given, the '
'bitrate given in `output/video/bitrate` will be used.'),
# Cage
Parameter('cage/width_cm', 85.5, UNIT.LENGTH_CM,
'Measured width of the cages/antfarms. The width is measured '
'inside the cage, not including the frame.'),
Parameter('cage/determine_boundaries', True, UNIT.BOOLEAN,
'Flag indicating whether the cropping rectangle should be '
'determined automatically. If False, we assume that the original '
'video is already cropped'),
Parameter('cage/restrict_to_largest_patch', True, UNIT.BOOLEAN,
'Determines whether the cage analysis will be restricted to the '
'largest patch in the first thresholded image.'),
Parameter('cage/threshold_basic', 'otsu', None,
'Determines the basic method that is used for thresholding. The '
'default is `otsu`, which implements and automatic threshold. '
'Alternatively, a number between 0 and 255 can be given, which '
'is then used directly.'),
Parameter('cage/threshold_zscore', 0.5, UNIT.FACTOR,
'Factor that determines the threshold for producing the binary '
'image that is used to located the frame of the cage. The '
'threshold is calculated according to the formula '
'thresh = img_mean - factor*img_std, where factor is the factor'
'determined here.'),
Parameter('cage/refine_by_fitting', True, UNIT.BOOLEAN,
'Flag determining whether the cage rectangle should be refined '
'by using fitting to locate the cage boundaries.'),
Parameter('cage/boundary_detection_bottom_estimate', 0.95, UNIT.FRACTION,
'Fraction of the image height that is used to estimate the '
'position of the bottom of the frame'),
Parameter('cage/boundary_detection_thresholds', [0.7, 0.3, 0.7, 0.9],
UNIT.LIST,
'Thresholds for the boundary detection algorithm. The four '
'values are the fraction of bright pixels necessary to define '
'the boundary for [left, top, right, bottom], respectively.'),
Parameter('cage/width_min', 550, UNIT.LENGTH_PIXEL,
'Minimal width of the cage. This is only used to make a '
'plausibility test of the results'),
Parameter('cage/width_max', 800, UNIT.LENGTH_PIXEL,
'Maximal width of the cage. This is only used to make a '
'plausibility test of the results'),
Parameter('cage/height_min', 300, UNIT.LENGTH_PIXEL,
'Minimal height of the cage. This is only used to make a '
'plausibility test of the results'),
Parameter('cage/height_max', 500, UNIT.LENGTH_PIXEL,
'Maximal height of the cage. This is only used to make a '
'plausibility test of the results'),
Parameter('cage/rectangle_buffer', 5, UNIT.LENGTH_PIXEL,
'Margin by which the estimated cage rectangle is enlarged '
'before it is located by fitting.'),
Parameter('cage/linescan_length', 50, UNIT.LENGTH_PIXEL,
'Length of the line scan that is used to determine the cage '
'boundary.'),
Parameter('cage/linescan_width', 30, UNIT.LENGTH_PIXEL,
'Width of the line scan use to extend the ground line to the '
'cage frame.'),
Parameter('cage/linescan_smooth', 5, UNIT.LENGTH_PIXEL,
'Standard deviation used for smoothing the line scan profile'),
# Colors
Parameter('colors/adaptation_interval', 1000, UNIT.TIME_FRAMES,
'How often are the color estimates adapted'),
Parameter('colors/std_min', 5, UNIT.COLOR,
'Minimal standard deviation of sky and sand colors'),
# Background and explored area
Parameter('background/adaptation_rate', 1e-2, UNIT.RATE_FRAMES,
'Rate at which the background is adapted'),
Parameter('explored_area/adaptation_rate_outside', 1e-3, UNIT.RATE_FRAMES,
'Rate at which the explored area is adapted outside of burrows'),
Parameter('explored_area/adaptation_rate_burrows', 0, UNIT.RATE_FRAMES,
'Rate at which the explored area is adapted inside burrows'),
# Ground
Parameter('ground/template', '', None,
'Name of the ground template stored in the assets directory. '
'If the template is not given or could not be found, an '
'alternative method based on line scans is used.'),
Parameter('ground/template_width_factors', np.arange(0.7, 1.01, 0.05), None,
'Different factors to try for scaling the template width with '
'respect to the cage width.'),
Parameter('ground/template_aspect_factors', np.arange(0.7, 1.31, 0.1), None,
'Different factors to try for scaling the template aspect ratio'),
Parameter('ground/template_width_fraction', 0.8, UNIT.FRACTION,
'Fraction of the full template width that is used for matching.'),
Parameter('ground/template_margin', 40, UNIT.LENGTH_PIXEL,
'Margin on the top and the bottom of the template.'),
Parameter('ground/point_spacing', 20, UNIT.LENGTH_PIXEL,
'Spacing of the support points describing the ground profile'),
Parameter('ground/linescan_length', 50, UNIT.DEPRECATED,
'Length of the line scan used to determine the ground profile. '
'Deprecated since 2014-12-19'),
Parameter('ground/slope_detector_max_factor', 0.4, UNIT.FACTOR,
'Factor important in the ridge detection step, where the ridge '
'is roughly located by looking at vertical line scans and points '
'with large slopes are located. The smaller this factor, the '
'more such points are detected and the further up the profile is '
'estimated to be'),
Parameter('ground/length_max', 1500, UNIT.LENGTH_PIXEL,
'Maximal length of the ground profile above which it is rejected'),
Parameter('ground/curvature_energy_factor', 1, UNIT.DEPRECATED,
'Relative strength of the curvature energy to the image energy '
'in the snake model of the ground line. '
'Deprecated since 2014-12-19.'),
Parameter('ground/snake_energy_max', 10, UNIT.DEPRECATED,
'Determines the maximal energy the snake is allowed to have. '
'Deprecated since 2014-12-19'),
Parameter('ground/slope_max', 3, UNIT.FRACTION,
'Maximal slope of the side ridges'),
Parameter('ground/frame_margin', 50, UNIT.LENGTH_PIXEL,
'Width of the margin to the frame in which the ground profile is '
'not determined'),
Parameter('ground/grabcut_uncertainty_margin', 50, UNIT.LENGTH_PIXEL,
'Width of the region around the estimated profile, in which '
'the GrabCut algorithm may optimize'),
Parameter('ground/active_snake_gamma', 1e-1, UNIT.FACTOR,
'Time scale of the active snake evolution algorithm for finding '
'the ground line. Too large gammas may lead to instabilities in '
'the algorithm, while too small gammas may cause a very slow '
'convergence.'),
Parameter('ground/active_snake_beta', 1e6, UNIT.FACTOR,
'Stiffness of the active snake evolution algorithm for finding '
'the ground line. Larger values lead to straighter lines.'),
Parameter('ground/adaptation_interval', 100, UNIT.TIME_FRAMES,
'How often is the ground profile adapted'),
Parameter('ground/ridge_width', 5, UNIT.LENGTH_PIXEL,
'Width of the ground profile ridge'),
Parameter('ground/smoothing_sigma', 1000, UNIT.TIME_FRAMES,
'Standard deviation for Gaussian smoothing over time'),
# Water bottle localization
Parameter('water_bottle/remove_from_video', True, UNIT.BOOLEAN,
'Flag that indicates whether the water bottle should be removed '
'from the video'),
Parameter('water_bottle/template_image', 'water_bottle.png', None,
'Name of the template for removing the water bottle from the '
'background estimate.'),
Parameter('water_bottle/template_width', 60, UNIT.LENGTH_PIXEL,
'Width of the water bottle template. This will be scaled to the '
'right dimensions'),
Parameter('water_bottle/template_height', 60, UNIT.LENGTH_PIXEL,
'Height of the water bottle template. This will be scaled to the '
'right dimensions'),
Parameter('water_bottle/search_region', [0.8, 1., 0., 0.3], UNIT.LIST,
'Defines the region [x_min, x_max, y_min, y_max] in which the '
'upper left corner of the water bottle rectangle lies. The '
'coordinates are given relative to the cage width and height. '
'This is used to restrict the template matching to a sensible '
'region.'),
# Predug localization
Parameter('predug/locate_predug', True, UNIT.BOOLEAN,
'Flag determining whether the predug should be located.'),
Parameter('predug/location', 'auto', UNIT.STRING,
'Where the predug is located. Can be one of [`left`, `right`, '
'`auto`]. For `auto`, the predug is searched on both sides.'),
Parameter('predug/template_file', 'predug.yaml', None,
'Name of the template for detecting the predug.'),
Parameter('predug/scale_predug', True, UNIT.BOOLEAN,
'Flag indicating whether the predug template will be scaled to '
'the sizes given in `predug/template_width` and '
'`predug/template_height`.'),
Parameter('predug/template_width', 100, UNIT.LENGTH_PIXEL,
'Width of the predug template. This will be scaled to the right '
'dimensions'),
Parameter('predug/template_height', 100, UNIT.LENGTH_PIXEL,
'Height of the predug template. This will be scaled to the right '
'dimensions'),
Parameter('predug/wait_interval', 300,  # 30*60 #< 1 minute
UNIT.TIME_FRAMES,
'The time period after which the predug is detected.'),
Parameter('predug/search_height_factor', 1, UNIT.FACTOR,
'Determines the height of the area in which the predug is '
'searched for. Half the height is this factor times the maximal '
'vertical span of the ground line.'),
Parameter('predug/search_width_factor', 0.75, UNIT.FACTOR,
'Determines the width of the area in which the predug is '
'searched for. Half the width is this factor times the width of '
'the valley defined by the ground line.'),
Parameter('predug/refine_shape', True, UNIT.BOOLEAN,
'Determines whether the predug shape should be refined using the '
'actual intensity values of the image. If this is not enabled, '
'the returned predug shape will be that of the template.'),
Parameter('predug/simplify_threshold', 5, UNIT.AREA_PIXEL,
'Threshold value for simplifying the contour line of the '
'predug.'),
Parameter('predug/debug_with_lines', True, UNIT.BOOLEAN,
'Flag indicating whether lines should be drawn on the debug '
'image of the predug.'),
# Mouse and the associated tracking
Parameter('mouse/max_count', 1, UNIT.INTEGER,
'Maximal number of mice to be found. Most of the code has only '
'been tested with `max_count = 1`, but we eventually want to '
'extend this to more mice.'),
Parameter('mouse/intensity_threshold', 1, UNIT.FACTOR,
'Determines how much brighter than the background (usually the '
'sky) the mouse has to be. This value is measured in terms of '
'standard deviations of the sky color'),
Parameter('mouse/model_radius', 25, UNIT.LENGTH_PIXEL,
'Radius of the mouse model'),
Parameter('mouse/area_max', 5000, UNIT.AREA_PIXEL,
'Maximal area of a feature to be considered in tracking'),
Parameter('mouse/area_min', 100, UNIT.AREA_PIXEL,
'Minimal area of a feature to be considered in tracking'),
Parameter('mouse/area_mean', 700, UNIT.AREA_PIXEL,
'Mean area of a mouse, which is used to score the mouse'),
Parameter('mouse/speed_max', 30, UNIT.SPEED_PIXEL_FRAME,
'Maximal speed of the mouse'),
Parameter('mouse/max_rel_area_change', 0.5, UNIT.FACTOR,
'Maximal area change allowed between consecutive frames'),
Parameter('mouse/speed_smoothing_window', 25, UNIT.DEPRECATED,
'Deprecated since 2014-11-29. Use '
'`tracking/position_smoothing_window` instead.'),
Parameter('mouse/moving_threshold_cm_sec', 5, UNIT.SPEED_CM_SEC,
'The threshold value of the speed above which the mouse is '
'considered to be moving.'),
Parameter('mouse/moving_threshold_pixel_frame', None, UNIT.DEPRECATED,
'Deprecated since 2014-12-01.'),
Parameter('mouse/activity_smoothing_interval', 30*60*30, #< 30 minutes
UNIT.TIME_FRAMES,
'The standard deviation of the Gaussian that is used for '
'smoothing temporal data that is associated with activity '
'measurements.'),
Parameter('mouse/digging_rate_time_min', 30*60, UNIT.TIME_FRAMES,
'Minimal time span the mouse has to be digging before we '
'calculate a digging rate.'),
Parameter('tracking/weight', 0.5, UNIT.FACTOR,
'Relative weight of distance vs. size of objects for matching '
'them'),
Parameter('tracking/moving_window', 200, UNIT.TIME_FRAMES,
'Number of consecutive frames used for motion detection'),
Parameter('tracking/moving_threshold', 1, UNIT.SPEED_PIXEL_FRAME,
'Threshold speed above which an object is said to be moving'),
Parameter('tracking/object_count_max', 7, UNIT.INTEGER,
'Maximal number of objects allowed in a single frame. If there '
'are more objects, the entire frame is discarded'),
Parameter('tracking/time_scale', 10, UNIT.TIME_FRAMES,
'Time duration of not seeing the mouse after which we do not '
'know where it is anymore'),
Parameter('tracking/tolerated_overlap', 50, UNIT.TIME_FRAMES,
'How much are two consecutive tracks allowed to overlap'),
Parameter('tracking/initial_score_threshold', 1000, UNIT.FACTOR,
'Initial threshold for building the tracking graph'),
Parameter('tracking/score_threshold_max', 1e10, UNIT.FACTOR,
'Maximal threshold above which the graph based tracking is '
'aborted.'),
Parameter('tracking/end_node_interval', 1000, UNIT.TIME_FRAMES,
'What time duration do we consider for start and end nodes'),
Parameter('tracking/splitting_duration_min', 10, None,
'Track duration above which two overlapping tracks are split'),
Parameter('tracking/maximal_gap', 10, UNIT.TIME_FRAMES,
'Maximal gap length where we will use linear interpolation to '
'determine the mouse position'),
Parameter('tracking/maximal_jump', 50, UNIT.LENGTH_PIXEL,
'Maximal distance between two tracks where we will use linear '
'interpolation to determine the intermediate mouse positions.'),
Parameter('tracking/position_smoothing_window', 5, UNIT.TIME_FRAMES,
'The number of frames over which the mouse position is smoothed '
'in order to calculate its velocity'),
Parameter('tracking/mouse_distance_threshold', 500, UNIT.LENGTH_PIXEL,
'Distance over which an object must move in order to call it a '
'mouse. This is used to identify tracks which surely belong to '
'mice. Graph matching is then used to fill in the gaps.'),
Parameter('tracking/mouse_min_mean_speed', 0.5, UNIT.SPEED_PIXEL_FRAME,
'Minimal average speed an object must have in order to be '
'surely considered as a mouse. This is introduced to prevent '
'stationary objects from being called a mouse.'),
Parameter('tracking/max_track_count', 5000, UNIT.INTEGER,
'Maximal number of tracks that can be connected. If there are '
'more tracks, we throw out small tracks until the count '
'decreases to the one given here.'),
# Burrows
Parameter('burrows/enabled_pass1', False, UNIT.BOOLEAN,
'Whether burrows should be located in the first pass'),
Parameter('burrows/enabled_pass3', True, UNIT.BOOLEAN,
'Whether burrows should be located in the third pass'),
Parameter('burrows/enabled_pass4', True, UNIT.BOOLEAN,
'Whether burrows should be located in the fourth pass'),
Parameter('burrows/adaptation_interval', 100, UNIT.TIME_FRAMES,
'How often are the burrow shapes adapted'),
Parameter('burrows/cage_margin', 30, UNIT.LENGTH_PIXEL,
'Margin of a potential burrow to the cage boundary'),
Parameter('burrows/width', 20, UNIT.LENGTH_PIXEL,
'Typical width of a burrow'),
Parameter('burrows/width_min', 10, UNIT.LENGTH_PIXEL,
'Minimal width of a burrow'),
Parameter('burrows/chunk_area_min', 50, UNIT.AREA_PIXEL,
'Minimal area a burrow chunk needs to have in order to be '
'considered.'),
Parameter('burrows/area_min', 400, UNIT.AREA_PIXEL,
'Minimal area a burrow cross section has to have'),
Parameter('burrows/ground_point_distance', 10, UNIT.LENGTH_PIXEL,
'Maximal distance of ground profile to outline points that are '
'considered exit points'),
Parameter('burrows/shape_threshold_distance', 50, UNIT.LENGTH_PIXEL,
'Threshold value for the distance of burrow points from the '
'ground points. If all points are closer than this threshold, '
'the burrow is called a "wide burrow". Otherwise, the burrow '
'will be treated as a "long burrow".'),
Parameter('burrows/centerline_segment_length', 15, UNIT.LENGTH_PIXEL,
'Length of a segment of the center line of a burrow'),
Parameter('burrows/curvature_radius_max', 30, UNIT.LENGTH_PIXEL,
'Maximal radius of curvature the centerline is allowed to have'),
Parameter('burrows/grabcut_burrow_core_area_min', 500, UNIT.AREA_PIXEL,
'Minimal area the sure region of the mask for the grab cut '
'algorithm is supposed to have'),
Parameter('burrows/fitting_length_threshold', 100, UNIT.LENGTH_PIXEL,
'Length above which burrows are refined by fitting'),
Parameter('burrows/fitting_width_threshold', 30, UNIT.LENGTH_PIXEL,
'Width below which burrows are refined by fitting'),
Parameter('burrows/fitting_edge_width', 3, UNIT.LENGTH_PIXEL,
'Width of the burrow edge used in the template for fitting'),
Parameter('burrows/fitting_edge_R2min', -10, UNIT.FACTOR,
'Minimal value of the Coefficient of Determination (R^2) above '
'which the fit of a burrow edge is considered good enough and '
'will be used'),
Parameter('burrows/outline_simplification_threshold', 0.005, UNIT.FACTOR,
'Determines how much the burrow outline might be simplified. '
'The quantity determines by what fraction the total outline '
'length is allowed to change'),
Parameter('burrows/simplification_threshold_area', 50, UNIT.AREA_PIXEL,
'Burrow outline points are removed when the resulting effective '
'change is below this threshold'),
Parameter('burrows/chunk_dist_max', 30, UNIT.LENGTH_PIXEL,
'Maximal distance between a burrow chunk and another structure '
'(either another chunk or the ground line), such that the chunk '
'is connected to the other structure.'),
Parameter('burrows/image_statistics_window', 50, UNIT.LENGTH_PIXEL,
'Half of the size of the window over which the statistics of '
'the image are calculated.'),
Parameter('burrows/image_statistics_overlap_threshold', 0.5, UNIT.FRACTION,
'The threshold value of the allowed overlap of the background '
'and foreground statistics. If the distributions overlap more '
'than this value the point is considered to be background since '
'it cannot be discriminated reliably.'),
Parameter('burrows/activity_ignore_interval', 30*60*5, #< 5 minutes
UNIT.TIME_FRAMES,
'The time interval of the burrow trajectory that is ignored in '
'the activity analysis. This is mainly done to circumvent '
'problems with the initial predug.'),
Parameter('burrows/activity_smoothing_interval', 30*60*30, #< 30 minutes
UNIT.TIME_FRAMES,
'The standard deviation of the Gaussian that is used for '
'smoothing temporal data that is associated with activity '
'measurements.'),
Parameter('burrows/predug_analyze_time', 30*60, UNIT.TIME_FRAMES, #< 1 min
'The time duration after burrow detection at which the predug is '
'analyzed.'),
Parameter('burrows/predug_area_threshold', 1000, UNIT.AREA_PIXEL,
'The minimal area in pixels the burrow has to have in order to '
'be considered as a predug.'),
Parameter('burrows/initiation_threshold', 300, UNIT.AREA_PIXEL,
'Minimal area that a burrow has to be dug in order to be counted '
'as `initiated`'),
Parameter('burrows/active_contour/blur_radius', 2, UNIT.LENGTH_PIXEL,
'Blur radius of the active contour algorithm used for refining '
'the burrow shape.'),
Parameter('burrows/active_contour/stiffness', 1e4, UNIT.AREA_PIXEL,
'Stiffness of the active contour algorithm used for refining the '
'burrow shape.'),
Parameter('burrows/active_contour/convergence_rate', 1e-2, UNIT.FACTOR,
'Convergence rate of the active contour algorithm used for '
'refining the burrow shape.'),
Parameter('burrows/active_contour/max_iterations', 100, UNIT.FACTOR,
'Maximal number of iterations of the active contour algorithm '
'used for refining the burrow shape.'),
# analysis after tracking
Parameter('analysis/frames', None, None,
'Frames of the video which are included in the report of the '
'analysis [start and end index should be given]. If this is '
'omitted, all analyzed frames are included'),
Parameter('analysis/burrow_pass', 3, UNIT.INTEGER,
'Determines the video analysis pass from which the burrow data '
'is loaded to do analysis.'),
# Computation resources
Parameter('project/symlink_folder', None, None,
'If set, a symlink pointing to the base_folder will be created '
'in this directory when a project is created.'),
Parameter('resources/notification_email', 'dzwicker@seas.harvard.edu', None,
'Email address of the user to be notified in case of problems.'),
Parameter('resources/slurm_partition', 'general', None,
'Name of the slurm partition to use for submitting jobs'),
Parameter('resources/pass0/job_id', None, None, 'Job id of pass 0'),
Parameter('resources/pass0/cores', 1, UNIT.INTEGER,
'Number of cores for pass 0'),
Parameter('resources/pass0/time', 10*60, None,
'Maximal computation minutes for pass 0'),
Parameter('resources/pass0/memory', 1000, None,
'Maximal RAM per core for pass 0 [in MB]'),
Parameter('resources/pass1/job_id', None, None, 'Job id of pass 1'),
Parameter('resources/pass1/cores', 3, UNIT.INTEGER,
'Number of cores for pass 1'),
Parameter('resources/pass1/time', 50*60, None,
'Maximal computation minutes for pass 1'),
Parameter('resources/pass1/memory', 2000, None,
'Maximal RAM per core for pass 1 [in MB]'),
Parameter('resources/pass2/job_id', None, None, 'Job id of pass 2'),
Parameter('resources/pass2/cores', 1, UNIT.INTEGER,
'Number of cores for pass 2'),
Parameter('resources/pass2/time', 25*60, None,
'Maximal computation minutes for pass 2'),
Parameter('resources/pass2/memory', 24000, None,
'Maximal RAM per core for pass 2 [in MB]'),
Parameter('resources/pass3/job_id', None, None, 'Job id of pass 3'),
Parameter('resources/pass3/cores', 2, UNIT.INTEGER,
'Number of cores for pass 3'),
Parameter('resources/pass3/time', 30*60, None,
'Maximal computation minutes for pass 3'),
Parameter('resources/pass3/memory', 6000, None,
'Maximal RAM per core for pass 3 [in MB]'),
Parameter('resources/pass4/job_id', None, None, 'Job id of pass 4'),
Parameter('resources/pass4/cores', 2, UNIT.INTEGER,
'Number of cores for pass 4'),
Parameter('resources/pass4/time', 25*60, None,
'Maximal computation minutes for pass 4'),
Parameter('resources/pass4/memory', 2000, None,
'Maximal RAM per core for pass 4 [in MB]'),
Parameter('resources/pass7/cores', 2, UNIT.INTEGER,
'Number of cores for pass 7'),
Parameter('resources/pass7/job_id', None, None, 'Job id of pass 7'),
Parameter('resources/pass7/time', 6*24*60, None,
'Maximal computation minutes for pass 7'),
Parameter('resources/pass7/memory', 2000, None,
'Maximal RAM per core for pass 7 [in MB]'),
Parameter('resources/pass9/cores', 2, UNIT.INTEGER,
'Number of cores for pass 9'),
Parameter('resources/pass9/job_id', None, None, 'Job id of pass 9'),
Parameter('resources/pass9/time', 3*24*60, None,
'Maximal computation minutes for pass 9'),
Parameter('resources/pass9/memory', 2000, None,
'Maximal RAM per core for pass 9 [in MB]'),
]
# test the default values of the parameters
for p in PARAMETER_LIST:
try:
UNIT.parser[p.unit](p.default_value)
except:
print('The default value for parameter `%s` was not consistent '
'with the given unit.' % p.key)
raise
# collect all parameters in a convenient dictionary
PARAMETERS = {p.key: p for p in PARAMETER_LIST}
# collect the default values of all parameters
PARAMETERS_DEFAULT = {p.key: p.default_value for p in PARAMETER_LIST
if p.unit != UNIT.DEPRECATED}
# clear namespace
del p
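# Hedged usage sketch (editor's illustration, not part of the original
# module): the dictionaries built above are typically consumed by copying the
# defaults and overriding individual keys; the key chosen here is arbitrary.
_sketch_params = dict(PARAMETERS_DEFAULT)     # plain copy of all defaults
_sketch_params['mouse/model_radius'] = 30     # override a single parameter
assert PARAMETERS['mouse/model_radius'].unit == UNIT.LENGTH_PIXEL
del _sketch_params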
def set_base_folder(parameters, folder, include_default=False):
""" changes the base folder of all folders given in the parameter
dictionary.
include_default is a flag indicating whether the default parameters
describing folders should also be included. """
warnings.warn("Base folder is a parameter now.", DeprecationWarning)
# convert to plain dictionary if it is anything else
parameters_type = type(parameters)
if parameters_type != dict:
parameters = parameters.to_dict(flatten=True)
# add all default folders, which will be changed later
if include_default:
for p in PARAMETER_LIST:
if p.unit == UNIT.SUBFOLDER and p.key not in parameters:
parameters[p.key] = p.default_value
# set the base folder
parameters['base_folder'] = folder
# return the result as the original type
return parameters_type(parameters)
def scale_parameters(parameters, factor_length=1, factor_time=1):
""" takes a NestedDict dictionary of parameters and scales them according to
their unit and the given scale factors """
# scale each parameter in the list
for key in parameters.iterkeys(flatten=True):
unit = PARAMETERS[key].unit
if unit == UNIT.LENGTH_PIXEL:
parameters[key] *= factor_length
elif unit == UNIT.AREA_PIXEL:
parameters[key] *= factor_length**2
elif unit == UNIT.TIME_FRAMES:
parameters[key] *= factor_time
elif unit == UNIT.RATE_FRAMES:
parameters[key] /= factor_time
elif unit == UNIT.SPEED_PIXEL_FRAME:
parameters[key] *= factor_length/factor_time
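# Hedged usage sketch (editor's illustration): rescaling the defaults for a
# hypothetical video with twice the spatial resolution and half the frame
# rate. `NestedDict` is the dictionary class referenced in the docstring
# above; its import path is not shown in this file, so the sketch stays
# commented out.
#
#     params = NestedDict(PARAMETERS_DEFAULT)
#     scale_parameters(params, factor_length=2, factor_time=0.5)
#     # LENGTH_PIXEL values are now doubled, TIME_FRAMES values halved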
|
david-zwicker/cv-mouse-burrows
|
mouse_burrows/algorithm/parameters.py
|
Python
|
bsd-3-clause
| 39,885
|
[
"Gaussian"
] |
147189dba110af9c6a68eb536e393306ce7f10d22ffcf048a02e150fe32437f3
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------------
# Name: misc.py
# Purpose: Miscellaneous utility functions.
# Licence: MIT License
# This file is subject to the terms and conditions of the MIT License.
# For further details, please refer to LICENSE.txt
#-------------------------------------------------------------------------------
'''miscellaneous utility functions.
'''
from __future__ import print_function, division
import os as _os
import zdde as _pyz
import collections as _co
import pyzdde.config as _config
_global_pyver3 = _config._global_pyver3
if _global_pyver3:
xrange = range
try:
import numpy as _np
except ImportError:
_global_np = False
else:
_global_np = True
def _draw_plane(ln, space='img', dist=0, surfName=None, semiDia=None):
"""function to draw planes at the points specified by `dist` in the
space specified by `space`
Parameters
----------
ln : pyzdde object
active link object
space : string (`img` or `obj`), optional
image space or object space in which the plane is specified. 'img' for
image space, 'obj' for object space. This info is required because
Zemax returns distances that are measured w.r.t. surface 1 (@LDE) in
object space, and w.r.t. IMG in image space. See the Assumptions.
dist : float, optional
distance along the optical axis of the plane from surface 2 (@LDE) if
`space` is `obj` else from the IMG surface. This assumes that surface 1
is a dummy surface
surfName : string, optional
name to identify the surf in the LDE, added to the comments column
semiDia : real, optional
semi-diameter of the surface to set
Returns
-------
None
Assumptions (important to read)
-------------------------------
The function assumes (for the purpose of this study) that surface 1 @ LDE is
a dummy surface at certain distance preceding the first actual lens surface.
This enables the rays entering the lens to be visible in the Zemax layout
plots even if the object is at infinity. So the function inserts the planes
(and their associated dummy surfaces) beginning at surface 2.
"""
numSurf = ln.zGetNumSurf()
inSurfPos = numSurf if space=='img' else 2 # assuming that the first surface will be a dummy surface
ln.zInsertDummySurface(surfNum=inSurfPos, thick=dist, semidia=0, comment='dummy')
ln.zInsertSurface(inSurfPos+1)
ln.zSetSurfaceData(inSurfPos+1, ln.SDAT_COMMENT, surfName)
if semiDia:
ln.zSetSemiDiameter(surfNum=inSurfPos+1, value=semiDia)
thickSolve, pickupSolve = 1, 5
frmSurf, scale, offset, col = inSurfPos, -1, 0, 0
ln.zSetSolve(inSurfPos+1, thickSolve, pickupSolve, frmSurf, scale, offset, col)
def gaussian_lens_formula(u=None, v=None, f=None, infinity=10e20):
"""return the third value of the Gaussian lens formula, given any two
Parameters
----------
u : float, optional
object distance from first principal plane.
v : float, optional
image distance from rear principal plane
f : float, optional
focal length
infinity : float
numerical value to represent infinity (default=10e20)
Returns
-------
glfParams : namedtuple
named tuple containing the Gaussian Lens Formula parameters
Notes
-----
Both object and image distances are considered positive.
Examples
--------
>>> gaussian_lens_formula(u=30, v=None, f=10)
glfParams(u=30, v=15.0, f=10)
>>> gaussian_lens_formula(u=30, v=15)
glfParams(u=30, v=15, f=10.0)
>>> gaussian_lens_formula(u=1e20, f=10)
glfParams(u=1e+20, v=10.0, f=10)
"""
glfParams = _co.namedtuple('glfParams', ['u', 'v', 'f'])
def unknown_distance(knownDistance, f):
try:
unknownDistance = (knownDistance * f)/(knownDistance - f)
except ZeroDivisionError:
unknownDistance = infinity
return unknownDistance
def unknown_f(u, v):
return (u*v)/(u+v)
if sum(i is None for i in [u, v, f]) > 1:
raise ValueError('At most one parameter may be None')
if f is None:
if not u or not v:
raise ValueError('f cannot be determined from input')
else:
f = unknown_f(u, v)
else:
if u is None:
u = unknown_distance(v, f)
else:
v = unknown_distance(u, f)
return glfParams(u, v, f)
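# Hedged illustration (editor's addition): with the all-positive sign
# convention documented above, the magnitude of the transverse magnification
# follows directly from the returned distances as v/u.
_glf = gaussian_lens_formula(u=30.0, f=10.0)
assert abs(_glf.v - 15.0) < 1e-9           # matches the doctest example above
assert abs(_glf.v / _glf.u - 0.5) < 1e-9   # |magnification| = v/u = 0.5
del _glf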
def get_cardinal_points(ln):
"""Returns the distances of the cardinal points (along the optical axis).
For multiple wavelengths, the distances are averaged.
Parameters
----------
ln : object
PyZDDE object
Returns
-------
fpObj : float
distance of object side focal plane from surface # 1 in the LDE,
irrespective of which surface is defined as the global reference
fpImg : float
distance of image side focal plane from IMG surface
ppObj : float
distance of the object side principal plane from surface # 1 in the
LDE, irrespective of which surface is defined as the global
reference surface
ppImg : float
distance of the image side principal plane from IMG
Notes
-----
1. The data is consistent with the cardinal data in the Prescription file
in which, the object side data is with respect to the first surface in the LDE.
2. If there are more than one wavelength, then the distances are averaged.
"""
zmxdir = _os.path.split(ln.zGetFile())[0]
textFileName = _os.path.join(zmxdir, "tmp.txt")
ln.zGetTextFile(textFileName, 'Pre', "None", 0)
line_list = _pyz._readLinesFromFile(_pyz._openFile(textFileName))
ppObj, ppImg, fpObj, fpImg = 0.0, 0.0, 0.0, 0.0
count = 0
for line_num, line in enumerate(line_list):
# Extract the Focal plane distances
if "Focal Planes" in line:
fpObj += float(line.split()[3])
fpImg += float(line.split()[4])
# Extract the Principal plane distances.
if "Principal Planes" in line and "Anti" not in line:
ppObj += float(line.split()[3])
ppImg += float(line.split()[4])
count +=1 #Increment (wavelength) counter for averaging
# Calculate the average (for all wavelengths) of the principal plane distances
# This is only there for extracting a single point ... ideally the design
# should have just one wavelength defined!
if count > 0:
fpObj = fpObj/count
fpImg = fpImg/count
ppObj = ppObj/count
ppImg = ppImg/count
# Delete the temporary file
_pyz._deleteFile(textFileName)
cardinals = _co.namedtuple('cardinals', ['Fo', 'Fi', 'Ho', 'Hi'])
return cardinals(fpObj, fpImg, ppObj, ppImg)
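# Hedged usage sketch (editor's illustration; requires a live DDE link to a
# lens already loaded in Zemax, so it is left commented out):
#
#     Fo, Fi, Ho, Hi = get_cardinal_points(ln)
#     print('front focal plane, from surf 1 @ LDE:', Fo)
#     print('rear focal plane, from IMG:', Fi)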
def draw_pupil_cardinal_planes(ln, firstDummySurfOff=10, cardinalSemiDia=1.2, push=True):
"""Insert paraxial pupil and cardinal planes surfaces in the LDE for rendering in
layout plots. This is a semi-automated process; see notes.
Parameters
----------
ln : object
pyzdde object
firstDummySurfOff : float, optional
the thickness of the first dummy surface. This first dummy surface is
inserted by this function. See Notes.
cardinalSemiDia : float, optional
semidiameter of the cardinal surfaces. (Default=1.2)
push : bool
push lens in the DDE server to the LDE
Assumptions
-----------
The function assumes that the lens is already focused appropriately,
for either finite or infinite conjugate imaging.
Notes
-----
1. 'first dummy surface' is a dummy surface in LDE position 1 (between the
OBJ and the actual first lens surface) whose function is to show the input
rays to the left of the first optical surface.
2. The cardinal and pupil planes are drawn using standard surfaces in the LDE.
To ensure that the ray-tracing engine does not treat these surfaces as real
surfaces, we need to instruct Zemax to "ignore" rays to these surfaces.
Unfortunately, we cannot do it programmatically. So, after the planes have
been drawn, we need to manually do the following:
1. 2D Layout settings
a. Set number of rays to 1 or as needed
2. For the pupil (ENPP and EXPP) and cardinal surfaces (H, H', F, F'),
and the dummy surfaces (except for the dummy surface named "dummy 2
c rays" go to "Surface Properties" >> Draw tab
a. Select "Skip rays to this surface"
3. Set field points to be symmetric about the optical axis
3. For clarity, the semi-diameters of the dummy surfaces are set to zero.
"""
ln.zSetWave(0, 1, 1)
ln.zSetWave(1, 0.55, 1)
# insert dummy surface at 1 for showing the input ray
ln.zRemoveVariables()
# before inserting surface check to see if the object is at finite
# distance. If the object is at finite distance, inserting a dummy
# surface with finite thickness will change the image plane distance.
# so first decrease the thickness of the object surface by the
# thickness of the dummy surface
objDist = ln.zGetSurfaceData(surfNum=0, code=ln.SDAT_THICK)
assert firstDummySurfOff < objDist, ("dummy surf. thick ({}) must be < "
"than obj dist ({})!".format(firstDummySurfOff, objDist))
if objDist < 1.0E+10:
ln.zSetSurfaceData(surfNum=0, code=ln.SDAT_THICK, value=objDist - firstDummySurfOff)
ln.zInsertDummySurface(surfNum=1, thick=firstDummySurfOff, semidia=0, comment='dummy 2 c rays')
ln.zGetUpdate()
# Draw Exit and Entrance pupil planes
print("Textual information about the planes:\n")
expp = ln.zGetPupil().EXPP
print("Exit pupil distance from IMG:", expp)
_draw_plane(ln, 'img', expp, "EXPP")
enpp = ln.zGetPupil().ENPP
print("Entrance pupil from Surf 1 @ LDE:", enpp)
_draw_plane(ln, 'obj', enpp - firstDummySurfOff, "ENPP")
# Get and draw the Principal planes
fpObj, fpImg, ppObj, ppImg = get_cardinal_points(ln)
print("Focal plane obj F from surf 1 @ LDE: ", fpObj, "\nFocal plane img F' from IMA: ", fpImg)
_draw_plane(ln,'img', fpImg, "F'", cardinalSemiDia)
_draw_plane(ln,'obj', fpObj - firstDummySurfOff, "F", cardinalSemiDia)
print("Principal plane obj H from surf 1 @ LDE: ", ppObj, "\nPrincipal plane img H' from IMA: ", ppImg)
_draw_plane(ln,'img', ppImg, "H'", cardinalSemiDia)
_draw_plane(ln,'obj', ppObj - firstDummySurfOff, "H", cardinalSemiDia)
# Check the validity of the distances
ppObjToEnpp = ppObj - enpp
ppImgToExpp = ppImg - expp
focal = ln.zGetFirst().EFL
print("Focal length: ", focal)
print("Principal plane H to ENPP: ", ppObjToEnpp)
print("Principal plane H' to EXPP: ", ppImgToExpp)
v = gaussian_lens_formula(u=ppObjToEnpp, v=None, f=focal).v
print("Principal plane H' to EXPP (abs.) "
"calc. using lens equ.: ", abs(v))
ppObjTofpObj = ppObj - fpObj
ppImgTofpImg = ppImg - fpImg
print("Principal plane H' to rear focal plane: ", ppObjTofpObj)
print("Principal plane H to front focal plane: ", ppImgTofpImg)
print(("""\nCheck "Skip rays to this surface" under "Draw Tab" of the """
"""surface property for the dummy and cardinal plane surfaces. """
"""See Docstring Notes for details."""))
if push:
ln.zPushLens(1)
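# Hedged driver sketch (editor's addition; assumes a running Zemax instance
# and that `createLink`/`closeLink` are the usual PyZDDE entry points, so it
# is left commented out):
#
#     import pyzdde.zdde as pyz
#     ln = pyz.createLink()
#     try:
#         draw_pupil_cardinal_planes(ln, firstDummySurfOff=10, push=True)
#     finally:
#         pyz.closeLink()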
|
indranilsinharoy/PyZDDE
|
pyzdde/misc.py
|
Python
|
mit
| 11,707
|
[
"Gaussian"
] |
56ad6ea67a418a8bcb1838ffba194c65fe324e44ca69853da25f09d25c96cdcf
|
# -*- coding: utf-8 -*-
# Author: Josh Dick <josh@joshdick.net>
# <https://github.com/joshdick/weeprowl>
#
# Requires WeeChat version 0.3.7 or greater
# Released under GNU GPL v2
#
# Based on the 'notify' plugin version 0.0.5 by lavaramano <lavaramano AT gmail DOT com>:
# <http://www.weechat.org/scripts/source/stable/notify.py.html/>
#
# 2014-05-10, Sébastien Helleu <flashcode@flashtux.org>
# Version 0.7: Change hook_print callback argument type of
# displayed/highlight (WeeChat >= 1.0)
# 2013-12-22, Josh Dick <josh@joshdick.net>
# Version 0.6: Fixed bug that was preventing negative numbers from working with
# the prowl_priority setting
# 2013-12-20, Josh Dick <josh@joshdick.net>
# Version 0.5: Now backgrounds Prowl API requests, added prowl_priority setting,
# now requires WeeChat version 0.3.7 or greater
# 2013-08-13, Josh Dick <josh@joshdick.net>
# Version 0.4: No longer sending notifications for text you send in private messages
# 2012-09-16, Josh Dick <josh@joshdick.net>
# Version 0.3: Removed 'smart_notification' and away_notification' settings
# in favor of more granular notification settings
# 2012-09-16, Josh Dick <josh@joshdick.net>
# Version 0.2: Added 'away_notification' setting
# 2012-03-25, Josh Dick <josh@joshdick.net>
# Version 0.1: Initial release
import urllib, weechat
weechat.register('weeprowl', 'Josh Dick', '0.7', 'GPL', 'Prowl notifications for WeeChat', '', '')
# Plugin settings
settings = {
'prowl_api_key': '',
'prowl_priority': '0', # An integer value in the range [-2, 2] per http://www.prowlapp.com/api.php#add
'show_hilights': 'on',
'show_priv_msg': 'on',
'nick_separator': ': ',
'notify_focused_active': 'on', # If 'on', send Prowl notifications for the currently-focused buffer when not away
'notify_focused_away': 'on', # If 'on', send Prowl notifications for the currently-focused buffer when away
'notify_unfocused_active': 'on', # If 'on', send Prowl notifications for non-focused buffers when not away
'notify_unfocused_away': 'on' # If 'on', send Prowl notifications for non-focused buffers when away
}
# Hook for private messages/hilights
weechat.hook_print('', 'irc_privmsg', '', 1, 'notification_callback', '')
# Shows an error/help message if prowl_api_key is not set
def show_config_help():
weechat.prnt('', '%sweeprowl - Error: Your Prowl API key is not set!' % weechat.prefix('error'))
weechat.prnt('', '%sweeprowl - To obtain a Prowl API key, visit <http://prowlapp.com>.' % weechat.prefix('error'))
weechat.prnt('', '%sweeprowl - Once you have a Prowl API key, configure weeprowl to use it by running:' % weechat.prefix('error'))
weechat.prnt('', '%sweeprowl - /set plugins.var.python.weeprowl.prowl_api_key "your_prowl_api_key_here"' % weechat.prefix('error'))
# Shows an error when there was a problem sending a Prowl notification.
def show_notification_error():
weechat.prnt('', '%sweeprowl - Could not send Prowl notification.' % weechat.prefix('error'))
# Triggered by the weechat hook above
def notification_callback(data, bufferp, uber_empty, tagsn, isdisplayed, ishilight, prefix, message):
is_away = weechat.buffer_get_string(bufferp, 'localvar_away')
is_focused = bufferp == weechat.current_buffer()
do_prowl = True # If set to False depending on state and settings, no Prowl notification will be sent
if (is_away):
if (is_focused and weechat.config_get_plugin('notify_focused_away') != 'on'):
do_prowl = False
elif (not is_focused and weechat.config_get_plugin('notify_unfocused_away') != 'on'):
do_prowl = False
else:
if (is_focused and weechat.config_get_plugin('notify_focused_active') != 'on'):
do_prowl = False
elif (not is_focused and weechat.config_get_plugin('notify_unfocused_active') != 'on'):
do_prowl = False
if (do_prowl):
if (weechat.buffer_get_string(bufferp, 'localvar_type') == 'private' and weechat.config_get_plugin('show_priv_msg') == 'on' and prefix != weechat.buffer_get_string(bufferp, 'localvar_nick')):
send_prowl_notification(prefix, message, True)
elif (int(ishilight) and weechat.config_get_plugin('show_hilights') == 'on'):
buffer = (weechat.buffer_get_string(bufferp, 'short_name') or weechat.buffer_get_string(bufferp, 'name'))
send_prowl_notification(buffer, prefix + weechat.config_get_plugin('nick_separator') + message, False)
return weechat.WEECHAT_RC_OK
# Send a Prowl notification via the Prowl API (API documentation: <http://www.prowlapp.com/api.php>)
def send_prowl_notification(chan, message, isPrivate):
# Make sure a Prowl API key has been configured
prowl_api_key = weechat.config_get_plugin('prowl_api_key')
if (prowl_api_key == ''):
show_config_help()
show_notification_error()
return
# Make sure a valid Prowl priority has been configured
prowl_priority = weechat.config_get_plugin('prowl_priority')
valid_prowl_priority = True
try:
if (int(prowl_priority) > 2 or int(prowl_priority) < -2):
valid_prowl_priority = False
except ValueError:
valid_prowl_priority = False
if (not valid_prowl_priority):
weechat.prnt('', '%sweeprowl - Current prowl_priority setting "%s" is invalid.' % (weechat.prefix('error'), prowl_priority))
weechat.prnt('', '%sweeprowl - Please set prowl_priority to an integer value in the range [-2, 2].' % weechat.prefix('error'))
show_notification_error()
return
# Build the Prowl API request parameters
params = urllib.urlencode({
'apikey': prowl_api_key,
'application': 'weechat',
'event': 'IRC ' + ('Private Message' if isPrivate else 'Mention/Hilight'),
'description': 'Channel: ' + chan + '\n' + message,
'priority': prowl_priority
})
# Build the complete Prowl API request URL
prowl_api_url = 'https://api.prowlapp.com/publicapi/add?' + params
# Make the Prowl API request
weechat.hook_process_hashtable(
'url:' + prowl_api_url,
{ 'post': '1' },
30 * 1000,
'send_prowl_notification_callback',
''
)
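# Hedged illustration (editor's note, not from the original script): with
# prowl_api_key "abc123" the URL built above has the shape
#   https://api.prowlapp.com/publicapi/add?apikey=abc123&application=weechat&...
# hook_process_hashtable() then performs the POST in the background with a
# 30-second timeout, so the IRC client never blocks on the HTTP round trip.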
# Callback that handles the result of the Prowl API request
def send_prowl_notification_callback(data, command, rc, stdout, stderr):
# Show an error if the Prowl API request failed
if (rc > 0):
weechat.prnt('', '%sweeprowl - Error: There was a problem communicating with the Prowl API!' % weechat.prefix('error'))
weechat.prnt('', '%sweeprowl - Prowl API response information:' % weechat.prefix('error'))
weechat.prnt('', '%sweeprowl - Response code = %s' % (weechat.prefix('error'), rc))
weechat.prnt('', '%sweeprowl - STDOUT = %s' % (weechat.prefix('error'), stdout))
weechat.prnt('', '%sweeprowl - STDERR = %s' % (weechat.prefix('error'), stderr))
show_notification_error()
return weechat.WEECHAT_RC_OK
# Initialization
for option, default_value in settings.items():
if weechat.config_get_plugin(option) == '':
weechat.config_set_plugin(option, default_value)
if (weechat.config_get_plugin('prowl_api_key') == ''):
show_config_help()
# vim: autoindent expandtab smarttab shiftwidth=4
|
qguv/config
|
weechat/plugins/python/weeprowl.py
|
Python
|
gpl-3.0
| 7,394
|
[
"VisIt"
] |
dbd163050dbebb7355cd1c7b7a41788babd0d58a8374f054c83873ef03f51202
|
#!/usr/bin/python
import time
from bioblend.galaxy import GalaxyInstance
gi = GalaxyInstance('http://galaxytest', key='admin')
gi.histories.create_history()
#print gi.tools.get_tool_panel()
history = gi.histories.get_most_recently_used_history()
#print dir(history)
history_id = history['id']
#print history_id
tool_output = gi.tools.run_tool(
history_id=history_id,
tool_id="outputhostname",
tool_inputs={}
)
#print tool_output
# loop until the job finishes; the timeout is 40 s, the same as slurm's
result = "noresult"
for x in range(0, 40):
time.sleep(1)
show_history=gi.histories.show_history(history_id)
if len(show_history['state_ids']['ok']) > 0:
dataset_id=show_history['state_ids']['ok'][0]
dataset= gi.datasets.show_dataset(dataset_id)
result=dataset['peek']
break
print(result)
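# Hedged variant (editor's sketch): the same poll-until-ok loop factored into
# a reusable helper. The helper name is illustrative, not part of bioblend.
def wait_for_ok(gi, history_id, timeout=40):
    for _ in range(timeout):
        time.sleep(1)
        state_ids = gi.histories.show_history(history_id)['state_ids']
        if len(state_ids['ok']) > 0:
            # return the peek of the first finished dataset
            return gi.datasets.show_dataset(state_ids['ok'][0])['peek']
    return 'noresult'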
|
chambm/docker-galaxy-stable
|
test/gridengine/test_outputhostname.py
|
Python
|
mit
| 831
|
[
"Galaxy"
] |
5d08906a705938fce530370155d33122ec0afbbd0ae5e974d7a0960f8ecfb0cf
|
'''
example07 -- crystal -- dynamical diffraction (ocelot.optics.bragg)
'''
from ocelot.optics.utils import *
def save_filter(filt, f_name):
    f = open(f_name, 'w')
    for i in xrange(len(filt.ev)):
        f.write(str(filt.ev[i]) + '\t' + str(np.abs(filt.tr[i])**2) + '\t' + str(np.abs(filt.ref[i])**2) + '\n')
    f.close()
E_ev = 10000
ref_idx = (2,2,0)
thickness = 5 * mum
cr1 = Crystal(r=[0,0,0*cm], size=[5*cm,5*cm,thickness], no=[0,0,-1], id="cr1")
cr1.lattice = CrystalLattice('Si')
#cr1.psi_n = -(pi/2. - 54.7356*(pi/180.0)) #input angle psi_n according to Authier
cr1.psi_n = -pi/2. #input angle psi_n according to Authier (symmetric reflection, Si)
r = Ray(r0=[0,0.0,-0.5], k=[0,0.0,1])
r.lamb = 2 * pi * hbar * c / E_ev
print('wavelength', r.lamb)
w1 = read_signal(file_name='data/pulse_9kev_20fs.txt', npad =10, E_ref = E_ev)
plt.figure()
plot_signal(w1)
#plt.figure()
f_test = get_crystal_filter(cryst=cr1, ray=r, nk=3000, ref_idx = ref_idx)
filt = get_crystal_filter(cr1, r, ref_idx = ref_idx, k = w1.freq_k)
#save_filter(f_test, 'C400_8000ev_filter.txt')
plot_filters(filt, f_test)
plot_filters(filt, f_test, param='ref')
fig=plt.figure()
plot_spec_filt(w1, filt, ax=fig.add_subplot(111))
cr1.filter = filt
i1=np.sum(w1.sp*np.conj(w1.sp))*(w1.freq_ev[1] - w1.freq_ev[0])
def transform_field(cr, wave):
print('transforming field')
wave.sp = wave.sp * cr.filter.tr
wave.sp_ref = wave.sp * cr.filter.ref
wave.f = np.fft.ifft(wave.sp)
fig = plt.figure()
plt.grid(True)
ax = fig.add_subplot(111)
plt.plot(w1.t, np.abs(w1.f))
transform_field(cr1, w1)
i2 = np.sum(w1.sp*np.conj(w1.sp))*(w1.freq_ev[1] - w1.freq_ev[0])
i3 = np.sum(w1.sp_ref*np.conj(w1.sp_ref))*(w1.freq_ev[1] - w1.freq_ev[0])
print('transmission (%)', 100*np.real(i2/i1), 'reflection (%)', 100*np.real(i3/i1))
plt.plot(w1.t, np.abs(w1.f))
ax.set_yscale('log')
plt.figure(), plt.grid(True)
plt.plot(w1.freq_ev, np.abs(w1.sp))
plt.show()
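# Hedged note (editor's addition): i1, i2 and i3 above are discrete
# approximations of the spectral energy integral sum(|S(E)|^2) * dE, so
# 100*i2/i1 and 100*i3/i1 are the transmitted and reflected energy fractions
# (in percent) of the incident pulse after the crystal filter is applied.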
|
iagapov/ocelot
|
demos/optics/ex7.py
|
Python
|
gpl-3.0
| 1,987
|
[
"CRYSTAL"
] |
128b75b727c56a05d89db0783274f82ffd0ee03b5f4210d2a6b5ef2f804a6263
|
"""Support for building and spinnaker releases."""
# pylint: disable=wrong-import-position
# These would be required if running from source code
SPINNAKER_RUNNABLE_REPOSITORY_NAMES = [
'clouddriver',
'deck',
'echo', 'fiat', 'front50',
'gate', 'igor', 'kayenta', 'orca', 'rosco']
SPINNAKER_HALYARD_REPOSITORY_NAME = 'halyard'
SPINNAKER_GITHUB_IO_REPOSITORY_NAME = 'spinnaker.github.io'
from buildtool.util import (
DEFAULT_BUILD_NUMBER,
add_parser_argument,
unused_port,
log_timestring,
timedelta_string,
log_embedded_output,
ensure_dir_exists,
write_to_path)
from buildtool.errors import (
BuildtoolError,
ConfigError,
ExecutionError,
ResponseError,
TimeoutError,
UnexpectedError,
exception_to_message,
maybe_log_exception,
raise_and_log_error,
check_kwargs_empty,
check_options_set,
check_path_exists,
# This is very specialized, but here to share
# between validate_bom__deploy and image_commands
scan_logs_for_install_errors)
from buildtool.subprocess_support import (
start_subprocess,
wait_subprocess,
run_subprocess,
check_subprocess,
check_subprocess_sequence,
run_subprocess_sequence,
check_subprocesses_to_logfile,
determine_subprocess_outcome_labels)
from buildtool.git_support import (
GitRepositorySpec,
GitRunner,
CommitMessage,
CommitTag,
RepositorySummary,
SemanticVersion)
from buildtool.hal_support import (
HalRunner)
from buildtool.scm import (
SourceInfo,
SpinnakerSourceCodeManager)
from buildtool.bom_scm import (
SPINNAKER_BOM_REPOSITORY_NAMES,
BomSourceCodeManager)
from buildtool.branch_scm import (
BranchSourceCodeManager)
from buildtool.command import (
CommandFactory,
CommandProcessor)
from buildtool.repository_command import (
RepositoryCommandProcessor,
RepositoryCommandFactory)
from buildtool.gradle_support import (
GradleCommandFactory,
GradleCommandProcessor,
GradleRunner)
from buildtool.metrics import MetricsManager
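# Hedged usage note (editor's addition): downstream build scripts are
# expected to import these re-exported names from the package root rather
# than from the individual submodules, e.g.
#
#     from buildtool import GitRunner, check_options_set, raise_and_log_error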
|
skim1420/spinnaker
|
dev/buildtool/__init__.py
|
Python
|
apache-2.0
| 2,095
|
[
"ORCA"
] |
3e49b1d3e61660055080cd7114fd2d4c433540c7b6b602233e13002b1e258e83
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Google Inc. All Rights Reserved.
#
# Authors:
# Arkadiusz Socała <as277575@mimuw.edu.pl>
# Michael Cohen <scudette@google.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""A visitor collecting types from an AST tree."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from layout_expert.c_ast import visitor_mixin
class DependencyVisitor(visitor_mixin.CASTVisitorWalker):
"""Recursively collect the dependent types.
This calls back into the type manager to discover types that are not already
known.
"""
def __init__(self, type_manager):
self.type_manager = type_manager
def get_dependencies(self, node):
"""Collects all the type names that are used by node."""
self.dependencies = set()
self.visit(node)
return self.dependencies
def visit_c_type_definition(self, type):
self.dependencies.add(type.name)
self.type_manager.add_type(type.name, type.type_definition)
type.type_definition.accept(self)
def visit_c_type_reference(self, reference):
"""Resolve type references."""
type_name = reference.name
if type_name is not None and type_name not in self.dependencies:
self.dependencies.add(type_name)
# Resolve the reference by loading it from the type manager.
reference_ast = self.type_manager.get_type_ast(type_name)
# Recurse into it.
reference_ast.accept(self)
def visit_c_variable(self, variable):
"""Variables are constants like Enum fields."""
self.dependencies.add(variable.name)
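# Hedged usage sketch (editor's addition): `type_manager` and `ast_node` are
# assumed to come from the surrounding layout-expert pipeline; the names are
# illustrative only.
#
#     visitor = DependencyVisitor(type_manager)
#     names = visitor.get_dependencies(ast_node)
#     # `names` is the set of type names reachable from `ast_node`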
|
dsweet04/rekall
|
tools/layout_expert/layout_expert/visitors/type_collecting_visitor.py
|
Python
|
gpl-2.0
| 2,224
|
[
"VisIt"
] |
f4a9e754044d3c2c3b67b916fa71055ab6fff200b31dfa75a9a1d4fc2bc91161
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# # Testing Glider CMRE access in Python
#
# This is a URL for the NATO CMRE "grid" format, where glider data are stored as profiles. These follow the GROOM convention, and in contrast to the IOOS Glider DAC 2.0 format, instead of one combined profile for the down and up paths, down and up are split into separate profiles.
#
# In addition to the "grid" files, there are also "raw" and "processed" glider netcdf files:
#
# http://comt.sura.org/thredds/catalog/comt_2_full/testing/glider_cmre/catalog.html
# <codecell>
import iris
url = 'http://comt.sura.org/thredds/dodsC/comt_2_full/testing/glider_cmre/GL-20140621-elettra-MEDREP14depl005-grid-R.nc'
# adding coordinates attribute so that Iris can find the coordinates
url = 'http://comt.sura.org/thredds/dodsC/comt_2_full/testing/glider_cmre/foo2.ncml'
cubes = iris.load_raw(url)
print(cubes)
# <codecell>
cube = cubes.extract('sea_water_salinity')[0] #<- it always returns a list!
print(cube)
# <codecell>
import numpy as np
import numpy.ma as ma
import seawater as sw
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
%matplotlib inline
# <codecell>
def plot_glider(cube, mask_topo=False, **kw):
"""Plot glider cube."""
cmap = kw.pop('cmap', plt.cm.rainbow)
lon = cube.coord(axis='X').points.squeeze()
lat = cube.coord(axis='Y').points.squeeze()
z = cube.coord(axis='Z').points.squeeze()
data = cube.data
data = ma.masked_invalid(data,copy=True)
z = ma.masked_invalid(z,copy=True)
t = cube.coord(axis='T')
t = t.units.num2date(t.points)
dist, pha = sw.dist(lat, lon, units='km')
dist = np.r_[0, np.cumsum(dist)]
dist, z = np.broadcast_arrays(dist[..., None], z)
fig, ax = plt.subplots(figsize=(9, 3.75))
cs = ax.pcolor(dist, z, data, cmap=cmap, snap=True, **kw)
plt.colorbar(cs)
if mask_topo:
h = z.max(axis=1)
x = dist[:, 0]
ax.plot(x, h, color='black', linewidth=0.5, zorder=3)
ax.fill_between(x, h, y2=h.max(), color='0.9', zorder=3)
ax.invert_yaxis()
ax.set_title('Glider track from {} to {}'.format(t[0], t[-1]))
fig.tight_layout()
return fig, ax, cs
# <codecell>
c = cube[:,:]
fig, ax, cs = plot_glider(c, mask_topo=True)
# <codecell>
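# Hedged follow-up (editor's sketch): other variables in `cubes` can be
# sectioned the same way; the standard name below is an assumption about
# what this particular file provides, so the sketch stays commented out.
#
#     temp = cubes.extract('sea_water_temperature')[0]
#     fig, ax, cs = plot_glider(temp, mask_topo=True)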
|
rsignell-usgs/notebook
|
Glider_CMRE_test.py
|
Python
|
mit
| 2,362
|
[
"NetCDF"
] |
35464b81a82dc4414e202821ea7ea20c545d5f1847fb53ca019629d81c941564
|
# -*- coding: utf-8 -*-
{
"'Sounds-like' name search allowing search even the spelling of the name is not known exactly": "'Sounds-like'名稱搜尋或搜尋,即使名稱的拼字不完全",
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": '一個位置指定地理區域的這個區域。 這可以是位置的位置階層,或"群組位置",或位置有界限的區域。',
"Acronym of the organization's name, eg. IFRC.": '縮寫的組織的名稱,例如: IFRC。',
"Authenticate system's Twitter account": '系統的認證Twitter帳戶',
"Can't import tweepy": '無法匯入tweepy',
"Caution: doesn't respect the framework rules!": '警告:不符合架構規則!',
"Click on 'Pledge' button in the left-hand column to make a Pledge to match a request for aid.": '按一下"抵押"按鈕左手邊的直欄來進行抵押以符合要求的協助。',
"Detailed address of the site for informational/logistics purpose. Please note that you can add GIS/Mapping data about this site in the 'Location' field mentioned below.": '詳細地址的站點的參考/後勤的用途。 請注意,您可以新增GIS/對映資料中的關於此月臺"位置"欄位下面的說明。',
"Facilitate uploading of missing person's photograph": '促進上傳失蹤人口的照片',
"Format the list of attribute values & the RGB value to use for these as a JSON object, e.g.: {Red: '#FF0000', Green: '#00FF00', Yellow: '#FFFF00'}": '清單格式屬性的值" & RGB值用於為JSON物件,例如: {0}紅色: \'#FF0000,綠色: \'#00FF00,黃色: \'#FFFF00的',
"Grouping by 'Family Unit' or other group category": '分組\'系列單元"或"其他"群組種類',
"If selected, then this Asset's Location will be updated whenever the Person's Location is updated.": '如果選取,則此資產的位置將會被更新時,人員的位置已更新。',
"If this configuration represents a region for the Regions menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": '如果此配置代表一個區域的區域功能表上,請提供一個名稱,以使用在功能表中。 名稱的個人對映配置將會設為使用者的名稱。',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": '如果這個欄位會移入,則使用者指定此組織時,註冊將指定為一個人員的組織,除非它們的網域不符合網域欄位。',
"If this is ticked, then this will become the user's Base Location & hence where the user is shown on the Map": '如果這是起來,則這會成為使用者的基本位置和因此使用者在地圖上顯示',
"If this setting is enabled then all deleted records are just flagged as deleted instead of being really deleted. They will appear in the raw database access but won't be visible to normal users.": '如果啟用了這項設定,則所有刪除的記錄只是標示為刪除而確定刪除。 它們會顯示在原始資料庫存取,但不會看到一般使用者。',
"If you cannot find the record of the person you want to report missing, you can add it by clicking 'Add Person' below:": '如果您無法找到該記錄的人員您要報告丟失了,您可以將它新增至按一下"新增人員"如下:',
"If you don't see the Hospital in the list, you can add a new one by clicking link 'Create Hospital'.": '如果您沒有看到"以在清單中,您可以新增一個新的按一下鏈結新增醫院。',
"If you don't see the Office in the list, you can add a new one by clicking link 'Create Office'.": '如果您沒有看到"辦事處清單中,您可以新增一個新的按一下鏈結新增Out of Office。',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": '如果您沒有看到"組織清單中,您可以新增一個新的按一下鏈結新增組織。',
"Instead of automatically syncing from other peers over the network, you can also sync from files, which is necessary where there's no network. You can use this page to import sync data from files and also export data to sync files. Click the link on the right to go to this page.": '而自動從其他同步對等網路上,您也可以同步檔案,這是必需的,沒有網路。 您可以利用這個頁面來匯入同步檔案資料,匯出資料要同步化的檔案。 上的鏈結,按一下滑鼠右鍵,前往這個頁面。',
"Level is higher than parent's": '母項的層次高於',
"NB SMS requests are filtered to just those which are 'actionable', whilst the Tweet requests are unfiltered, so that is likely to be a good place to start Searching.": "Nb SMS要求過濾,只是一個actionable',時, Tweet要求過濾,因此可能會是一個好開始搜尋。",
"Need a 'url' argument!": "需要一個'URL'引數!",
"Note that the dropdowns won't refresh automatically. Refresh the page if you wish to verify that the locations have gone.": '請注意,清單不會自動重新整理。 如果您想要重新整理頁面,以驗證"位置不存在。',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": "選用。 幾何形狀的名稱直欄。 在PostGIS預設為'the_geom'.",
"Parent level should be higher than this record's level. Parent level is": '母項層次應該高於此記錄的層次。 母項層次是',
"Password fields don't match": '密碼欄位不符',
"Phone number to donate to this organization's relief efforts.": '捐贈撥打電話號碼這個組織的釋放工作。',
"Please come back after sometime if that doesn't help.": '請回到之後,如果該時間不說明。',
"Press the 'Delete Old' button to have all records which reference this one be repointed at the new one & then the old record will be deleted.": '按下"刪除舊的按鈕,使所有記錄參照此一個被repointed在新的一個,則舊記錄將被刪除。',
"Quantity in %s's Inventory": '以百分比的庫存數量',
"Search here for a person's record in order to:": '搜尋這裡的人員的記錄,以便:',
"Select a Room from the list or click 'Create Room'": '選取會議室從清單,或按一下新增空間"',
"Select a person in charge for status 'assigned'": "選取一個人員負責的狀態'指定的'",
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": '選取這個如果所有特定位置需要母項在最深層次的位置階層。 例如,如果"地區"的最小部門階層中,則所有特定位置所需的要區域作為母項。',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": '選取這個如果所有特定位置需要一個母項位置階層。 這可協助在設定"地區"代表一個受影響的區域。',
"Sorry, things didn't get done on time.": '抱歉,項目沒有取得完成的時間。',
"Sorry, we couldn't find that page.": '很抱歉,我們找不到該頁面。',
"System's Twitter account updated": '系統的Twitter更新帳戶',
"The Donor(s) for this project. Multiple values can be selected by holding down the 'Control' key.": '在Donor(S)適用於這個專案。 可以選取多個值,請按住控制鍵。',
"The Sector(s) this organization works in. Multiple values can be selected by holding down the 'Control' key.": '部門(s)此組織運作中。 可以選取多個值,請按住控制鍵。',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": '映射檔的URL。 如果您不上傳影像檔案,則您必須指定其位置在這裡。',
"The person's manager within this Office/Project.": '人員的管理員在這個辦事處/專案。',
"To search by person name, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員名稱,輸入任何的第一個,中間或最後一個名稱,以空格區隔。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a body, enter the ID tag number of the body. You may use % as wildcard. Press 'Search' without input to list all bodies.": '要搜尋的主體,請輸入ID標籤的主體。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的主體。',
"To search for a hospital, enter any of the names or IDs of the hospital, or the organisation name or acronym, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,或組織名稱或縮寫,以空格區隔。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a hospital, enter any of the names or IDs of the hospital, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何名稱或ID的醫院,以空格區隔。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a hospital, enter any part of the name or ID. You may use % as wildcard. Press 'Search' without input to list all hospitals.": '要搜尋的醫院,輸入的任何部分的名稱或ID。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,以列出所有醫院。',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": '要搜尋的位置,輸入該名稱。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的位置。',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一個,中間或最後一個名稱和/或ID號碼的人員,以空格區隔。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a person, enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": '搜尋人員,輸入任何的第一個,中間或最後一個名稱,以空格區隔。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的人。',
"To search for a request, enter some of the text that you are looking for. You may use % as wildcard. Press 'Search' without input to list all requests.": '搜尋要求時,輸入您要尋找的部分文字。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,會列出所有的要求。',
"To search for an assessment, enter any portion the ticket number of the assessment. You may use % as wildcard. Press 'Search' without input to list all assessments.": '要搜尋的評估,輸入任何部分的票據號碼的評估。 您可以使用%作為萬用字元。 按一下"搜尋"不需要輸入,以列出所有評估。',
"Type the first few characters of one of the Person's names.": '輸入前幾個字元的其中一個人員的名稱。',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": '上傳影像檔案在這裡。 如果您不上傳影像檔案,則您必須指定其位置在URL欄位中。',
"View and/or update details of the person's record": '檢視及/或更新詳細資料的人員的記錄',
"View/Edit the Database directly (caution: doesn't respect the framework rules!)": '檢視/編輯資料庫直接(警告:不符合架構規則! )',
"What are the people's normal ways to obtain food in this area?": '在這個地區通常大家怎麼取得食物?',
"What should be done to reduce women and children's vulnerability to violence?": '如何做才能減少婦女和小孩遭受暴力?',
"When syncing data with others, conflicts happen in cases when two (or more) parties want to sync information which both of them have modified, i.e. conflicting information. Sync module tries to resolve such conflicts automatically but in some cases it can't. In those cases, it is up to you to resolve those conflicts manually, click on the link on the right to go to this page.": '當與他人同步數據 ,兩個(或多方)要同步的資料都已經修改的情況下衝突發生,即資訊相互矛盾。 同步模組嘗試解析這類衝突自動,但是在某些情湟下不能。 在這些情湟下,您有來解決這些衝突,請手動按一下上的鏈結,才能進入這個頁面。',
"You have personalised settings, so changes made here won't be visible to you. To change your personalised settings, click": '您已經設定密碼,因此在這裡進行的變更不會顯示給您。 若要變更您的設定,請按一下個人化',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": "您有未儲存的變更。 現在按一下'取消',然後'儲存',以儲存它們。 按一下確定以立即舍棄它們。",
"You haven't made any calculations": '您尚未進行任何計算',
"couldn't be parsed so NetworkLinks not followed.": '無法剖析,因此NetworkLinks不遵循。',
"includes a GroundOverlay or ScreenOverlay which aren't supported in OpenLayers yet, so it may not work properly.": '包括一個GroundOverlay或ScreenOverlay都不支援在OpenLayers尚未,因此可能無法正常運作。',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update"是選用的表示式類似"field1=\'newvalue\'"。您不能更新或刪除結果的結合',
'# of Houses Damaged': '損壞的房屋數',
'# of Houses Destroyed': '損毀的房屋數',
'# of International Staff': '國際人員的人數',
'# of National Staff': '#的國家人員',
'# of People Affected': '#的人員分配',
'# of People Deceased': '#的人員死亡',
'# of People Injured': '#的人員受傷',
'# of Vehicles': '#的媒介',
'%(count)s rows deleted': '%(count)s已刪除的橫列',
'%(count)s rows updated': '%(count)s已更新的橫列',
'%(msg)s\nIf the request type is "%(type)s", please enter the %(type)s on the next screen.': '%(msg)s\nIf的要求類型是"%(type)s",請輸入 %(type)s 在下一個畫面。',
'%(system_name)s - Verify Email': '%(system_name)s - 驗證電子郵件',
'%.1f km': '%.1f公里',
'& then click on the map below to adjust the Lat/Lon fields': '&,然後按一下"對映"下面的調整平面/長欄位',
'* Required Fields': '* 必填欄位',
'0-15 minutes': '0-15分鐘',
'1 Assessment': '1 項評量',
'1 location, shorter time, can contain multiple Tasks': '單一位置,較短時間,可包含多個作業',
'1-3 days': '1-3 天',
'1. Fill the necessary fields in BLOCK letters.': '1. 以正楷大寫字母填寫必要欄位。',
'15-30 minutes': '15-30分鐘',
'2 different options are provided here currently:': '目前這裡提供 2 個不同的選項:',
'2. Always use one box per letter and leave one box space to seperate words.': '2. 一律每格填寫一個字母,並留一格空白以分隔文字。',
'2x4 Car': '2x4車',
'30-60 minutes': '30-60分鐘',
'4-7 days': '4-7 天',
'4x4 Car': '4x4車',
'8-14 days': '8-14 天',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': '若需要覆寫指派給圖徵類別的標記,可設定指派給個別位置的標記。',
'A Reference Document such as a file, URL or contact person to verify this data. You can type the 1st few characters of the document name to link to an existing document.': '用以驗證這項資料的參考文件,如檔案,URL或聯絡人。您可以鍵入文件名稱的前幾個字元以鏈結至現有的文件。',
'A Warehouse/Site is a physical location with an address and GIS data where Items are Stored. It can be a Building, a particular area in a city or anything similar.': '倉庫/站點是具有地址與 GIS 資料的實體位置,物品存放於此。它可以是一棟建築物,城市中的特定區域或任何類似場所。',
'A brief description of the group (optional)': '群組的簡要說明(選用)',
'A file downloaded from a GPS containing a series of geographic points in XML format.': '從 GPS 下載的檔案,包含一系列 XML 格式的地理點。',
'A file in GPX format taken from a GPS whose timestamps can be correlated with the timestamps on the photos to locate them on the map.': '來自 GPS 的 GPX 格式檔案,其時間戳記可與照片的時間戳記關聯,以在地圖上定位照片。',
'A library of digital resources, such as Photos, signed contracts and Office documents.': '數位資源庫,如照片,已簽署的合約和 Office 文件。',
'A library of digital resources, such as photos, documents and reports': '數位資源庫,如照片,文件和報告',
'A location group can be used to define the extent of an affected area, if it does not fall within one administrative region.': '若受影響區域不在單一行政區域內,可使用位置群組來定義其範圍。',
'A location group is a set of locations (often, a set of administrative regions representing a combined area). Member locations are added to a location group here. Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group. A location group can be used to define the extent of an affected area, if it does not fall within one administrative region. Location groups can be used in the Regions menu.': '位置群組是一組位置(通常是一組行政區域,代表一個合併的區域)。在此將成員位置新增至位置群組。位置群組可用來過濾地圖上和搜尋結果中顯示的內容,僅顯示群組內位置所涵蓋的實體。若受影響區域不在單一行政區域內,可使用位置群組來定義其範圍。位置群組可用於「區域」功能表。',
'A location group is a set of locations (often, a set of administrative regions representing a combined area).': '位置群組是一組位置(通常是一組行政區域,代表一個合併的區域)。',
'A location group must have at least one member.': '位置群組必須至少有一個成員。',
'A place within a Site like a Shelf, room, bin number etc.': '站點內的位置,如層架,房間,貯存箱編號等。',
'A practical example can be of a report of lost person. Now if one machine register him to be found on 16th August and another machine registers him to found on 17th August, then e.g. Newer timestamp will replace data entry of your machine with that of foriegn machine because that is newer one.': '一個實際的例子是失蹤人員報告。若一臺機器登錄他於8月16日尋獲,另一臺機器登錄他於8月17日尋獲,則採用「較新時間戳記」原則時,外部機器的資料較新,會取代您機器上的資料項目。',
'A snapshot of the bin or additional documents that contain supplementary information about it can be uploaded here.': '可在此上傳貯存箱的快照,或包含其補充資訊的其他文件。',
'A snapshot of the location or additional documents that contain supplementary information about the Site Location can be uploaded here.': '可在此上傳該位置的快照,或包含站點位置補充資訊的其他文件。',
'A snapshot of the location or additional documents that contain supplementary information about the Site can be uploaded here.': '可在此上傳該位置的快照,或包含站點補充資訊的其他文件。',
'A survey series with id %s does not exist. Please go back and create one.': 'ID 為 %s 的調查系列不存在。請回上頁並建立一個。',
'ABOUT THIS MODULE': '關於此模組',
'ABOUT': '關於',
'ACCESS DATA': '存取資料',
'ANY': '任何',
'API is documented here': 'API是記載在這裡',
'ATC-20 Rapid Evaluation modified for New Zealand': '為紐西蘭修改的 ATC-20 快速評估',
'Abbreviation': '縮寫',
'Ability to Fill Out Surveys': '能夠填寫調查',
'Ability to customize the list of details tracked at a Shelter': '能夠自訂避難所追蹤的詳細資料清單',
'Ability to customize the list of human resource tracked at a Shelter': '能夠自訂避難所追蹤的人力資源清單',
'Ability to customize the list of important facilities needed at a Shelter': '能夠自訂避難所所需的重要設施清單',
'Ability to track partial fulfillment of the request': '能夠追蹤部分履行的要求',
'Ability to view Results of Completed and/or partially filled out Surveys': '能夠檢視已完成和/或部分填寫的調查結果',
'About Sahana Eden': '關於Sahana Eden',
'About Sahana': '關於Sahana',
'About this module': '關於此模組',
'About': '關於',
'Access denied': '拒絕存取',
'Access to Shelter': '獲得避難所',
'Access to education services': '獲得教育服務',
'Accessibility of Affected Location': '受影響位置的可及性',
'Account Registered - Please Check Your Email': '帳戶已註冊-請檢查您的電子郵件',
'Account registered, however registration is still pending approval - please wait until confirmation received.': '帳戶登錄,但是登錄仍在擱置核准-請稍候直到收到確認。',
'Acronym': '字首語',
'Actionable by all targeted recipients': '所有目標收件者皆可執行',
'Actionable only by designated exercise participants; exercise identifier SHOULD appear in <note>': '僅可由指定的演習參與者執行;演習識別碼應出現在<note>中',
'Actionable': '可行',
'Actioned?': '已處理?',
'Actions taken as a result of this request.': '因應此要求所採取的動作。',
'Actions': '動作',
'Activate Events from Scenario templates for allocation of appropriate Resources (Human, Assets & Facilities).': '從方案範本啟動活動以分配適當的資源(人力,資產及設施)。',
'Active Problems': '作用中問題',
'Active': '作用中',
'Activities matching Assessments:': '與評估相符的活動:',
'Activities of boys 13-17yrs before disaster': '災前 13-17 歲男孩的活動',
'Activities of boys 13-17yrs now': '現在 13-17 歲男孩的活動',
'Activities of boys <12yrs before disaster': '災前 12 歲以下男孩的活動',
'Activities of boys <12yrs now': '現在 12 歲以下男孩的活動',
'Activities of children': '兒童的活動',
'Activities of girls 13-17yrs before disaster': '災前 13-17 歲女孩的活動',
'Activities of girls 13-17yrs now': '現在 13-17 歲女孩的活動',
'Activities of girls <12yrs before disaster': '災前 12 歲以下女孩的活動',
'Activities of girls <12yrs now': '現在 12 歲以下女孩的活動',
'Activities': '活動',
'Activities:': '活動:',
'Activity Added': '新增活動',
'Activity Deleted': '刪除活動',
'Activity Details': '活動明細',
'Activity Report': '活動報告',
'Activity Reports': '活動報告',
'Activity Type': '活動類型',
'Activity Updated': '更新活動',
'Activity': '活動',
'Add Activity Type': '新增活動類型',
'Add Address': '新增地址',
'Add Aid Request': '新增輔助請求',
'Add Alternative Item': '新增替代項目',
'Add Assessment Summary': '新增評量摘要',
'Add Assessment': '新增評量',
'Add Asset Log Entry - Change Label': '新增資產日誌項目-變更標籤',
'Add Availability': '新增可用性',
'Add Baseline Type': '新增基準線類型',
'Add Baseline': '新增基準線',
'Add Bin Type': '新增貯存箱類型',
'Add Bins': '新增貯存箱',
'Add Bundle': '新增軟體組',
'Add Camp Service': '新增營地服務',
'Add Camp Type': '新增營地類型',
'Add Camp': '新增營地',
'Add Catalog.': '新增型錄。',
'Add Category': '新增種類',
'Add Category<>Sub-Category<>Catalog Relation': '新增Category<>Sub-Category<>Catalog關系',
'Add Certification': '新增認證',
'Add Competency': '新增能力',
'Add Config': '新增配置',
'Add Contact': '新增聯絡人',
'Add Contact Information': '新增聯絡資訊',
'Add Course Certicate': '新增進程憑證',
'Add Credential': '新增認證',
'Add Credentials': '新增認證',
'Add Disaster Victims': '新增災難受害者',
'Add Distribution': '新增配送',
'Add Document': '新增文件',
'Add Donor': '新增捐贈者',
'Add Flood Report': '新增水災報告',
'Add GIS Feature': '新增GIS功能',
'Add Group Member': '新增群組成員',
'Add Human Resource': '新增人力資源',
'Add Identity': '新增身分',
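# NB: this file is read as a Python dict literal, so each English msgid may
# appear only once - a repeated key is silently collapsed and only the last
# value survives.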
'Add Image': '新增影像',
'Add Impact Type': '新增影響類型',
'Add Impact': '新增影響',
'Add Inventory Item': '新增庫存項目',
'Add Inventory Location': '新增庫存位置',
'Add Inventory Store': '新增資產儲存庫',
'Add Item (s)': '新增項目(S)',
'Add Item Catalog Category': '新增項目型錄種類',
'Add Item Catalog': '新增項目型錄',
'Add Item Sub-Category': '新增項目子類別',
'Add Item to Catalog': '新增項目到型錄',
'Add Item to Commitment': '新增項目至承諾',
'Add Item to Inventory': '新增項目至庫存',
'Add Item to Request': '新增項目至要求',
'Add Item to Shipment': '新增項目至出貨',
'Add Item': '新增項目',
'Add Job Role': '新增工作角色',
'Add Key': '新增金鑰',
'Add Kit': '新增套件',
'Add Landmark': '新增地標',
'Add Level 1 Assessment': '新增層次一評量',
'Add Level 2 Assessment': '新增層次二評量',
'Add Line': '新增一行',
'Add Location': '新增位置',
'Add Locations': '新增位置',
'Add Log Entry': '新增日誌項目',
'Add Member': '新增成員',
'Add Membership': '新增成員資格',
'Add Message': '新增訊息',
'Add Metadata': '新增元數據',
'Add Mission': '新增任務',
'Add Need Type': '新增需要類型',
'Add Need': '新增需要',
'Add New Aid Request': '新增輔助請求',
'Add New Assessment Summary': '新增評量摘要',
'Add New Baseline Type': '新增基準線類型',
'Add New Baseline': '新增基準線',
'Add New Bin Type': '新增貯存箱類型',
'Add New Bin': '新增貯存箱',
'Add New Budget': '新增新預算',
'Add New Bundle': '新增軟體組',
'Add New Camp Service': '新增營地服務',
'Add New Camp Type': '新增營地類型',
'Add New Camp': '新增營地',
'Add New Cluster Subsector': '新增叢集Subsector',
'Add New Cluster': '新增叢集',
'Add New Commitment Item': '新增承諾書項目',
'Add New Config': '新增配置',
'Add New Distribution Item': '新增分配項目',
'Add New Distribution': '新增分配',
'Add New Document': '新增文件',
'Add New Donor': '新增捐贈者',
'Add New Entry': '新增項目',
'Add New Event': '新增事件',
'Add New Flood Report': '新增水災報告',
'Add New Human Resource': '新增人力資源',
'Add New Image': '新增影像',
'Add New Impact Type': '新增影響類型',
'Add New Impact': '新增新影響',
'Add New Inventory Item': '新增庫存項目',
'Add New Inventory Location': '新增庫存位置',
'Add New Inventory Store': '新增資產儲存庫',
'Add New Item Catalog Category': '新增項目型錄種類',
'Add New Item Catalog': '新增項目型錄',
'Add New Item Sub-Category': '新增項目子類別',
'Add New Item to Kit': '新增項目至套件',
'Add New Key': '新增金鑰',
'Add New Landmark': '新增地標',
'Add New Level 1 Assessment': '新增層次一評量',
'Add New Level 2 Assessment': '新增層次二評量',
'Add New Member': '新增成員',
'Add New Membership': '新增組員',
'Add New Metadata': '新增meta資料',
'Add New Need Type': '新增需要類型',
'Add New Need': '新增需要',
'Add New Note': '新增附註',
'Add New Partner': '新增夥伴',
'Add New Patient': '新增病人',
'Add New Peer': '新增同層級',
'Add New Population Statistic': '新增人口統計資料',
'Add New Position': '新增位置',
'Add New Problem': '新增問題',
'Add New Rapid Assessment': '新增快速評量',
'Add New Received Item': '新增接收項目',
'Add New Record': '新增記錄',
'Add New Request Item': '新增要求項目',
'Add New Request': '新增要求',
'Add New Response': '新增回應',
'Add New River': '新增河流',
'Add New Role to User': '新增角色至使用者',
'Add New Scenario': '新增實務',
'Add New School District': '新增學校特區',
'Add New School Report': '新增學校報告',
'Add New Section': '新增區段',
'Add New Sent Item': '新增傳送的項目',
'Add New Setting': '新增設定',
'Add New Shipment to Send': '新增出貨以傳送',
'Add New Site': '新增網站',
'Add New Solution': '新增解決方案',
'Add New Source': '新增來源',
'Add New Staff Type': '新增工作人員類型',
'Add New Staff': '新增人員',
'Add New Storage Location': '新增儲存位置',
'Add New Subsector': '新增Subsector',
'Add New Survey Answer': '新增問卷調查回答',
'Add New Survey Question': '新增問卷調查問題',
'Add New Survey Section': '新增問卷調查部分',
'Add New Survey Series': '新增問卷調查系列',
'Add New Survey Template': '新增調查範本',
'Add New Team': '新增團隊',
'Add New Ticket': '新增問題單',
'Add New Track': '新增追蹤',
'Add New Unit': '新增單位',
'Add New Update': '新增更新',
'Add New User to Role': '新增使用者至角色',
'Add New': '新增',
'Add Note': '新增附註',
'Add Partner': '新增夥伴',
'Add Peer': '新增同層級',
'Add Person': '新增人員',
'Add Photo': '新增照片',
'Add Point': '新增點',
'Add Polygon': '新增多邊形',
'Add Population Statistic': '新增人口統計資料',
'Add Position': '新增位置',
'Add Problem': '新增問題',
'Add Projections': '新增估算',
'Add Question': '新增問題',
'Add Rapid Assessment': '新增快速評量',
'Add Recipient Site': '新增收件者網站',
'Add Recipient': '新增接收者',
'Add Record': '新增記錄',
'Add Recovery Report': '新增回復報告',
'Add Reference Document': '新增參照文件',
'Add Relief Item': '新增救援物資',
'Add Report': '新增報告',
'Add Request Detail': '新增要求詳細資料',
'Add Request Item': '新增要求項目',
'Add Request': '新增要求',
'Add Resource': '新增資源',
'Add Response': '新增回應',
'Add School District': '新增學校特區',
'Add School Report': '新增學校報告',
'Add Section': '新增區段',
'Add Sender Organization': '新增寄件者組織',
'Add Sender Site': '新增寄件者網站',
'Add Setting': '新增設定',
'Add Shipment Transit Log': '新增出貨傳輸日誌',
'Add Shipment/Way Bills': '新增出貨/方式賬單',
'Add Site': '新增站點',
'Add Skill Equivalence': '新增等值技能',
'Add Skill Provision': '新增供應技能',
'Add Skill Types': '新增技能類型',
'Add Solution': '新增解決方案',
'Add Source': '新增來源',
'Add Staff Type': '新增人員類型',
'Add Staff': '新增人員',
'Add Storage Bin Type': '新增儲存體bin類型',
'Add Storage Bin': '新增儲存體bin',
'Add Storage Location': '新增儲存體位置',
'Add Sub-Category': '新增子種類',
'Add Subscription': '新增訂閱',
'Add Subsector': '新增Subsector',
'Add Survey Answer': '新增調查回答',
'Add Survey Question': '新增調查問題',
'Add Survey Section': '新增調查區段',
'Add Survey Series': '新增調查系列',
'Add Survey Template': '新增調查範本',
'Add Team Member': '新增成員',
'Add Team': '新增團隊',
'Add Ticket': '新增問題單',
'Add Training': '新增訓練',
'Add Unit': '新增單位',
'Add Update': '新增更新',
'Add Volunteer Availability': '新增自願可用性',
'Add Volunteer Registration': '新增自願登錄',
'Add a New Inventory Location': '新增一個新庫存位置',
'Add a New Relief Item': '新增一項新的救援物資',
'Add a Reference Document such as a file, URL or contact person to verify this data. If you do not enter a Reference Document, your email will be displayed instead.': '新增一個參考文件,如檔案,URL或聯絡人,以驗證這項資料。如果您不輸入參考文件,則會改為顯示您的電子郵件。',
'Add a Reference Document such as a file, URL or contact person to verify this data.': '新增一個參考文件,如檔案,URL或聯絡人,以驗證這項資料。',
'Add a Volunteer': '新增一名志工',
'Add a new Relief Item.': '新增一項新的救援物資。',
'Add a new Site from where the Item is being sent.': '新增一個傳送項目的新站點。',
'Add a new Site where the Item is being sent to.': '新增一個接收項目的新站點。',
'Add a new certificate to the catalog.': '新增憑證至型錄。',
'Add a new competency rating to the catalog.': '新增能力分級至型錄。',
'Add a new course to the catalog.': '新增課程至型錄。',
'Add a new job role to the catalog.': '新增工作角色至型錄。',
'Add a new skill provision to the catalog.': '新增技能供應至型錄。',
'Add a new skill to the catalog.': '新增技能至型錄。',
'Add a new skill type to the catalog.': '新增技能類型至型錄。',
'Add an Photo.': '新增一個照片。',
'Add main Item Category.': '新增主要項目種類。',
'Add main Item Sub-Category.': '新增主要項目子類別。',
'Add new Group': '新增群組',
'Add new Individual': '新增個別',
'Add new person.': '新增人員。',
'Add new position.': '新增位置。',
'Add new project.': '新增專案。',
'Add new staff role.': '新增工作人員角色。',
'Add new staff.': '新增人員。',
'Add or Update': '新增或更新',
'Add staff members': '新增人員成員',
'Add the Storage Bin Type.': '新增貯存箱類型。',
'Add the Storage Location where this bin is located.': '新增此貯存箱所在的儲存位置。',
'Add the Storage Location where this this Bin belongs to.': '新增此貯存箱所屬的儲存位置。',
'Add the main Warehouse/Site information where this Bin belongs to.': '新增此貯存箱所屬的主要倉庫/站點資訊。',
'Add the main Warehouse/Site information where this Item is to be added.': '新增要加入此項目的主要倉庫/站點資訊。',
'Add the main Warehouse/Site information where this Storage location is.': '新增此儲存位置所在的主要倉庫/站點資訊。',
'Add the unit of measure if it doesnt exists already.': '如果測量單位尚不存在,請新增。',
'Add to Bundle': '新增至軟體組',
'Add to Catalog': '新增至型錄',
'Add to budget': '新增至預算',
'Add volunteers': '新增志願者',
'Add': '新增',
'Add/Edit/Remove Layers': '新增/編輯/移除層',
'Added to Group': '已加入群組',
'Added to Team': '已加入團隊',
'Additional Beds / 24hrs': '新增床位 / 24小時',
'Additional Comments': '其他註解',
'Additional quantity quantifier – i.e. “4x5”.': '其他數量限量元-也就是"4x5"。',
'Address Details': '位址詳細資料',
'Address Type': '位址類型',
'Address added': '新增位址',
'Address deleted': '刪除地址',
'Address updated': '更新位址',
'Address': '地址',
'Addresses': '地址',
'Adequate food and water available': '足夠的食物和水可用',
'Adequate': '足夠',
'Adjust Item(s) Quantity': '調整項目(s)的數量',
'Adjust Items due to Theft/Loss': '調整項目由於遭竊/遺失',
'Admin Email': '管理電子郵件',
'Admin Name': 'Admin 名稱',
'Admin Tel': '管理TEL',
'Admin': '管理權',
'Administration': '管理模組',
'Administrator': '管理者',
'Adolescent (12-20)': '青少年 (12-20)',
'Adolescent participating in coping activities': '參與應對活動的青少年',
'Adult (21-50)': '成人 (21-50)',
'Adult ICU': '成人ICU',
'Adult Psychiatric': '成人精神科',
'Adult female': '成人女性',
'Adult male': '成人男性',
'Adults in prisons': '監獄中的成人',
'Advanced Bin Search': '進階搜尋bin',
'Advanced Catalog Search': '進階搜尋型錄',
'Advanced Category Search': '進階搜尋種類',
'Advanced Item Search': '進階搜尋項目',
'Advanced Location Search': '進階搜尋位置',
'Advanced Site Search': '進階網站搜尋',
'Advanced Sub-Category Search': '先進的子分類搜索',
'Advanced Unit Search': '進階單位搜索',
'Advanced:': '進階:',
'Advisory': '諮詢',
'Affectees Families settled in the school belong to district': '受影響家庭定居在區內學校',
'After clicking on the button, a set of paired items will be shown one by one. Please select the one solution from each pair that you prefer over the other.': '按一下按鈕後,將逐一顯示成對的項目。請從每一對中選取您較偏好的那一個解決方案。',
'Age Group': '年齡層',
'Age group does not match actual age.': '年齡層與實際年齡不符。',
'Age group': '年齡層',
'Aggravating factors': '加重因素',
'Aggregate Items': '聚集項目',
'Agriculture': '農業',
'Aid Request Details': '輔助要求詳細資料',
'Aid Request added': '輔助請求添加',
'Aid Request deleted': '輔助刪除要求',
'Aid Request updated': '要求更新輔助',
'Aid Request': '輔助請求',
'Aid Requests': '輔助要求',
'Air Transport Service': '航空運輸服務',
'Aircraft Crash': '墜機',
'Aircraft Hijacking': '劫機',
'Airport Closure': '機場關閉',
'Airport': '機場',
'Airspace Closure': '空域關閉',
'Alcohol': '酒精',
'Alert': '警示',
'All Inbound & Outbound Messages are stored here': '所有入埠及出埠訊息儲存在這裡',
'All Locations': '所有位置',
'All Pledges': '所有抵押',
'All Requested Items': '所有要求的項目',
'All Resources': '所有資源',
'All data is able to be shared with other sites in real time.': '所有資料皆可與其他站點即時共用。',
'All data provided by the Sahana Software Foundation from this site is licenced under a Creative Commons Attribution licence. However, not all data originates here. Please consult the source field of each entry.': '本網站由 Sahana Software Foundation 提供的所有資料均依 Creative Commons Attribution 授權條款授權。然而,並非所有資料都源自此處,請參閱每筆項目的來源欄位。',
'All': '所有',
'Allowed to push': '允許推送',
'Allows a Budget to be drawn up': '允許編列預算',
'Allows authorized users to control which layers are available to the situation map.': '可讓授權使用者控制情勢地圖上可用的圖層。',
'Allows authorized users to upload multiple features into the situation map.': '允許授權使用者將多個圖徵上傳至情勢地圖。',
'Alternative Item Details': '替代項目詳細資料',
'Alternative Item added': '新增替代項目',
'Alternative Item deleted': '替代項目刪除',
'Alternative Item updated': '替代更新項目',
'Alternative Item': '替代項目',
'Alternative Items': '替代項目',
'Alternative infant nutrition in use': '替代嬰兒營養使用中',
'Alternative places for studying available': '有替代的讀書場所',
'Alternative places for studying': '替代的讀書場所',
'Ambulance Service': '救護車服務',
'An Inventory Store is a physical place which contains Relief Items available to be Distributed.': '資產儲存庫是一個實體場所,存放可供發放的救援物資。',
'An intake system, a warehouse management system, commodity tracking, supply chain management, procurement and other asset and resource management capabilities.': '收件系統,倉儲管理系統,商品追蹤,供應鏈管理,採購,及其他資產和資源管理功能。',
'An interactive map of the situation.': '互動式的情勢地圖。',
'An item which can be used in place of another item': '可用於代替另一個項目的項目',
'Analysis of Completed Surveys': '分析完成的調查',
'Animal Die Off': '動物大量死亡',
'Animal Feed': '動物飼料',
'Animals': '動物',
'Answer Choices (One Per Line)': '答案選項(每行一個)',
'Antibiotics available': '可用的抗生素',
'Antibiotics needed per 24h': '每 24 小時所需的抗生素',
'Any available Metadata in the files will be read automatically, such as Timestamp, Author, Latitude & Longitude.': '檔案中任何可用的 meta 資料將自動讀取,例如時間戳記,作者,緯度和經度。',
'Any comments about this sync partner.': '任何相關註解這個同步夥伴。',
'Apparent Age': '外觀年齡',
'Apparent Gender': '外觀性別',
'Application Deadline': '申請截止日期',
'Appropriate clothing available': '適當的衣服可用',
'Appropriate cooking equipment/materials in HH': '烹飪適當設備/材料hh',
'Approve': '核准',
'Approved': '已核准',
'Approver': '核准者',
'Approx. number of cases/48h': '約略病例數/48小時',
'Approximately how many children under 5 with diarrhea in the past 48 hours?': '過去48小時內,大約有多少五歲以下兒童腹瀉?',
'Archive not Delete': '保存而非刪除',
'Arctic Outflow': '北極氣流',
'Are basic medical supplies available for health services since the disaster?': '災後基本的醫療用品是否可用於衛生服務?',
'Are breast milk substitutes being used here since the disaster?': '在災難發生後母乳代用品是否被用在這裡?',
'Are the areas that children, older people, and people with disabilities live in, play in and walk through on a daily basis physically safe?': '兒童,老人,和殘疾人士每天生活,嬉戲和走過的地區是否實際安全?',
'Are the chronically ill receiving sufficient care and assistance?': '長期病患者是否得到足夠的關心和幫助?',
'Are there adults living in prisons in this area?': '這個地區的監獄中有成人居住嗎?',
'Are there alternative places for studying?': '有替代的讀書場所嗎?',
'Are there cases of diarrhea among children under the age of 5?': '五歲以下兒童中有腹瀉病例嗎?',
'Are there children living in adult prisons in this area?': '這個地區有兒童住在成人監獄中嗎?',
'Are there children living in boarding schools in this area?': '這個地區有兒童住在寄宿學校嗎?',
'Are there children living in homes for disabled children in this area?': '這個地區有兒童住在殘障兒童之家嗎?',
'Are there children living in juvenile detention in this area?': '這個地區有兒童被青少年拘留所收容嗎?',
'Are there children living in orphanages in this area?': '這個地區有兒童住在孤兒院嗎?',
'Are there children with chronical illnesses in your community?': '您的社區中有患慢性疾病的兒童嗎?',
'Are there health services functioning for the community since the disaster?': '災後社區的衛生服務仍在運作嗎?',
'Are there older people living in care homes in this area?': '這個地區有老人住在安養院嗎?',
'Are there older people with chronical illnesses in your community?': '您的社區中有患慢性疾病的老人嗎?',
'Are there people with chronical illnesses in your community?': '您的社區中有患慢性疾病的人嗎?',
'Are there separate latrines for women and men available?': '有男女分開的廁所可用嗎?',
'Are there staff present and caring for the residents in these institutions?': '這些機構中有工作人員在場照顧住民嗎?',
'Area': '區域 (area)',
'Areas inspected': '已視察地區',
'Assessment Details': '評量詳細資料',
'Assessment Reported': '評量報告',
'Assessment Summaries': '評量摘要',
'Assessment Summary Details': '評量摘要詳細資料',
'Assessment Summary added': '新增評量摘要',
'Assessment Summary deleted': '刪除評量摘要',
'Assessment Summary updated': '評量摘要更新',
'Assessment Type': '評量類型',
'Assessment Type:': '評量類型:',
'Assessment added': '新增評量',
'Assessment admin level': '評量管理層次',
'Assessment deleted': '評量刪除',
'Assessment timeline': '評量時間表',
'Assessment updated': '評量更新',
'Assessment': '評量',
'Assessments Needs vs. Activities': '需要評估與活動',
'Assessments and Activities': '評量及活動',
'Assessments are structured reports done by Professional Organizations - data includes WFP Assessments': '評估是由專業組織完成的結構化報告 - 資料包括 WFP 評估',
'Assessments are structured reports done by Professional Organizations': '評估是由專業組織完成的結構化報告',
'Assessments': '評量',
'Assessments:': '評量:',
'Assessor': '評量者',
'Asset Assigned': '指派資產',
'Asset Assignment Details': '資產分派明細',
'Asset Assignments deleted': '資產分派刪除',
'Asset Assignments updated': '資產更新工作分派',
'Asset Assignments': '資產分派',
'Asset Details': '資產明細',
'Asset Log Details': '資產詳細資料日誌',
'Asset Log Empty': '資產空日誌',
'Asset Log Entry Added - Change Label': '資產日誌項目新增-變更標籤',
'Asset Log Entry deleted': '資產日誌項目刪除',
'Asset Log Entry updated': '資產日誌項目更新',
'Asset Log': '資產日誌',
'Asset Management': '資產管理',
'Asset Number': '資產編號',
'Asset added': '已新增資產',
'Asset deleted': '已刪除資產',
'Asset removed': '移除資產',
'Asset updated': '已更新資產',
'Asset': '資產',
'Assets are resources which are not consumable but are expected back, so they need tracking.': '資產是非消耗性但預期歸還的資源,因此需要追蹤。',
'Assets': '資產',
'Assign Asset': '指派資產',
'Assign Group': '指派給群組',
'Assign Staff': '指派人員',
'Assign Storage Location': '指定存儲位置',
'Assign to Org.': '指派給組織。',
'Assign to Organization': '指派給組織',
'Assign to Person': '指派給人員',
'Assign to Site': '指派給網站',
'Assign': '指派',
'Assigned By': '指派者',
'Assigned To': '指派給',
'Assigned to Organization': '指派給組織',
'Assigned to Person': '指派給人員',
'Assigned to Site': '指派給網站',
'Assigned to': '指派給',
'Assigned': '已指派',
'Assignments': '指派',
'Assistance for immediate repair/reconstruction of houses': '協助立即修復/重建房屋',
'Assistant': '助理',
'Assisted Family Care': '輔助家庭照護',
'Assisted Self-care': '輔助自我照護',
'At/Visited Location (not virtual)': '在/瀏覽位置(非虛擬)',
'Attend to information sources as described in <instruction>': '請依<instruction>中所述留意資訊來源',
'Attribution': '出處標示',
'Audit Read': '審核讀取',
'Audit Write': '寫入審核',
'Authentication failed!': '鑒別失敗!',
'Authentication information of foreign server.': '外部伺服器的鑒別資訊。',
'Author': '作者',
'Author:': '作者:',
'Automatic Database Synchronization History': '自動資料庫同步化歷程',
'Automotive': '汽車',
'Availability': '可用性',
'Available Alternative Inventories': '可用的替代庫存',
'Available Beds': '可用床位',
'Available Inventories': '可用的庫存',
'Available Messages': '可用的訊息',
'Available Records': '可用的記錄',
'Available databases and tables': '可用的資料庫及表格',
'Available for Location': '可用的位置',
'Available from': '可用開始時間',
'Available in Viewer?': '可在檢視器中使用?',
'Available until': '截止有效期',
'Availablity': '可用性',
'Avoid the subject event as per the <instruction>': '請依<instruction>避開該事件',
'Babies who are not being breastfed, what are they being fed on?': '不接受母乳喂養的嬰兒吃什麼?',
'Baby And Child Care': '嬰兒及幼兒護理',
'Background Colour for Text blocks': '文字區塊的背景顏色',
'Background Colour': '背景顏色',
'Baldness': '禿頭',
'Banana': '香蕉',
'Bank/micro finance': '銀行/微型金融',
'Barricades are needed': '需要路障',
'Base Layer?': '基本層?',
'Base Layers': '基本層',
'Base Location': '基本位置',
'Base Site Set': '基本網站設定',
'Base Unit': '基本裝置',
'Baseline Data': '基準線資料',
'Baseline Number of Beds': '基線床位數',
'Baseline Type Details': '基準線類型詳細資料',
'Baseline Type added': '新增基準線類型',
'Baseline Type deleted': '刪除基準線類型',
'Baseline Type updated': '更新基準線類型',
'Baseline Type': '基準線類型',
'Baseline Types': '基準線類型',
'Baseline added': '新增基準線',
'Baseline deleted': '刪除基準線',
'Baseline number of beds of that type in this unit.': '在本單位這種類型病床的基線數目。',
'Baseline updated': '更新基準線',
'Baselines Details': '基準線詳細資料',
'Baselines': '基準線',
'Basic Assessment Reported': '基本評量報告',
'Basic Assessment': '基本評量',
'Basic Details': '基本詳細資料',
'Basic information on the requests and donations, such as category, the units, contact details and the status.': '要求和捐贈的基本資訊,如種類,單位,聯絡詳細資料和狀態。',
'Basic medical supplies available prior to disaster': '災前可用的基本醫療用品',
'Basic medical supplies available since disaster': '災後可用的基本醫療用品',
'Basic reports on the Shelter and drill-down by region': '避難所基本報告,並可依區域往下探查',
'Baud rate to use for your modem - The default is safe for most cases': '數據機使用的傳輸速率 - 預設值適用於大多數情況',
'Baud': '傳輸速率',
'Beacon Service URL': '引標服務URL',
'Beam': '樑',
'Bed Capacity per Unit': '每單位床位容量',
'Bed Capacity': '床位容量',
'Bed Type': '床型',
'Bed type already registered': '床型已註冊',
'Bedding materials available': '床上用品材料可用',
'Below ground level': '地面以下',
'Beneficiary Type': '受益人類型',
'Biological Hazard': '生物危害',
'Blood Type (AB0)': '血型 (AB0)',
'Blowing Snow': '吹雪',
'Boat': '船班',
'Bodies found': '尋獲的遺體',
'Bodies recovered': '尋回的遺體',
'Body Recovery Reports': '遺體尋回報告',
'Body Recovery Request': '遺體尋回要求',
'Body Recovery Requests': '遺體尋回要求',
'Body': '遺體',
'Bomb Explosion': '炸彈爆炸',
'Bomb Threat': '炸彈威脅',
'Border Colour for Text blocks': '文字區塊的邊框顏色',
'Bounding Box Insets': '外框內縮',
'Bounding Box Size': '外框大小',
'Boys 13-18 yrs in affected area': '受影響地區的 13-18 歲男孩',
'Boys 13-18 yrs not attending school': '未上學的 13-18 歲男孩',
'Boys 6-12 yrs in affected area': '受影響地區的 6-12 歲男孩',
'Boys 6-12 yrs not attending school': '未上學的 6-12 歲男孩',
'Brand Details': '品牌詳細資料',
'Brand added': '品牌新增',
'Brand deleted': '品牌刪除',
'Brand updated': '品牌更新',
'Brand': '產品',
'Brands': '品牌',
'Breast milk substitutes in use since disaster': '災後使用中的母乳代用品',
'Breast milk substitutes used prior to disaster': '災前使用的母乳代用品',
'Bricks': '磚',
'Bridge Closed': '關閉橋接器',
'Bridge': '橋接器',
'Bucket': '儲存器 (bucket)',
'Buddhist': '佛教徒',
'Budget Details': '預算明細',
'Budget Updated': '更新預算',
'Budget added': '新增預算',
'Budget deleted': '刪除預算',
'Budget updated': '更新預算',
'Budget': '預算',
'Budgeting Module': '預算模組',
'Budgets': '預算',
'Buffer': '緩衝區',
'Bug': '錯誤',
'Building Aide': '建築助理',
'Building Assessments': '建築物評估',
'Building Collapsed': '建築物倒塌',
'Building Name': '建築物名稱',
'Building Safety Assessments': '建築物安全評估',
'Building Short Name/Business Name': '建築物簡稱/商業名稱',
'Building or storey leaning': '建築物或樓層傾斜',
'Built using the Template agreed by a group of NGOs working together as the': '使用由一群共同合作的非政府組織所協議的範本建置,該群組為',
'Bulk Uploader': '大量上傳工具',
'Bundle Contents': '銷售組合內容',
'Bundle Details': '軟體組詳細資料',
'Bundle Updated': '更新軟體組',
'Bundle added': '新增軟體組',
'Bundle deleted': '刪除組',
'Bundle updated': '更新軟體組',
'Bundle': '組合 (bundle)',
'Bundles': '軟體組',
'Burn ICU': '燒燙傷加護病房',
'Burn': '燒燙傷',
'Burned/charred': '燒焦/碳化',
'Business damaged': '商業損壞',
'By Facility': '依設施',
'By Inventory': '依庫存',
'By Site': '依站點',
'By Warehouse': '依倉庫',
'CBA Women': '育齡婦女',
'CSS file %s not writable - unable to apply theme!': 'CSS 檔 %s 無法寫入 - 無法套用佈景主題!',
'Calculate': '計算',
'Camp Coordination/Management': '營地協調/管理',
'Camp Details': '營地詳細資料',
'Camp Service Details': '營地服務詳細資料',
'Camp Service added': '營地服務已新增',
'Camp Service deleted': '營地服務已刪除',
'Camp Service updated': '營地服務已更新',
'Camp Service': '營地服務',
'Camp Services': '營地服務',
'Camp Type Details': '營地類型詳細資料',
'Camp Type added': '營地類型已新增',
'Camp Type deleted': '營地類型已刪除',
'Camp Type updated': '營地類型已更新',
'Camp Type': '營地類型',
'Camp Types and Services': '營地類型和服務',
'Camp Types': '營地類型',
'Camp added': '營地已新增',
'Camp deleted': '營地已刪除',
'Camp updated': '營地已更新',
'Can only disable 1 record at a time!': '一次只能停用一筆記錄!',
'Can users register themselves for authenticated login access?': '是否允許使用者自行註冊以取得鑒別登入存取權?',
'Cancel Log Entry': '取消日誌項目',
'Cancel Shipment': '取消出貨',
'Cancel': '取消',
'Canceled': '已取消',
'Candidate Matches for Body %s': '遺體 %s 的候選相符項',
'Canned Fish': '魚罐頭',
'Cannot be empty': '不能是空的',
'Cannot delete whilst there are linked records. Please delete linked records first.': '仍有鏈結的記錄時無法刪除。請先刪除鏈結的記錄。',
'Cannot disable your own account!': '無法停用您自己的帳戶!',
'Capacity (Max Persons)': '容量(最大人員)',
'Capacity (W x D X H)': '容量(寬x深x高)',
'Capacity': '容量',
'Capture Information on Disaster Victim groups (Tourists, Passengers, Families, etc.)': '擷取災難受害者群體(遊客,乘客,家庭等)的資訊',
'Capture Information on each disaster victim': '擷取每位災難受害者的資訊',
'Capturing organizational information of a relief organization and all the projects they have in the region': '擷取救援組織的組織資訊及其在該區域的所有專案',
'Capturing the essential services each Volunteer is providing and where': '擷取每位志工提供的基本服務及其地點',
'Capturing the projects each organization is providing and where': '擷取每個組織提供的專案及其地點',
'Care Report': '照護報告',
'Care Strategy': '照護策略',
'Cash available to restart business': '可用於重新開業的現金',
'Casual Labor': '臨時工',
'Casualties': '傷亡',
'Catalog Details': '型錄詳細資料',
'Catalog Item added': '型錄項目新增',
'Catalog Item deleted': '型錄項目已刪除',
'Catalog Item updated': '型錄項目更新',
'Catalog Item': '型錄項目',
'Catalog Items': '型錄商品項目',
'Catalog Name': '型錄名稱',
'Catalog added': '新增型錄',
'Catalog deleted': '已刪除型錄',
'Catalog updated': '型錄更新',
'Catalog': '型錄 (catalog)',
'Catalogs': '型錄',
'Categories': '種類',
'Category': '類別',
'Category<>Sub-Category<>Catalog Relation added': 'Category<>Sub-Category<>Catalog新增關系',
'Category<>Sub-Category<>Catalog Relation deleted': 'Category<>Sub-Category<>Catalog關系刪除',
'Category<>Sub-Category<>Catalog Relation updated': 'Category<>Sub-Category<>Catalog關系更新',
'Category<>Sub-Category<>Catalog Relation': 'Category<>Sub-Category<>Catalog關系',
'Ceilings, light fixtures': '天花板,燈具',
'Central point to record details on People': '記錄人員詳細資料的中心點',
'Certificate Catalog': '憑證型錄',
'Certificate Details': '憑證明細',
'Certificate Status': '憑證狀態',
'Certificate added': '添加憑證',
'Certificate deleted': '已刪除憑證',
'Certificate updated': '已更新憑證',
'Certificate': '憑證',
'Certificates': '憑證',
'Certification Details': '認證詳細資料',
'Certification added': '新增認證',
'Certification deleted': '刪除認證',
'Certification updated': '更新認證',
'Certification': '認證',
'Certifications': '認證',
'Certifying Organization': '認證組織',
'Change Password': '變更密碼',
'Check Request': '檢查要求',
'Check for errors in the URL, maybe the address was mistyped.': '檢查 URL 中的錯誤,地址可能輸入錯誤。',
'Check if the URL is pointing to a directory instead of a webpage.': '請檢查 URL 是否指向目錄而非網頁。',
'Check outbox for the message status': '檢查寄件匣的訊息狀態',
'Check to delete': '勾選以刪除',
'Check to delete:': '勾選以刪除:',
'Check': '檢查',
'Check-in': '入住',
'Check-out': '退房',
'Checklist created': '已建立核對清單',
'Checklist deleted': '刪除清單',
'Checklist of Operations': '作業核對清單',
'Checklist updated': '更新清單',
'Checklist': '核對清單',
'Chemical Hazard': '化學危害',
'Chemical, Biological, Radiological, Nuclear or High-Yield Explosive threat or attack': '化學,生物,放射性,核能或高威力爆炸物的威脅或攻擊',
'Chicken': '雞肉',
'Child (2-11)': '兒童 (2-11)',
'Child (< 18 yrs)': '兒童 (未滿18歲)',
'Child Abduction Emergency': '兒童誘拐緊急事件',
'Child headed households (<18 yrs)': '兒童為戶長的家庭 (未滿18歲)',
'Child': '兒童',
'Children (2-5 years)': '小孩(二到五歲)',
'Children (5-15 years)': '小孩(五到十五歲)',
'Children (< 2 years)': '小孩(不到兩歲)',
'Children in adult prisons': '小孩關在成人的監牢裡',
'Children in boarding schools': '小孩在住宿學校裡',
'Children in homes for disabled children': '殘障兒童之家中的兒童',
'Children in juvenile detention': '小孩在青少年監獄裡',
'Children in orphanages': '孤兒院中的兒童',
'Children living on their own (without adults)': '獨自生活的兒童(無成人陪伴)',
'Children not enrolled in new school': '未在新學校註冊的兒童',
'Children orphaned by the disaster': '因災難成為孤兒的兒童',
'Children separated from their parents/caregivers': '與父母/照顧者分離的兒童',
'Children that have been sent to safe places': '已被送往安全地點的兒童',
'Children who have disappeared since the disaster': '災後失蹤的兒童',
'Children with chronical illnesses': '患慢性疾病的兒童',
'Chinese (Taiwan)': '中文(臺灣)',
'Chinese': '中文',
'Cholera Treatment Capability': '霍亂治療能力',
'Cholera Treatment Center': '霍亂治療中心',
'Cholera Treatment': '霍亂治療',
'Cholera-Treatment-Center': '霍亂治療中心',
'Choose Manually': '選擇手動',
'Choose a new posting based on the new evaluation and team judgement. Severe conditions affecting the whole building are grounds for an UNSAFE posting. Localised Severe and overall Moderate conditions may require a RESTRICTED USE. Place INSPECTED placard at main entrance. Post all other placards at every significant entrance.': '根據新的評估和團隊判斷選擇新的標示。影響整棟建築物的嚴重狀況是標示「危險」的依據。局部嚴重和整體中度的狀況可能需要標示「限制使用」。將「已檢查」標示牌張貼於主要入口,其他標示牌張貼於每個重要入口。',
'Choose from one of the following options': '選擇下列其中一個選項',
'Choosing Skill and Resources of Volunteers': '選擇志工的技能和資源',
'Christian': '基督徒',
'Church': '教堂',
'Circumstances of disappearance, other victims/witnesses who last saw the missing person alive.': '失蹤時的情況,以及最後看見失蹤者存活的其他受害者/目擊者。',
'City': '城市',
'Civil Emergency': '民事緊急',
'Clear Selection': '取消選擇',
'Click here to open log': '請按一下這裡來開啟日誌',
'Click on a Map': '按一下地圖',
'Click on an ID in the left-hand column to make a Pledge to match a request for aid.': '按一下左欄中的 ID 以做出承諾,回應援助要求。',
'Click on the link %(url)s to reset your password': '按一下的鏈結 %(url)s 若要重設您的密碼',
'Click on the link %(url)s to verify your email': '按一下的鏈結 %(url)s 若要驗證您的電子郵件',
'Client IP': '用戶端 IP',
'Clinical Laboratory': '臨床實驗室',
'Clinical Operations': '臨床作業',
'Clinical Status': '臨床狀態',
'Closed': '結案',
'Closure': '結束',
'Clothing': '衣服',
'Cluster Details': '叢集詳細資料',
'Cluster Distance': '叢集距離',
'Cluster Subsector Details': '集群界別分組詳細資料',
'Cluster Subsector added': 'Subsector新增至叢集',
'Cluster Subsector deleted': 'Subsector刪除叢集',
'Cluster Subsector updated': '集群界別分組更新',
'Cluster Subsector': '叢集Subsector',
'Cluster Subsectors': '集群界別分組',
'Cluster Threshold': '集群臨界值',
'Cluster added': '新增叢集',
'Cluster deleted': '刪除叢集',
'Cluster updated': '更新集群',
'Cluster': '叢集',
'Cluster(s)': '叢集(S)',
'Clusters': '叢集',
'Code': '程式碼',
'Cold Wave': '寒流',
'Collapse, partial collapse, off foundation': '倒塌,局部倒塌,偏離地基',
'Collective center': '群體中心',
'Colour for Underline of Subheadings': '子標題底線的顏色',
'Colour of Buttons when hovering': '游標停留時按鈕的顏色',
'Colour of bottom of Buttons when not pressed': '未按下時按鈕底部的顏色',
'Colour of bottom of Buttons when pressed': '按下時按鈕底部的顏色',
'Colour of dropdown menus': '下拉功能表的顏色',
'Colour of selected Input fields': '選取的輸入欄位的顏色',
'Colour of selected menu items': '選取的功能表項目的顏色',
'Column Choices (One Per Line': '直欄選項(每行一個',
'Columns, pilasters, corbels': '柱,壁柱,托架',
'Combined Method': '合併方法',
'Come back later. Everyone visiting this site is probably experiencing the same problem as you.': '請稍後再回來。造訪此網站的每個人可能都遇到和您相同的問題。',
'Come back later.': '請稍後再回來。',
'Comments': '備註',
'Commercial/Offices': '商業/辦公室',
'Commit Date': '承諾日期',
'Commit from %s': '來自 %s 的承諾',
'Commit': '承諾',
'Commit Status': '承諾狀態',
'Commiting a changed spreadsheet to the database': '將變更後的試算表提交至資料庫',
'Commitment Added': '新增的承諾書',
'Commitment Canceled': '取消承諾',
'Commitment Details': '承諾書細節',
'Commitment Item Details': '承諾項目細節',
'Commitment Item added': '新增承諾項目',
'Commitment Item deleted': '已刪除之承諾項目',
'Commitment Item updated': '承諾項目更新',
'Commitment Item': '承諾項目',
'Commitment Items': '承諾項目',
'Commitment Status': '承諾狀態',
'Commitment Updated': '更新承諾',
'Commitment': '承諾',
'Commitments': '承諾',
'Committed By': '承諾者',
'Committed': '已承諾',
'Committing Inventory': '承諾庫存',
'Communication problems': '通訊問題',
'Community Centre': '社區中心',
'Community Health Center': '社區健康中心',
'Community Member': '社區成員',
'Competencies': '能力',
'Competency Details': '能力詳細資料',
'Competency Rating Catalog': '能力分級目錄',
'Competency Rating Details': '能力詳細分級',
'Competency Rating added': '能力新增分級',
'Competency Rating deleted': '能力刪除分級',
'Competency Rating updated': '能力更新評比',
'Competency Ratings': '能力等級',
'Competency added': '新增能力',
'Competency deleted': '刪除能力',
'Competency updated': '更新能力',
'Competency': '能力',
'Complete Database Synchronized': '完成資料庫同步',
'Complete Unit Label for e.g. meter for m.': '完整的單位標籤,例如 m 的完整標籤為 meter。',
'Complete': '完成',
'Completed': '已完成',
'Compose': '傳訊',
'Compromised': '受損',
'Concrete frame': '混凝土構架',
'Concrete shear wall': '混凝土剪力牆',
'Condition': '條件',
'Config added': '新增配置',
'Config deleted': '刪除配置',
'Config updated': '更新配置',
'Config': '配置',
'Configs': 'configs',
'Configurations': '配置',
'Configure Run-time Settings': '配置執行時期設定',
'Confirm Shipment Received': '確認出貨接收',
'Confirmed Incidents': '確認事件',
'Confirmed': '已確認',
'Confirming Organization': '確認組織',
'Conflict Details': '衝突明細',
'Conflict Resolution': '衝突解決',
'Consignment Note': '託運單',
'Constraints Only': '僅限制',
'Consumable': '消耗品',
'Contact Data': '聯絡資料',
'Contact Details': '聯絡人詳細資料',
'Contact Info': '聯絡資訊',
'Contact Information Added': '新增聯絡資訊',
'Contact Information Deleted': '刪除聯絡資訊',
'Contact Information Updated': '更新聯絡資訊',
'Contact Information': '聯絡資訊',
'Contact Method': '聯絡方式',
'Contact Name': '聯絡人名稱',
'Contact Person': '聯絡人',
'Contact Phone': '聯絡電話',
'Contact details': '聯絡人詳細資料',
'Contact information added': '新增聯絡資訊',
'Contact information deleted': '刪除聯絡資訊',
'Contact information updated': '更新聯絡資訊',
'Contact person in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '如有新消息或進一步問題時的聯絡人(如與報告者不同)。請儘可能提供電話號碼,地址和電子郵件。',
'Contact person(s) in case of news or further questions (if different from reporting person). Include telephone number, address and email as available.': '如有新消息或進一步問題時的聯絡人(如與報告者不同)。請儘可能提供電話號碼,地址和電子郵件。',
'Contact us': '聯絡我們',
'Contact': '聯絡人',
'Contacts': '聯絡人',
'Contents': '目錄',
'Contradictory values!': '互相矛盾的值!',
'Contributor': '提供者',
'Conversion Tool': '轉換工具',
'Cooking NFIs': '烹飪用非食品類物資 (NFI)',
'Cooking Oil': '食用油',
'Coordinate Conversion': '座標轉換',
'Coping Activities': '應對活動',
'Copy any data from the one to be deleted into the one to keep': '將要刪除者的資料複製到要保留者',
'Copy': '複製',
'Corn': '玉米',
'Cost Type': '成本類型',
'Cost per Megabyte': '每MB成本',
'Cost per Minute': '每分鐘成本',
'Country of Residence': '居住國家',
'Country': '國家',
'County': '縣',
'Course Catalog': '課程型錄',
'Course Certicate Details': '課程憑證詳細資料',
'Course Certicate added': '課程憑證已新增',
'Course Certicate deleted': '課程憑證已刪除',
'Course Certicate updated': '課程憑證已更新',
'Course Certicates': '課程憑證',
'Course Certificates': '課程憑證',
'Course Details': '課程詳細資料',
'Course added': '課程已新增',
'Course deleted': '課程已刪除',
'Course updated': '課程已更新',
'Course': '課程',
'Courses': '課程',
'Create & manage Distribution groups to receive Alerts': '建立並管理發送通知的群組',
'Create Activity Report': '新增活動報告',
'Create Activity Type': '新增活動類型',
'Create Activity': '新增活動',
'Create Assessment': '新增評量',
'Create Asset': '新增資產',
'Create Bed Type': '新增床位類型',
'Create Brand': '新增品牌',
'Create Budget': '新增預算',
'Create Catalog Item': '新增型錄項目',
'Create Catalog': '新增型錄',
'Create Certificate': '新增憑證',
'Create Checklist': '建立核對清單',
'Create Cholera Treatment Capability Information': '新增Cholera處理功能資訊',
'Create Cluster Subsector': '新增叢集Subsector',
'Create Cluster': '新增叢集',
'Create Competency Rating': '新增能力分級',
'Create Contact': '新增聯絡人',
'Create Course': '新增課程',
'Create Dead Body Report': '新增遺體報告',
'Create Event': '建立新的事件',
'Create Facility': '新增機能',
'Create Feature Layer': '新增功能層',
'Create Group Entry': '新增群組',
'Create Group': '新增群組',
'Create Hospital': '新增醫院',
'Create Identification Report': '新增識別報告',
'Create Impact Assessment': '建立影響評估',
'Create Import Job': '建立匯入工作',
'Create Incident Report': '新增事件報告',
'Create Incident': '新增事件',
'Create Item Category': '新增項目種類',
'Create Item Pack': '新增項目套件',
'Create Item': '新增項目',
'Create Kit': '新增套件',
'Create Layer': '新增層',
'Create Location': '新增位置',
'Create Map Profile': '新增地圖配置',
'Create Marker': '新增標記',
'Create Member': '新增成員',
'Create Mobile Impact Assessment': '建立行動式影響評估',
'Create Office': '新增辦公室',
'Create Organization': '新增組織',
'Create Personal Effects': '新增個人物品',
'Create Project': '新增專案',
'Create Projection': '新增投射',
'Create Rapid Assessment': '建立快速評量',
'Create Report': '新增新報告',
'Create Request': '建立要求',
'Create Resource': '新增資源',
'Create River': '新增河流',
'Create Role': '新增角色',
'Create Room': '新增房間',
'Create Scenario': '建立新情境',
'Create Sector': '新增部門',
'Create Service Profile': '新增服務設定檔',
'Create Shelter Service': '新增避難所服務',
'Create Shelter Type': '新增避難所類型',
'Create Shelter': '新增避難所',
'Create Skill Type': '新增技術類型',
'Create Skill': '新增技能',
'Create Staff Member': '新增人員',
'Create Status': '新增狀態',
'Create Task': '新增作業',
'Create Theme': '新增佈景主題',
'Create User': '新增使用者',
'Create Volunteer': '新增志工',
'Create Warehouse': '新增倉儲',
'Create a Person': '新增人員',
'Create a group entry in the registry.': '在登錄表中建立群組.',
'Create, enter, and manage surveys.': '建立,輸入及管理調查。',
'Creation of Surveys': '調查的建立',
'Credential Details': '認證詳細資料',
'Credential added': '新增認證',
'Credential deleted': '刪除認證',
'Credential updated': '更新認證',
'Credentialling Organization': '認證組織',
'Credentials': '認證',
'Credit Card': '信用卡',
'Crime': '犯罪',
'Criteria': '準則',
'Currency': '貨幣',
'Current Entries': '現行項目',
'Current Group Members': '現有組員',
'Current Identities': '現行身分',
'Current Location': '目前地點',
'Current Log Entries': '現行日誌項目',
'Current Memberships': '現行的成員資格',
'Current Notes': '現行附註',
'Current Records': '現行記錄',
'Current Registrations': '目前登錄',
'Current Status': '現行狀態',
'Current Team Members': '現行團隊成員',
'Current Twitter account': '現行Twitter帳戶',
'Current community priorities': '現行社群優先順序',
'Current general needs': '目前的一般需求',
'Current greatest needs of vulnerable groups': '弱勢群體目前最大的需求',
'Current health problems': '目前的健康問題',
'Current main income sources': '目前主要收入來源',
'Current major expenses': '目前主要費用',
'Current number of patients': '病患的現行數目',
'Current problems, categories': '現行問題,種類',
'Current problems, details': '現行問題,詳細資料',
'Current request': '現行要求',
'Current response': '現行回應',
'Current session': '現行階段作業',
'Current type of health problems, adults': '目前成人健康問題的類型',
'Current type of health problems, children': '目前兒童健康問題的類型',
'Current type of source for drinking water': '目前飲用水水源的類型',
'Current type of source for sanitary water': '目前衛生用水水源的類型',
'Currently no Certifications registered': '目前沒有認證登錄',
'Currently no Competencies registered': '目前沒有登錄能力',
'Currently no Course Certicates registered': '目前沒有登錄課程憑證',
'Currently no Credentials registered': '目前沒有認證登錄',
'Currently no Missions registered': '目前沒有任務註冊',
'Currently no Skill Equivalences registered': '目前沒有登錄技能等值',
'Currently no Trainings registered': '目前沒有登錄訓練',
'Currently no entries in the catalog': '在型錄中目前沒有項目',
'Currently your system has default username and password. Username and Password are required by foriegn machines to sync data with your computer. You may set a username and password so that only those machines can fetch and submit data to your machines which your grant access by sharing your password.': '目前您的系統使用預設的使用者名稱及密碼。外部機器與您的電腦同步資料時需要使用者名稱及密碼。您可以設定使用者名稱和密碼,如此只有您透過共用密碼授予存取權的機器,才能向您的機器提取及提交資料。',
'Custom Database Resource (e.g., anything defined as a resource in Sahana)': '自訂資料庫資源(例如,任何定義為中的資源Sahana)',
'Customisable category of aid': '可自訂的援助種類',
'DECISION': '決策',
'DNA Profile': 'DNA 圖譜',
'DNA Profiling': 'DNA 鑑定',
'DVI Navigator': 'DVI導覽器',
'Daily': '每日',
'Dam Overflow': '水壩溢流',
'Damage': '損壞',
'Dangerous Person': '危險的人員',
'Dashboard': '儀表板',
'Data import policy': '資料匯入原則',
'Data uploaded': '上傳資料',
'Data': '資料',
'Database': '資料庫',
'Date & Time': '日期和時間',
'Date Avaialble': '可用日期',
'Date Available': '可出貨日期',
'Date Received': '收到的日期',
'Date Requested': '要求日期',
'Date Required': '需要的日期',
'Date Sent': '傳送日期',
'Date Until': '日期之前',
'Date and Time of Goods receipt. By default shows the current time but can be modified by editing in the drop down list.': '收貨的日期和時間。依預設顯示目前時間,但可透過下拉清單編輯修改。',
'Date and Time': '日期與時間',
'Date and time this report relates to.': '本報告相關的日期與時間。',
'Date of Birth': '出生日期',
'Date of Latest Information on Beneficiaries Reached': '日期的最新資訊達到受益人',
'Date of Report': '報告的日期',
'Date': '日期',
'Date/Time of Find': '尋找的日期/時間',
'Date/Time of disappearance': '失蹤的日期/時間',
'Date/Time when found': '尋獲的日期/時間',
'Date/Time when last seen': '最後目擊的日期/時間',
'Date/Time': '日期/時間',
'De-duplicator': '去重複工具',
'Dead Body Details': '遺體詳細資料',
'Dead Body Reports': '遺體報告',
'Dead Body': '遺體',
'Dead body report added': '遺體報告已新增',
'Dead body report deleted': '遺體報告已刪除',
'Dead body report updated': '遺體報告已更新',
'Deaths in the past 24h': '過去24小時的死亡人數',
'Debug': '除錯',
'Decimal Degrees': '小數度',
'Decision': '決策',
'Decomposed': '分解',
'Default Height of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設高度。在視窗佈局中,地圖會最大化以填滿視窗,因此不需要在此設定較大的值。',
'Default Height of the map window.': '地圖視窗的預設高度。',
'Default Map': '預設地圖',
'Default Marker': '預設標記',
'Default Width of the map window. In Window layout the map maximises to fill the window, so no need to set a large value here.': '地圖視窗的預設寬度。在視窗佈局中,地圖會最大化以填滿視窗,因此不需要在此設定較大的值。',
'Default Width of the map window.': '地圖視窗的預設寬度。',
'Default synchronization policy': '預設同步化原則',
'Defaults updated': '預設更新',
'Defaults': '預設值',
'Defecation area for animals': '動物的排泄區域',
'Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).': '定義情境以配置適當的資源(人力,資產和設施)。',
'Defines the icon used for display of features on handheld GPS.': '定義在手持 GPS 上顯示圖徵時所用的圖示。',
'Defines the icon used for display of features on interactive map & KML exports. A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class. If neither are defined, then the Default Marker is used.': '定義在互動式地圖及 KML 匯出中顯示圖徵時所用的圖示。若需要覆寫指派給圖徵類別的標記,可設定指派給個別位置的標記。若兩者皆未定義,則使用預設標記。',
'Defines the icon used for display of features on interactive map & KML exports.': '定義在互動式地圖及 KML 匯出中顯示圖徵時所用的圖示。',
'Defines the marker used for display & the attributes visible in the popup.': '定義顯示時使用的標記,以及蹦現視窗中可見的屬性。',
'Degrees must be a number between -180 and 180': '度必須是介於 -180 和 180 之間的數字',
'Degrees must be between -180 and 180': '度必須介於 -180 和 180 之間',
'Degrees should be greater than 0 and less than 180': '度應該大於 0 且小於 180',
'Delete Aid Request': '刪除輔助請求',
'Delete Alternative Item': '刪除替代項目',
'Delete Assessment Summary': '刪除評量摘要',
'Delete Assessment': '刪除評量',
'Delete Asset Assignments': '刪除資產分派',
'Delete Asset Log Entry': '刪除資產日誌項目',
'Delete Asset': '刪除資產',
'Delete Baseline Type': '刪除基準線類型',
'Delete Baseline': '刪除基準線',
'Delete Brand': '刪除品牌',
'Delete Budget': '刪除預算',
'Delete Bundle': '刪除軟體組',
'Delete Catalog Item': '刪除型錄項目',
'Delete Catalog': '刪除型錄',
'Delete Certificate': '刪除憑證',
'Delete Certification': '刪除認證',
'Delete Cluster Subsector': '刪除叢集Subsector',
'Delete Cluster': '刪除叢集',
'Delete Commitment Item': '刪除承諾項目',
'Delete Commitment': '刪除承諾',
'Delete Competency Rating': '刪除能力分級',
'Delete Competency': '刪除能力',
'Delete Config': '刪除配置',
'Delete Contact Information': '刪除聯絡人資訊',
'Delete Course Certicate': '刪除課程憑證',
'Delete Course': '刪除課程',
'Delete Credential': '刪除認證',
'Delete Distribution Item': '刪除分配項目',
'Delete Distribution': '刪除配送',
'Delete Document': '刪除文件',
'Delete Donor': '刪除捐贈者',
'Delete Entry': '刪除項目',
'Delete Event': '刪除事件',
'Delete Feature Layer': '刪除功能層',
'Delete Group': '刪除群組',
'Delete Hospital': '刪除醫院',
'Delete Image': '刪除影像',
'Delete Impact Type': '刪除影響類型',
'Delete Impact': '刪除影響',
'Delete Incident Report': '刪除事故報告',
'Delete Incident': '刪除事件',
'Delete Inventory Item': '刪除庫存項目',
'Delete Inventory Store': '刪除資產儲存庫',
'Delete Item Category': '刪除項目種類',
'Delete Item Pack': '刪除項目套件',
'Delete Item': '刪除項目',
'Delete Job Role': '刪除工作角色',
'Delete Key': '刪除金鑰',
'Delete Kit': '刪除套件',
'Delete Landmark': '刪除地標',
'Delete Layer': '刪除層',
'Delete Level 1 Assessment': '刪除層次一評量',
'Delete Level 2 Assessment': '刪除層次二評量',
'Delete Location': '刪除位置',
'Delete Map Profile': '刪除地圖配置',
'Delete Marker': '刪除標記',
'Delete Membership': '刪除組員',
'Delete Message': '刪除訊息',
'Delete Metadata': '刪除 Meta 資料',
'Delete Mission': '刪除任務',
'Delete Need Type': '刪除需求類型',
'Delete Need': '需要刪除',
'Delete Office': '刪除辦公室',
'Delete Old': '刪除舊',
'Delete Organization': '刪除組織',
'Delete Peer': '刪除同層級',
'Delete Person': '刪除人員',
'Delete Photo': '刪除照片',
'Delete Population Statistic': '刪除人口統計資料',
'Delete Position': '刪除位置',
'Delete Project': '刪除專案',
'Delete Projection': '刪除投射',
'Delete Rapid Assessment': '刪除快速評量',
'Delete Received Item': '刪除接收項目',
'Delete Received Shipment': '刪除接收出貨',
'Delete Record': '刪除記錄',
'Delete Recovery Report': '刪除回復報告',
'Delete Report': '刪除報告',
'Delete Request Item': '刪除要求項目',
'Delete Request': '刪除要求',
'Delete Resource': '刪除資源',
'Delete Room': '刪除房間',
'Delete Scenario': '刪除實務範例',
'Delete Section': '刪除區段',
'Delete Sector': '刪除部門',
'Delete Sent Item': '刪除傳送項目',
'Delete Sent Shipment': '刪除傳送出貨',
'Delete Service Profile': '刪除服務設定檔',
'Delete Setting': '刪除設定',
'Delete Skill Equivalence': '刪除技術等值',
'Delete Skill Provision': '刪除技術供應',
'Delete Skill Type': '刪除技術類型',
'Delete Skill': '刪除技術',
'Delete Staff Type': '刪除人員類型',
'Delete Status': '刪除狀態',
'Delete Subscription': '刪除訂閱',
'Delete Subsector': '刪除Subsector',
'Delete Survey Answer': '刪除調查回答',
'Delete Survey Question': '刪除調查問題',
'Delete Survey Section': '刪除調查區段',
'Delete Survey Series': '刪除調查系列',
'Delete Survey Template': '刪除調查範本',
'Delete Training': '刪除訓練',
'Delete Unit': '刪除單元',
'Delete User': '刪除使用者',
'Delete Volunteer': '刪除志願者',
'Delete Warehouse': '刪除倉庫',
'Delete from Server?': '刪除從伺服器嗎?',
'Delete': '刪除',
'Delivered': '已遞送',
'Delphi Decision Maker': 'Delphi 決策工具',
'Demographic': '人口統計學',
'Demonstrations': '示範',
'Dental Examination': '牙齒檢查',
'Dental Profile': '牙齒設定檔',
'Department/Unit Name': '部門/單元名稱',
'Deployment': '部署',
'Describe the condition of the roads to your hospital.': '描述前往貴醫院道路的狀況。',
"Describe the procedure which this record relates to (e.g. 'medical examination')": '描述此記錄相關的程序(例如"醫學檢查")',
'Description of Bin Type': '貯存箱類型的說明',
'Description of Contacts': '聯絡人的說明',
'Description of defecation area': '排泄區域的說明',
'Description of drinking water source': '飲用水水源的說明',
'Description of sanitary water source': '衛生用水水源的說明',
'Description of water source before the disaster': '災前水源的說明',
'Description': '說明',
'Descriptive Text (e.g., Prose, etc)': '說明文字(例如, Prose等)',
'Designated for': '指定的',
'Desire to remain with family': '希望與家人同處',
'Destination': '目的地',
'Destroyed': '已毀損',
'Details field is required!': '詳細資料欄位是必要的!',
'Details': '詳細資料',
'Diaphragms, horizontal bracing': '橫隔版,水平支撐',
'Diarrhea among children under 5': '5歲以下兒童腹瀉',
'Dignitary Visit': '要人訪問',
'Dimensions of the storage bin. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '貯存箱的尺寸。請依 1 x 2 x 3(寬x深x高)的格式輸入,然後從下拉清單選擇單位。',
'Dimensions of the storage location. Input in the following format 1 x 2 x 3 for width x depth x height followed by choosing the unit from the drop down list.': '儲存位置的尺寸。請依 1 x 2 x 3(寬x深x高)的格式輸入,然後從下拉清單選擇單位。',
'Direction': '方向',
'Disabilities': '殘障人士',
'Disable': '停用',
'Disabled participating in coping activities': '殘疾人士參與應對活動',
'Disabled': '已停用',
'Disabled?': '殘疾人士?',
'Disaster Victim Identification': '災民身份識別',
'Disaster Victim Registry': '災民登錄',
'Disaster clean-up/repairs': '災後清理/修復',
'Discharge (cusecs)': '流量 (cusecs)',
'Discharges/24hrs': '出院人次/24小時',
'Discussion Forum on item': '項目的討論論壇',
'Discussion Forum': '討論區',
'Disease vectors': '病媒',
'Diseases': '疾病',
'Dispatch Items': '分派項目',
'Dispatch': '分派',
'Displaced Populations': '流離失所人口',
'Displaced': '流離失所',
'Display Polygons?': '顯示多邊形?',
'Display Routes?': '顯示路由?',
'Display Tracks?': '顯示追蹤?',
'Display Waypoints?': '顯示路逕點?',
'Dispose Expired/Unusable Items': '處置過期/無法使用的項目',
'Dispose': '處置',
'Distance between defecation area and water source': '排泄區域與水源的距離',
'Distance between latrines and temporary shelter in meters': '廁所與臨時避難所的距離(公尺)',
'Distance between shelter and latrines': '避難所與廁所的距離',
'Distance from %s:': '與 %s 的距離:',
'Distance(Kms)': '距離(Kms)',
'Distribution Details': '配送明細',
'Distribution Item Details': '分配項目詳細資料',
'Distribution Item added': '分配項目新增',
'Distribution Item deleted': '分配項目刪除',
'Distribution Item updated': '配送更新項目',
'Distribution Item': '項目分配',
'Distribution Items': '項目分配',
'Distribution added': '配送新增',
'Distribution deleted': '刪除分配',
'Distribution groups': '收件群組',
'Distribution updated': '配送更新',
'Distribution': '配送',
'Distributions': '分配',
'District': '區',
'Do adolescent and youth in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社區中的青少年是否參與幫助他們應對災難的活動?(例如會議,宗教活動,志願參與社區清理等)',
'Do households each have at least 2 containers (10-20 litres each) to hold water?': '每個家庭至少有兩個儲水器(每個10-20公升)儲水嗎?',
'Do households have appropriate equipment and materials to cook their food (stove, pots, dished plates, and a mug/drinking vessel, etc)?': '每個家庭是否有適當的烹調設備和材料來煮食(爐,壺,盤,碟,杯等)?',
'Do households have bedding materials available (tarps, plastic mats, blankets)?': '每個家庭是否有被鋪(防水布,塑膠墊子,毯子)?',
'Do households have household water storage containers?': '每個家庭是否有儲水器?',
'Do minority members in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社區中的少數族群成員是否參與幫助他們應對災難的活動?(例如會議,宗教活動,志願參與社區清理等)',
'Do older people in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社區中的老人是否參與幫助他們應對災難的活動?(例如會議,宗教活動,志願參與社區清理等)',
'Do people have at least 2 full sets of clothing (shirts, pants/sarong, underwear)?': '人們是否有至少2套完整的服裝(襯衫,褲子/紗籠,內衣)?',
'Do people have reliable access to sufficient sanitation/hygiene items (bathing soap, laundry soap, shampoo, toothpaste and toothbrush)?': '人們是否能可靠地獲得足夠的衛生/衛生用品(沐浴香皂,洗衣皁,洗髮水,牙膏和牙刷)?',
'Do people with disabilities in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社區中的殘障人士是否參與幫助他們應對災難的活動?(例如會議,宗教活動,志願參與社區清理等)',
'Do women and girls have easy access to sanitary materials?': '婦女和女孩是否容易取得衛生用品?',
'Do women in your community participate in activities that help them cope with the disaster? (ex. meetings, religious activities, volunteer in the community clean-up, etc)': '您社區中的婦女是否參與幫助她們應對災難的活動?(例如會議,宗教活動,志願參與社區清理等)',
'Do you have access to cash to restart your business?': '您是否有現金可以重新開業?',
'Do you know of any incidents of violence?': '您是否知道任何暴力事件?',
'Do you know of children living on their own (without adults)?': '您是否知道有兒童獨自生活(無成人陪伴)?',
'Do you know of children separated from their parents or caregivers?': '您是否知道有兒童與父母或照顧者分離?',
'Do you know of children that have been orphaned by the disaster?': '您是否知道有兒童因災難成為孤兒?',
'Do you know of children that have been sent to safe places?': '您是否知道有兒童被送往安全地點?',
'Do you know of children that have disappeared without explanation in the period since the disaster?': '您是否知道有兒童在災後無故失蹤?',
'Do you know of older people who are primary caregivers of children?': '您是否知道有老人是兒童的主要照顧者?',
'Do you know of parents/caregivers missing children?': '您是否知道有父母/照顧者的孩子失蹤?',
'Do you really want to delete these records?': '您確定要刪除這些記錄嗎?',
'Do you want to cancel this received shipment? The items will be removed from the Inventory. This action CANNOT be undone!': '您要取消這筆已接收的出貨嗎?項目將從庫存中移除。這個動作無法復原!',
'Do you want to cancel this sent shipment? The items will be returned to the Inventory. This action CANNOT be undone!': '您要取消這筆已傳送的出貨嗎?項目將退回庫存。這個動作無法復原!',
'Do you want to over-write the file metadata with new default values?': '您要以新的預設值覆寫檔案的 meta 資料嗎?',
'Do you want to receive this shipment?': '您要接收此出貨?',
'Do you want to send these Committed items?': '您要傳送這些已確定的項目嗎?',
'Do you want to send this shipment?': '您要傳送此出貨?',
'Document Details': '文件詳細資料',
'Document Library': '文件庫',
'Document Scan': '文件掃描',
'Document added': '新增文件',
'Document deleted': '文件已刪除',
'Document updated': '文件已更新',
'Document': '文件',
'Documents and Photos': '文件和照片',
'Documents': '文件',
'Does this facility provide a cholera treatment center?': '該設施是否提供霍亂治療中心?',
'Doing nothing (no structured activity)': '什麼都不做(沒有結構化的活動)',
'Dollars': '美元',
'Domain': '網域',
'Domestic chores': '家務',
'Donated': '已捐贈',
'Donation Certificate': '捐贈證書',
'Donation Phone #': '捐贈電話號碼',
'Donor Details': 'Donor詳細資料',
'Donor added': '新增Donor',
'Donor deleted': '刪除Donor',
'Donor updated': '已更新捐贈者',
'Donors Report': 'Donors報告',
'Door frame': '門框架',
'Download PDF': '下載 PDF',
'Draft Features': '草稿功能',
'Draft': '草稿',
'Drainage': '排水',
'Drawing up a Budget for Staff & Equipment across various Locations.': '為各地點的人員與設備編列預算。',
'Drill Down by Group': '依群組向下探查',
'Drill Down by Incident': '依事件向下探查',
'Drill Down by Shelter': '依收容所向下探查',
'Driving License': '駕照',
'Drugs': '藥物',
'Dug Well': '掘井',
'Duplicate?': '重複?',
'Duration': '持續時間',
'Dust Storm': '沙塵暴',
'Dwelling': '住宅',
'EMS Reason': 'EMS原因',
'EMS Status Reason': 'EMS狀態原因',
'EMS Status': 'EMS狀態',
'EMS Traffic Status': 'EMS交通狀態',
'ER Status Reason': 'ER狀態原因',
'ER Status': 'ER狀態',
'Early Recovery': '早期回復',
'Earthquake': '地震',
'Easy access to sanitation items for women/girls': '婦女/女孩可方便取得衛生用品',
'Edit Activity': '編輯活動',
'Edit Address': '編輯地址',
'Edit Aid Request': '編輯輔助請求',
'Edit Alternative Item': '編輯替代項目',
'Edit Application': '編輯應用程式',
'Edit Assessment Summary': '編輯評量摘要',
'Edit Assessment': '編輯評量',
'Edit Asset Assignment': '編輯資產分派',
'Edit Asset Log Entry': '編輯資產日誌項目',
'Edit Asset': '編輯資產',
'Edit Baseline Type': '編輯基準線類型',
'Edit Baseline': '編輯基準線',
'Edit Brand': '編輯品牌',
'Edit Budget': '編輯預算',
'Edit Bundle': '編輯軟體組',
'Edit Camp Service': '編輯營地服務',
'Edit Camp Type': '編輯營地類型',
'Edit Camp': '編輯營地',
'Edit Catalog Item': '編輯型錄項目',
'Edit Catalog': '編輯目錄',
'Edit Category<>Sub-Category<>Catalog Relation': '編輯種類<>子種類<>型錄關係',
'Edit Certificate': '編輯證書',
'Edit Certification': '編輯認證',
'Edit Cluster Subsector': '編輯叢集子部門',
'Edit Cluster': '編輯叢集',
'Edit Commitment Item': '編輯承諾項目',
'Edit Commitment': '編輯承諾',
'Edit Competency Rating': '編輯能力分級',
'Edit Competency': '編輯能力',
'Edit Config': '編輯配置',
'Edit Contact Information': '編輯聯絡資訊',
'Edit Contact': '編輯聯絡人',
'Edit Contents': '編輯內容',
'Edit Course Certicate': '編輯課程證書',
'Edit Course': '編輯課程',
'Edit Credential': '編輯認證',
'Edit Dead Body Details': '編輯遺體詳細資料',
'Edit Defaults': '編輯預設值',
'Edit Description': '編輯說明',
'Edit Details': '編輯詳細資料',
'Edit Disaster Victims': '編輯災難受害者',
'Edit Distribution Item': '編輯分配項目',
'Edit Distribution': '編輯分配',
'Edit Document': '編輯文件',
'Edit Donor': '編輯捐贈者',
'Edit Email Settings': '編輯電子郵件設定',
'Edit Entry': '編輯條目',
'Edit Event': '編輯事件',
'Edit Facility': '編輯設備',
'Edit Feature Layer': '編輯功能層',
'Edit Flood Report': '編輯水災報告',
'Edit Gateway Settings': '編輯閘道設定',
'Edit Group': '編輯群組',
'Edit Hospital': '編輯醫院',
'Edit Human Resource': '編輯人力資源',
'Edit Identification Report': '編輯識別報告',
'Edit Identity': '編輯身分',
'Edit Image Details': '編輯影像詳細資料',
'Edit Image': '編輯影像',
'Edit Impact Type': '編輯影響類型',
'Edit Impact': '編輯影響',
'Edit Incident Report': '編輯事件報告',
'Edit Incident': '編輯事件',
'Edit Inventory Item': '編輯庫存項目',
'Edit Inventory Location': '編輯庫存位置',
'Edit Inventory Store': '編輯庫存倉庫',
'Edit Item Catalog Categories': '編輯項目型錄種類',
'Edit Item Catalog': '編輯項目型錄',
'Edit Item Category': '編輯項目種類',
'Edit Item Pack': '編輯項目套件',
'Edit Item Sub-Categories': '編輯項目子種類',
'Edit Item': '編輯項目',
'Edit Job Role': '編輯工作角色',
'Edit Key': '編輯索引鍵',
'Edit Kit': '編輯套件',
'Edit Landmark': '編輯里程碑',
'Edit Layer': '編輯層',
'Edit Level %d Locations?': '編輯層次%d位置?',
'Edit Level 1 Assessment': '編輯層次一評量',
'Edit Level 2 Assessment': '編輯層次二評量',
'Edit Location': '編輯位置',
'Edit Log Entry': '編輯日誌項目',
'Edit Map Profile': '編輯對映配置',
'Edit Map Services': '編輯對映服務',
'Edit Marker': '編輯標記',
'Edit Membership': '編輯成員資格',
'Edit Message': '編輯訊息',
'Edit Messaging Settings': '編輯傳訊設定',
'Edit Metadata': '編輯 meta 資料',
'Edit Mission': '編輯任務',
'Edit Modem Settings': '編輯數據機設定',
'Edit Need Type': '編輯需求類型',
'Edit Need': '編輯需求',
'Edit Note': '編輯附註',
'Edit Office': '編輯辦公室',
'Edit Options': '編輯選項',
'Edit Organization': '編輯組織',
'Edit Parameters': '編輯參數',
'Edit Partner': '編輯夥伴',
'Edit Peer Details': '編輯對等點詳細資料',
'Edit Peer': '編輯對等點',
'Edit Person Details': '編輯人員詳細資料',
'Edit Personal Effects Details': '編輯個人物品詳細資料',
'Edit Photo': '編輯照片',
'Edit Pledge': '編輯認捐',
'Edit Population Statistic': '編輯人口統計資料',
'Edit Position': '編輯位置',
'Edit Problem': '編輯問題',
'Edit Project': '編輯專案',
'Edit Projection': '編輯投射',
'Edit Rapid Assessment': '編輯快速評量',
'Edit Received Item': '編輯接收項目',
'Edit Received Shipment': '編輯收到出貨',
'Edit Record': '編輯記錄',
'Edit Recovery Details': '編輯回復明細',
'Edit Registration Details': '編輯登錄詳細資料',
'Edit Registration': '編輯登錄',
'Edit Report': '編輯報告',
'Edit Request Item': '編輯要求項目',
'Edit Request': '編輯要求',
'Edit Resource': '編輯資源',
'Edit Response': '編輯回應',
'Edit River': '編輯河流',
'Edit Role': '編輯角色',
'Edit Room': '編輯室',
'Edit Scenario': '編輯範例情節',
'Edit School District': '編輯學校特區',
'Edit School Report': '編輯學校報告',
'Edit Section': '編輯區段',
'Edit Sector': '編輯部門',
'Edit Sent Item': '編輯寄出項目',
'Edit Setting': '編輯設定',
'Edit Settings': '編輯設定',
'Edit Shelter Service': '編輯收容所服務',
'Edit Shelter Type': '編輯收容所類型',
'Edit Shelter': '編輯收容所',
'Edit Shipment Transit Log': '編輯出貨傳輸日誌',
'Edit Shipment to Send': '編輯出貨以傳送',
'Edit Shipment/Way Bills': '編輯出貨/運貨單',
'Edit Shipment<>Item Relation': '編輯出貨<>項目關係',
'Edit Site': '編輯網站',
'Edit Skill Equivalence': '編輯等值技能',
'Edit Skill Provision': '編輯技術供應',
'Edit Skill Type': '編輯技術類型',
'Edit Skill': '編輯技術',
'Edit Solution': '編輯解決方案',
'Edit Source': '編輯來源',
'Edit Staff Type': '編輯人員類型',
'Edit Staff': '編輯人員',
'Edit Storage Bin Type(s)': '編輯儲存箱類型',
'Edit Storage Bins': '編輯儲存箱',
'Edit Storage Location': '編輯儲存位置',
'Edit Subscription': '編輯訂閱',
'Edit Subsector': '編輯子部門',
'Edit Survey Answer': '編輯調查回答',
'Edit Survey Question': '編輯調查問題',
'Edit Survey Section': '編輯調查區段',
'Edit Survey Series': '編輯調查系列',
'Edit Survey Template': '編輯調查範本',
'Edit Sync Settings': '編輯同步設定',
'Edit Task': '編輯作業',
'Edit Team': '編輯團隊',
'Edit Theme': '編輯佈景主題',
'Edit Themes': '編輯佈景主題',
'Edit Ticket': '編輯問題單',
'Edit Track': '編輯追蹤',
'Edit Training': '編輯培訓',
'Edit Tropo Settings': '編輯Tropo設定',
'Edit Unit': '編輯單元',
'Edit Update': '編輯更新',
'Edit User': '編輯使用者',
'Edit Volunteer Availability': '編輯志工可用時間',
'Edit Volunteer Details': '編輯志工詳細資料',
'Edit Volunteer Registration': '編輯志工登錄',
'Edit Warehouse': '編輯倉儲',
'Edit current record': '編輯現行記錄',
'Edit message': '編輯訊息',
'Edit the Application': '編輯應用程式',
'Edit': '編輯',
'Editable?': '可編輯?',
'Education materials received': '已收到教育材料',
'Education materials, source': '教育材料,來源',
'Education': '教育',
'Effects Inventory': '個人物品清冊',
'Either a shelter or a location must be specified': '必須指定收容所或位置',
'Either file upload or document URL required.': '必須上傳檔案或提供文件URL。',
'Either file upload or image URL required.': '必須上傳檔案或提供影像URL。',
'Elderly person headed households (>60 yrs)': '以年長者(60歲以上)為戶主的家庭',
'Electrical': '電氣',
'Electrical, gas, sewerage, water, hazmats': '電力、瓦斯、污水、供水、危險物料',
'Electricity': '電力',
'Elevated': '高架',
'Elevators': '升降機',
'Email Address': '電子郵件位址',
'Email Settings': '電子郵件設定',
'Email address verified, however registration is still pending approval - please wait until confirmation received.': '電子郵件位址驗證,但是登錄仍在擱置核准-請稍候直到收到確認。',
'Email settings updated': '更新電子郵件設定',
'Email': '電子郵件',
'Embassy': '大使館',
'Emergency Capacity Building project': '緊急容量建置專案',
'Emergency Department': '緊急部門',
'Emergency Shelter': '緊急收容所',
'Emergency Support Facility': '緊急支援設施',
'Emergency Support Service': '緊急支援服務',
'Emergency Telecommunications': '緊急電信',
'Enable/Disable Layers': '啟用/停用層',
'Enabled': '已啟用',
'Enabled?': '已啟用?',
'End Date': '結束日期',
'End date should be after start date': '結束日期應該晚於開始日期',
'End date': '結束日期',
'End of Period': '期間結束',
'English': '英文',
'Enter Coordinates in Deg Min Sec': '以度分秒的格式輸入座標值',
'Enter Coordinates:': '輸入座標:',
'Enter a GPS Coord': '輸入GPS座標',
'Enter a date before': '輸入早於以下日期的日期',
'Enter a name for the spreadsheet you are uploading (mandatory).': '輸入一個您上傳的電子表格的名稱(強制)。',
'Enter a new support request.': '輸入一個新的援助申請。',
'Enter a summary of the request here.': '在這裡輸入申請摘要。',
'Enter a unique label!': '輸入獨一無二的標籤!',
'Enter a valid date before': '輸入早於以下日期的有效日期',
'Enter a valid email': '輸入一個有效的電子郵件位址',
'Enter a valid future date': '輸入一個有效的未來日期',
'Enter some characters to bring up a list of possible matches': '輸入部分字元以顯示可能相符項目的清單',
'Enter some characters to bring up a list of possible matches.': '輸入部分字元以顯示可能相符項目的清單。',
'Enter tags separated by commas.': '輸入以逗點區隔的標籤。',
'Enter the same password as above': '輸入與上面相同的密碼',
'Enter your firstname': '輸入你的名字',
'Entered': '已輸入',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': '輸入一個電話號碼是選用的,但這樣做可讓您訂閱以收到SMS訊息。',
'Entry deleted': '已刪除項目',
'Environment': '環境',
'Equipment': '設備',
'Error encountered while applying the theme.': '套用佈景主題時發生錯誤。',
'Error in message': '訊息中有錯誤',
"Error logs for '%(app)s'": '錯誤日誌的 "%(app)s"',
'Errors': '錯誤',
'Est. Delivery Date': '預計交付日期',
'Estimated # of households who are affected by the emergency': '受緊急事件影響的估計家庭數',
'Estimated # of people who are affected by the emergency': '受緊急事件影響的估計人數',
'Estimated Overall Building Damage': '估計整體建築物損壞',
'Estimated total number of people in institutions': '機構中的估計總人數',
'Euros': '歐元',
'Evacuation': '撤離',
'Evaluate the information in this message. (This value SHOULD NOT be used in public warning applications.)': '評估此訊息中的資訊。 (這個值不應該用於公開警告應用程式。 )',
'Event Details': '事件詳細資料',
'Event Time': '事件時間',
'Event Type': '事件類型',
'Event added': '新增事件',
'Event deleted': '刪除事件',
'Event type': '事件類型',
'Event updated': '更新事件',
'Event': '事件',
'Events': '事件',
'Example': '範例',
'Exceeded': '已超出',
'Excellent': '絕佳',
'Exclude contents': '排除內容',
'Excreta disposal': '排泄物處置',
'Execute a pre-planned activity identified in <instruction>': '執行<instruction>中所指明的預先計畫活動',
'Exercise': '練習',
'Exercise?': '練習?',
'Exercises mean all screens have a watermark & all notifications have a prefix.': '練習表示所有畫面都具有一個浮水印和所有通知有一個字首。',
'Existing Placard Type': '現有標示牌類型',
'Existing food stocks': '現有糧食存量',
'Existing food stocks, main dishes': '現有糧食存量,主食',
'Existing food stocks, side dishes': '現有糧食存量,副食',
'Existing location cannot be converted into a group.': '現有的位置無法轉換成一個群組。',
'Exits': '出口',
'Expected In': '預計入庫',
'Expected Out': '預計出庫',
'Experience': '經驗',
'Expiry Date': '到期日期',
'Expiry Time': '期限時間',
'Explosive Hazard': '爆炸性危害',
'Export Data': '匯出資料',
'Export Database as CSV': '將資料庫匯出為CSV',
'Export in GPX format': '匯出為GPX格式',
'Export in KML format': '匯出為KML格式',
'Export in OSM format': '匯出為OSM格式',
'Export in PDF format': '匯出為PDF檔',
'Export in RSS format': '匯出為RSS格式',
'Export in XLS format': '匯出為XLS檔',
'Export': '匯出',
'Exterior Only': '僅外部',
'Exterior and Interior': '外部和內部',
'External Features': '外部特性',
'Eye Color': '眼睛顏色',
'Facebook': '臉書',
'Facial hair, color': '鬍鬚,顏色',
'Facial hair, type': '鬍鬚,類型',
'Facial hear, length': '鬍鬚,長度',
'Facilities': '設備',
'Facility Details': '設施詳細資料',
'Facility Operations': '設施營運',
'Facility Status': '設施狀態',
'Facility Type': '設施類型',
'Facility added': '已新增設施',
'Facility or Location': '設施或位置',
'Facility removed': '已移除設施',
'Facility updated': '已更新設施',
'Facility': '設施',
'Factors affecting school attendance': '影響上學出席率的因素',
'Fail': '失敗',
'Failed!': '失敗!',
'Fair': '普通',
'Falling Object Hazard': '墜落物危害',
'Families/HH': '家庭/戶',
'Family Care': '家庭照護',
'Family tarpaulins received': '已收到家庭用防水布',
'Family tarpaulins, source': '家庭用防水布,來源',
'Family': '家庭',
'Family/friends': '家人/朋友',
'Farmland/fishing material assistance, Rank': '農地/漁業物資援助,排名',
'Fax': '傳真',
'Feature Layer Details': '功能層詳細資料',
'Feature Layer added': '功能層新增',
'Feature Layer deleted': '已刪除功能層',
'Feature Layer updated': '已更新功能層',
'Feature Layers': '功能層',
'Feature Namespace': '特性名稱空間',
'Feature Request': '功能要求',
'Feature Type': '功能類型',
'Feature': '特性 (feature)',
'Features Include': '功能包括',
'Female headed households': '以女性為戶主的家庭',
'Female': '女性',
'Few': '少數',
'Field Hospital': '野戰醫院',
'Field': '欄位',
'Fields tagged with a star': '標有星號的欄位',
'File': '檔案',
'Fill in Latitude': '填寫緯度',
'Fill in Longitude': '填寫經度',
'Filter Field': '過濾欄位',
'Filter Value': '過濾器值',
'Filter': '過濾器',
'Filtered search of aid pledges and requests': '篩選搜尋援助認捐與請求',
'Find Dead Body Report': '尋找遺體報告',
'Find Hospital': '尋找醫院',
'Find Person Record': '尋找人員記錄',
'Find Recovery Report': '尋找恢復報告',
'Find Volunteers': '尋找志願者',
'Find a Person Record': '尋找一個人員記錄',
'Find by Name': '依名稱搜尋',
'Find': '尋找',
'Finder': '搜尋器',
'Fingerprint': '指紋',
'Fingerprinting': '產生指紋',
'Fingerprints': '指紋',
'Finish': '完成',
'Finished Jobs': '完成工作',
'Fire suppression and rescue': '滅火和救援',
'Fire': '火災',
'First Name': '名',
'First name': '名',
'Fishing': '漁業',
'Flash Flood': '暴洪',
'Flash Freeze': '急凍',
'Fleet Management': '車隊管理',
'Flexible Impact Assessments': '彈性影響評量',
'Flood Alerts show water levels in various parts of the country': '水災警示顯示全國各地的水位',
'Flood Alerts': '水災警示',
'Flood Report Details': '水災報告詳細資料',
'Flood Report added': '水災新增報告',
'Flood Report deleted': '水災報告刪除',
'Flood Report updated': '水災報告更新',
'Flood Report': '水災報告',
'Flood Reports': '水災報告',
'Flood': '水災',
'Flow Status': '水流狀態',
'Focal Point': '聯絡窗口',
'Food Supply': '食品供應',
'Food assistance available/expected': '預期可提供食品援助',
'Food assistance': '食品援助',
'Food': '食物',
'Footer file %s missing!': '缺少頁尾檔案 %s!',
'Footer': '頁尾',
'For Eden instances enter the application base URL, e.g. http://sync.sahanfoundation.org/eden, for other peers the URL of the synchronization interface.': '對於Eden實例,請輸入應用程式基本URL,例如 http://sync.sahanfoundation.org/eden;對於其他對等點,請輸入其同步介面的URL。',
'For POP-3 this is usually 110 (995 for SSL), for IMAP this is usually 143 (993 for IMAP).': '對於POP-3,通常為110(SSL為995);對於IMAP,通常為143(SSL為993)。',
'For Warehouse': '用於倉儲',
'For a country this would be the ISO2 code, for a Town, it would be the Airport Locode.': '對於國家,這會是ISO2代碼;對於城鎮,則是機場Locode。',
'For each sync partner, there is a default sync job that runs after a specified interval of time. You can also set up more sync jobs which could be customized on your needs. Click the link on the right to get started.': '每個同步夥伴都有一個預設的同步工作,會在指定的時間間隔後執行。您也可以依需求設定更多自訂的同步工作。按一下右側的鏈結開始。',
'For enhanced security, you are recommended to enter a username and password, and notify administrators of other machines in your organization to add this username and password against your UUID in Synchronization -> Sync Partners': '為加強安全,建議您輸入使用者名稱和密碼,並通知組織中其他機器的管理者,在「同步化 -> 同步夥伴」中針對您的UUID新增此使用者名稱和密碼',
'For live help from the Sahana community on using this application, go to': '想要從 Sahana 社群取得使用方面的線上幫助,請前往',
'For messages that support alert network internal functions': '用於支援警示網路內部功能的訊息',
'For more details on the Sahana Eden system, see the': '更多關於 Sahana Eden 系統的資訊,請見',
'For more information, see': '想要瞭解更多資訊,請見',
'For': '適用於',
'Forest Fire': '森林火災',
'Formal camp': '正式營地',
'Format': '格式',
'Forms': '表單',
'Found': '找到',
'Freezing Drizzle': '凍結毛毛雨',
'Freezing Rain': '凍結雨',
'Freezing Spray': '凍結噴灑',
'French': '法文',
'Friday': '星期五',
'From Inventory': '從庫存',
'From Location': '起點位置',
'From Organization': '來源組織',
'From Person': '從人員',
'From Warehouse': '從倉儲',
'From': '開始',
'Frost': '霜凍',
'Fuel': '燃料',
'Fulfil. Status': '履行狀態',
'Fulfillment Status': '供貨狀態',
'Full beard': '大鬍子',
'Full': '已滿',
'Fullscreen Map': '全螢幕對映',
'Functional Tests': '功能測試',
'Functions available': '可用的函數',
'Funding Organization': '資金組織',
'Funeral': '喪葬',
'Further Action Recommended': '建議進一步的動作',
'GIS Reports of Shelter': '收容所的地理資訊系統報告',
'GIS integration to view location details of the Shelter': '整合地理資訊系統以檢視收容所的詳細位置',
'GPS Marker': 'GPS標記',
'GPS Track File': 'GPS追蹤檔案',
'GPS Track': 'GPS追蹤',
'GPX Track': 'GPX追蹤',
'GRN Status': 'GRN狀態',
'Gale Wind': '強風',
'Gap Analysis Map': '差距分析地圖',
'Gap Analysis Report': '差距分析報告',
'Gap Analysis': '差距分析',
'Gap Map': '差距地圖',
'Gap Report': '差距報告',
'Gateway Settings': '閘道設定',
'Gateway settings updated': '閘道設定更新',
'Gateway': '閘道',
'Gender': '性別',
'General Comment': '一般評論',
'General Medical/Surgical': '一般內科/外科',
'General emergency and public safety': '一般緊急事件與公共安全',
'General information on demographics': '人口統計的一般資訊',
'General': '一般',
'Generator': '發電機',
'Geocode': '地理編碼',
'Geocoder Selection': '選擇地理編碼程式',
'Geometry Name': '幾何形狀名稱',
'Geonames.org search requires Internet connectivity!': 'Geonames.org搜尋需要網際網路連線功能!',
'Geophysical (inc. landslide)': '地球物理(含山崩)',
'Geotechnical Hazards': '地工技術危害',
'Geraldo module not available within the running Python - this needs installing for PDF output!': '執行中的Python環境沒有Geraldo模組,需要安裝才能輸出PDF!',
'Get incoming recovery requests as RSS feed': '以RSS訊息來源取得送入的回復要求',
'Girls 13-18 yrs in affected area': '在受影響地區的13-18歲女孩',
'Girls 13-18 yrs not attending school': '不上學的13-18歲女孩',
'Girls 6-12 yrs in affected area': '在受影響地區的6-12歲女童',
'Girls 6-12 yrs not attending school': '不上學的6-12歲女童',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': '提供圖像的簡要描述,例如圖片的什麼地方可以看到什麼(可選)。',
'Give information about where and when you have seen the person': '提供您在何時何地見過此人的資訊',
'Give information about where and when you have seen them': '提供您在何時何地見過他們的資訊',
'Global Messaging Settings': '廣域傳訊設定',
'Go to Request': '跳至要求',
'Go': '執行',
'Good Condition': '狀況良好',
'Good': '良好',
'Goods Received Note': '收貨單',
'Government UID': '政府UID',
'Government building': '政府建築物',
'Government': '政府機關',
'Grade': '等級',
'Greek': '希臘文',
'Green': '綠色',
'Ground movement, fissures': '地層移動,裂縫',
'Ground movement, settlement, slips': '地層移動,沉陷,滑動',
'Group %(group_id)s created': '群組 %(group_id)s 建立',
'Group Description': '群組說明',
'Group Details': '群組詳細資料',
'Group ID': '群組編號',
'Group Member added': '群組成員已新增',
'Group Members': '群組成員',
'Group Memberships': '加入群組',
'Group Name': '群組名稱',
'Group Title': '群組標題',
'Group Type': '群組類別',
'Group added': '群組已新增',
'Group deleted': '群組已刪除',
'Group description': '群組說明',
'Group name': '群組名稱',
'Group type': '群組類別',
'Group updated': '群組已更新',
'Group': '群組',
'Groups removed': '群組已刪除',
'Groups': '群組',
'Guest': '訪客',
'HR Manager': 'HR管理員',
'Hail': '冰雹',
'Hair Color': '頭髮顏色',
'Hair Length': '頭髮長度',
'Hair Style': '髮型',
'Has additional rights to modify records relating to this Organization or Site.': '有額外權限可修改與此組織或站點相關的記錄。',
'Has data from this Reference Document been entered into Sahana?': '此參考文件中的資料是否已輸入Sahana?',
'Has only read-only access to records relating to this Organization or Site.': '對與此組織或站點相關的記錄僅有唯讀存取權。',
'Has the Certificate for receipt of the shipment been given to the sender?': '收貨憑證是否已交給寄件者?',
'Has the GRN (Goods Received Note) been completed?': 'GRN(收貨單)是否已完成?',
'Has the safety and security of women and children in your community changed since the emergency?': '自緊急事件以來,您社群中婦女和兒童的安全狀況是否有所改變?',
'Has your business been damaged in the course of the disaster?': '您的業務是否在災難過程中受到損害?',
'Have households received any shelter/NFI assistance or is assistance expected in the coming days?': '各家庭是否已收到任何收容所/非糧食物資援助,或預期未來幾天會有援助?',
'Have normal food sources been disrupted?': '正常的糧食來源是否已中斷?',
'Have schools received or are expecting to receive any assistance?': '學校是否已收到或預期會收到任何援助?',
'Have the people received or are you expecting any medical or food assistance in the coming days?': '人們是否已收到任何醫療或糧食援助,或您是否預期未來幾天會有?',
'Hazard Pay': '危險津貼',
'Hazard': '危害',
'Hazardous Material': '危害性物料',
'Hazardous Road Conditions': '危險道路條件',
'Header Background': '標頭背景',
'Header background file %s missing!': '缺少標頭背景檔案 %s!',
'Headquarters': '總公司',
'Health care assistance, Rank': '醫療照護援助,排名',
'Health center with beds': '有病床的健康中心',
'Health center without beds': '無病床的健康中心',
'Health center': '健康中心',
'Health services functioning prior to disaster': '災前運作中的健康服務',
'Health services functioning since disaster': '災後運作中的健康服務',
'Health services status': '健康服務狀態',
'Health': '健康',
'Healthcare Worker': '醫療保健工作者',
'Heat Wave': '熱浪',
'Heat and Humidity': '高溫與濕度',
'Height (cm)': '高度(公分)',
'Height (m)': '高度(公尺)',
'Height': '高度',
'Help': '說明',
'Helps to monitor status of hospitals': '有助於監視醫院的狀態',
'Helps to report and search for Missing Persons': '協助通報及搜尋失蹤人員',
'Helps to report and search for missing persons': '協助通報及搜尋失蹤人員',
'Here are the solution items related to the problem.': '以下是與此問題相關的解決方案項目。',
'Here you will find all synchronization attempts made by either your machine or foreign machines for data exchange. This also lists data exchanges made using Sahana API.': '您可在此找到您的機器或外部機器為交換資料所做的所有同步化嘗試。這裡也會列出透過Sahana API進行的資料交換。',
'Heritage Listed': '列為古蹟',
'Hierarchy Level 0 Name (i.e. Country)': '階層層次0名稱(即國家)',
'Hierarchy Level 1 Name (e.g. State or Province)': '階層層次1名稱(例如州或省)',
'Hierarchy Level 2 Name (e.g. District or County)': '階層層次2名稱(例如地區或縣)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': '階層層次3名稱(例如城市/鄉鎮/村落)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': '階層層次4名稱(例如鄰里)',
'Hierarchy Level 5 Name': '階層層次5名稱',
'High Water': '高水位',
'High': '高',
'Hindi': '北印度文',
'Hindu': '印度教',
'History': '歷程',
'Hit the back button on your browser to try again.': '請按瀏覽器的「上一頁」按鈕再試一次。',
'Holiday Address': '假日位址',
'Home Address': '住家地址',
'Home Country': '住家所在國家或地區',
'Home Crime': '家庭犯罪',
'Home': '首頁',
'Hospital Details': '醫院詳細資料',
'Hospital Status Report': '醫院狀態報告',
'Hospital information added': '醫院資訊新增',
'Hospital information deleted': '醫院資訊刪除',
'Hospital information updated': '醫院資訊更新',
'Hospital status assessment.': '醫院狀態評量。',
'Hospital': '醫院',
'Hospitals': '醫院',
'Hot Spot': '熱點',
'Hour': '小時',
'Hourly': '每小時',
'Hours': '時數',
'Household kits received': '家庭套件接收',
'Household kits, source': '家庭套件,來源',
'How did boys 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩是如何花費大部分的時間?',
'How did boys <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的男孩是如何花費大部分的時間?',
'How did boys girls 13-17yrs spend most of their time prior to the disaster?': '災難之前13到17歲的男孩女孩是如何花費大部分的時間?',
'How did girls <12yrs spend most of their time prior to the disaster?': '災難之前小於12歲的女孩是如何花費大部分的時間?',
'How do boys 13-17yrs spend most of their time now?': '13-17歲的男孩怎樣利用他們大部分的時間?',
'How do boys <12yrs spend most of their time now?': '現在小於12歲的男孩是如何花費大部分的時間?',
'How do girls 13-17yrs spend most of their time now?': '13-17歲的女孩如何利用她們大部分的時間?',
'How do girls <12yrs spend most of their time now?': '少於12歲的女孩如何利用她們大部分的時間?',
'How does it work?': '運作方式?',
'How is this person affected by the disaster? (Select all that apply)': '此人如何受到災難影響?(請選取所有適用項目)',
'How long does it take you to reach the available water resources? Specify the time required to go there and back, including queuing time, by foot.': '您需要多久才能到達可用的水資源?請指定步行往返所需的時間,包括排隊時間。',
'How long does it take you to walk to the health service?': '您步行到健康服務機構需要多久?',
'How long will the food last?': '糧食可以維持多久?',
'How long will this water resource last?': '此水資源可以維持多久?',
'How many Boys (0-17 yrs) are Dead due to the crisis': '有多少男孩(0-17歲)因危機死亡',
'How many Boys (0-17 yrs) are Injured due to the crisis': '有多少男孩(0-17歲)因危機受傷',
'How many Boys (0-17 yrs) are Missing due to the crisis': '有多少男孩(0-17歲)因危機失蹤',
'How many Girls (0-17 yrs) are Dead due to the crisis': '有多少女孩(0-17歲)因危機死亡',
'How many Girls (0-17 yrs) are Injured due to the crisis': '有多少女孩(0-17歲)因危機受傷',
'How many Girls (0-17 yrs) are Missing due to the crisis': '有多少女孩(0-17歲)因危機失蹤',
'How many Men (18 yrs+) are Dead due to the crisis': '有多少男性(18歲以上)因危機死亡',
'How many Men (18 yrs+) are Injured due to the crisis': '有多少男性(18歲以上)因危機受傷',
'How many Men (18 yrs+) are Missing due to the crisis': '有多少男性(18歲以上)因危機失蹤',
'How many Women (18 yrs+) are Dead due to the crisis': '有多少女性(18歲以上)因危機死亡',
'How many Women (18 yrs+) are Injured due to the crisis': '有多少女性(18歲以上)因危機受傷',
'How many Women (18 yrs+) are Missing due to the crisis': '有多少女性(18歲以上)因危機失蹤',
'How many days will the supplies last?': '物資可以維持多少天?',
'How many doctors in the health centers are still actively working?': '健康中心還有多少醫師在實際工作?',
'How many houses are uninhabitable (uninhabitable = foundation and structure destroyed)?': '有多少房屋無法居住(無法居住=地基與結構被毀)?',
'How many houses suffered damage but remain usable (usable = windows broken, cracks in walls, roof slightly damaged)?': '有多少房屋受損但仍可使用(可使用=窗戶破裂、牆面龜裂、屋頂輕微受損)?',
'How many latrines are available in the village/IDP centre/Camp?': '村落/國內流離失所者中心/營地有多少廁所可用?',
'How many midwives in the health centers are still actively working?': '健康中心還有多少助產士在實際工作?',
'How many new cases have been admitted to this facility in the past 24h?': '過去24小時內此設施收治了多少新病例?',
'How many nurses in the health centers are still actively working?': '健康中心還有多少護士在實際工作?',
'How many of the patients with the disease died in the past 24h at this facility?': '過去24小時內有多少該疾病的病患在此設施死亡?',
'How many of the primary school age boys (6-12) in the area are not attending school?': '該地區有多少小學學齡男孩(6-12歲)沒有上學?',
'How many of the primary school age girls (6-12) in the area are not attending school?': '該地區有多少小學學齡女孩(6-12歲)沒有上學?',
'How many of the primary/secondary schools are now open and running a regular schedule of class?': '有多少中小學現已開放並正常排課上課?',
'How many of the secondary school age boys (13-18) in the area are not attending school?': '該地區有多少中學學齡男孩(13-18歲)沒有上學?',
'How many of the secondary school age girls (13-18) in the area are not attending school?': '該地區有多少中學學齡女孩(13-18歲)沒有上學?',
'How many patients with the disease are currently hospitalized at this facility?': '目前有多少該疾病的病患在此設施住院?',
'How many primary school age boys (6-12) are in the affected area?': '受影響地區有多少小學學齡男孩(6-12歲)?',
'How many primary school age girls (6-12) are in the affected area?': '受影響地區有多少小學學齡女孩(6-12歲)?',
'How many primary/secondary schools were opening prior to the disaster?': '災前有多少中小學開放?',
'How many secondary school age boys (13-18) are in the affected area?': '受影響地區有多少中學學齡男孩(13-18歲)?',
'How many secondary school age girls (13-18) are in the affected area?': '受影響地區有多少中學學齡女孩(13-18歲)?',
'How many teachers have been affected by the disaster (affected = unable to work)?': '有多少教師受到災難影響(受影響=無法工作)?',
'How many teachers worked in the schools prior to the disaster?': '災前有多少教師在學校工作?',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': '多少明細會出現。 高的縮放比例表示很多的詳細程度,而不是整個區域。 較低的縮放比例表示看到整個區域,但不是一個高層次的詳細資料。',
'Human Resource Details': '人力資源詳細資料',
'Human Resource Management': '人力資源管理',
'Human Resource added': '新增人力資源',
'Human Resource removed': '移除人力資源',
'Human Resource updated': '人力資源更新',
'Human Resource': '人力資源',
'Human Resources Management': '人力資源管理',
'Human Resources': '人力資源部',
'Hurricane Force Wind': '颶風級強風',
'Hurricane': '颱風',
'Hygiene kits received': '已收到衛生包',
'Hygiene kits, source': '衛生包,來源',
'Hygiene practice': '衛生習慣',
'Hygiene problems': '衛生問題',
'I am available in the following area(s)': '我可在下列區域提供服務',
'ID Label': '識別編號或符號',
'ID Label:': '識別編號或符號:',
'ID Tag Number': 'ID標籤號碼',
'ID Tag': 'ID標籤',
'ID Type': 'Id 類型',
'Ice Pressure': '冰壓',
'Iceberg': '冰山',
'Ideally a full URL to the source file, otherwise just a note on where data came from.': '最好提供來源檔案的完整URL,否則請註明資料來源。',
'Identification Report': '識別報告',
'Identification Reports': '識別報告',
'Identification Status': '識別狀態',
'Identification label of the Storage bin.': '儲存箱的識別標籤。',
'Identification': '識別',
'Identified as': '識別為',
'Identified by': '識別由',
'Identity Details': '身分詳細資料',
'Identity added': '新增身分',
'Identity deleted': '刪除身分',
'Identity updated': '更新身分',
'Identity': '身分',
'If Staff have login accounts then they are given access to edit the details of the': '如果人員擁有登入帳戶,他們將獲得編輯以下詳細資料的權限:',
'If Unit = m, Base Unit = Km, then multiplicator is 0.0001 since 1m = 0.001 km.': '如果單位=公尺,基本單位=公里,則乘數為0.0001,因為1公尺=0.001公里。',
'If a ticket was issued then please provide the Ticket ID.': '如果已開立問題單,請提供問題單ID。',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': '如果使用者驗證他們擁有此網域的電子郵件位址,「核准者」欄位將用來決定是否需要進一步核准以及由誰核准。',
'If enabled then a log is maintained of all records a user accesses. If disabled then it can still be enabled on a per-module basis.': '如果啟用,系統會記錄使用者存取的所有記錄。如果停用,仍可針對個別模組啟用。',
'If enabled then a log is maintained of all records a user edits. If disabled then it can still be enabled on a per-module basis.': '如果啟用,系統會記錄使用者編輯的所有記錄。如果停用,仍可針對個別模組啟用。',
'If it is a URL leading to HTML, then this will downloaded.': '如果這是指向HTML的URL,則會下載其內容。',
'If neither are defined, then the Default Marker is used.': '如果兩者皆未定義,則使用預設標記。',
'If no marker defined then the system default marker is used': '如果未定義標記,則使用系統預設標記',
'If no, specify why': '如果沒有,請指定原因',
'If none are selected, then all are searched.': '如果皆未選取,則搜尋全部。',
'If the location is a geographic area, then state at what level here.': '如果位置是地理區域,請在此說明其層次。',
'If the request type is "Other", please enter request details here.': '若要求的類型為"其他",請輸入此要求的詳細資料。',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': '如果填入此欄位,使用指定網域的使用者將自動被指派為此組織的人員',
'If this is set to True then mails will be deleted from the server after downloading.': '如果設為True,郵件在下載後將從伺服器中刪除。',
'If this record should be restricted then select which role is required to access the record here.': '如果此記錄需要限制存取,請在此選取存取此記錄所需的角色。',
'If this record should be restricted then select which role(s) are permitted to access the record here.': '如果此記錄需要限制存取,請在此選取允許存取此記錄的角色。',
'If yes, specify what and by whom': '如果為"是",指定什麼和由誰',
'If yes, which and how': '如果為"是",以及如何',
'If you do not enter a Reference Document, your email will be displayed to allow this data to be verified.': '如果您不輸入參考文件,系統將顯示您的電子郵件,以便驗證此資料。',
'If you know what the Geonames ID of this location is then you can enter it here.': '如果您知道此位置的Geonames ID,可以在這裡輸入。',
'If you know what the OSM ID of this location is then you can enter it here.': '如果您知道此位置的OSM ID,可以在這裡輸入。',
'If you need to add a new document then you can click here to attach one.': '如果需要新增文件,可以按一下這裡附加一份。',
'If you run multiple servers in a network, you would probably see this place listing some other machines. Sahana can automatically pick servers in your organization (if they have sync username and password of your machine or if it is set to default) and add them to your list of machines to perform synchronization with. You can modify individual sync policy for each server. You can also add username and password of that server to retrieve and send data to that server. You can also manually add other servers.': '如果您在網路中執行多部伺服器,這裡可能會列出其他機器。Sahana可以自動選取組織中的伺服器(如果它們有您機器的同步使用者名稱和密碼,或該設定為預設值),並將它們加入要進行同步的機器清單。您可以為每部伺服器修改個別的同步原則,也可以新增該伺服器的使用者名稱和密碼,以便向該伺服器擷取及傳送資料,還可以手動新增其他伺服器。',
'If you want several values, then separate with': '如果需要多個值,請以下列符號分隔:',
'If you would like to help, then please': '如果您想提供協助,請',
'Illegal Immigrant': '非法移民',
'Image Details': '影像詳細資料',
'Image Tags': '影像標籤',
'Image Type': '影像類型',
'Image Upload': '上載影像',
'Image added': '新增影像',
'Image deleted': '刪除影像',
'Image updated': '更新影像',
'Image': '影像',
'Image/Attachment': '影像/附件',
'Image/Other Attachment': '影像/其他附件',
'Imagery': '影像',
'Images': '影像',
'Immediate reconstruction assistance, Rank': '立即重建援助,排名',
'Impact Assessments': '影響評量',
'Impact Details': '影響詳細資料',
'Impact Type Details': '影響類型詳細資料',
'Impact Type added': '影響類型新增',
'Impact Type deleted': '影響類型刪除',
'Impact Type updated': '影響更新類型',
'Impact Type': '影響類型',
'Impact Types': '影響類型',
'Impact added': '影響新增',
'Impact deleted': '刪除影響',
'Impact updated': '影響更新',
'Impacts': '影響',
'Import & Export Data': '匯入及匯出資料',
'Import Data': '匯入資料',
'Import Job': '匯入工作',
'Import Jobs': '匯入工作',
'Import and Export': '匯入及匯出',
'Import from Ushahidi Instance': '從Ushahidi實例匯入',
'Import if Master': '若為主要則匯入',
'Import job created': '已建立匯入工作',
'Import multiple tables as CSV': '以CSV匯入多個表格',
'Import': '匯入',
'Import/Export': '匯入/匯出',
'Import/Master': '匯入/主要',
'Important': '重要性',
'Importantly where there are no aid services being provided': '尤其是在沒有提供援助服務的地方',
'Imported': '已匯入',
'Importing data from spreadsheets': '從試算表匯入資料',
'Improper decontamination': '除污不當',
'Improper handling of dead bodies': '遺體處理不當',
'In Catalogs': '型錄中',
'In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '在GeoServer,這是層的名稱。 在WFS getCapabilities,這是FeatureType名稱部分之後,冒號(:)。',
'In GeoServer, this is the Workspace Name. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '在GeoServer,這是工作區名稱。 在WFS getCapabilities,這是FeatureType名稱組件之前的冒號(:)。',
'In Inventories': '在庫存',
'In Process': '正在處理',
'In Progress': '進行中',
'In Transit': '轉移中',
'In Window layout the map maximises to fill the window, so no need to set a large value here.': '在視窗佈置中,地圖會放大至填滿視窗,因此這裡不需要設定較大的值。',
'In general, what are the greatest needs of older people, people with disabilities, children, youth and women in your community?': '一般而言,您社群中的年長者、殘障人士、兒童、青少年和婦女最大的需求是什麼?',
'Inbound Mail Settings': '入埠郵件設定',
'Incident Categories': '事件種類',
'Incident Details': '事件明細',
'Incident Report Details': '事故報告詳細資料',
'Incident Report added': '新增事件報告',
'Incident Report deleted': '刪除事故報告',
'Incident Report updated': '更新事故報告',
'Incident Report': '事件報告',
'Incident Reporting System': '事件報告系統',
'Incident Reporting': '事件報告',
'Incident Reports': '事件報告',
'Incident added': '新增事件',
'Incident deleted': '刪除事件',
'Incident updated': '更新事件',
'Incident': '發生事件',
'Incidents': '發生事件',
'Incoming Shipment canceled': '進入出貨取消',
'Incoming Shipment updated': '進入出貨更新',
'Incoming': '送入的',
'Incomplete': '未完成',
'Individuals': '個人',
'Industrial Crime': '工業犯罪',
'Industrial': '製造業',
'Industry Fire': '產業發動',
'Industry close to village/camp': '鄰近村落/營地的工業',
'Infant (0-1)': '嬰兒 (0-1)',
'Infectious Disease (Hazardous Material)': '傳染病(危險物料)',
'Infectious Disease': '傳染病',
'Infestation': '蟲害',
'Informal Leader': '非正式領導者',
'Informal camp': '非正式營地',
'Information gaps': '資訊缺口',
'Infusion catheters available': '可用的輸液導管',
'Infusion catheters need per 24h': '每24小時所需的輸液導管',
'Infusion catheters needed per 24h': '每24小時所需的輸液導管',
'Infusions available': '可用的輸液',
'Infusions needed per 24h': '每24小時所需的輸液',
'Injuries': '傷害',
'Input Job': '輸入工作',
'Inspected': '已檢驗',
'Inspection Date': '檢驗日期',
'Inspection date and time': '檢驗日期和時間',
'Inspection time': '檢驗時間',
'Inspector ID': '視察者 ID',
'Instance Type': '實例類型',
'Instance URL': '實例URL',
'Instant Porridge': '即食粥',
'Institution': '機構',
'Insufficient vars: Need module, resource, jresource, instance': '變數不足:需要module、resource、jresource、instance',
'Insufficient': '不足',
'Intake Items': '接收項目',
'Intergovernmental Organization': '政府間組織',
'Interior walls, partitions': '內牆,隔間',
'Internal Features': '內部功能',
'Internal State': '內部狀態',
'International NGO': '國際NGO',
'International Organization': '國際組織',
'International Staff': '國際人員',
'Intervention': '介入',
'Interview taking place at': '訪談進行地點:',
'Invalid Organization ID!': '無效的組織ID!',
'Invalid Query': '無效的查詢',
'Invalid Request': '無效要求',
'Invalid UUID!': 'UUID無效!',
'Invalid email': '無效的電子郵件',
'Invalid request!': '要求無效!',
'Invalid ticket': '無效的票據',
'Invalid': '無效',
'Inventories with Item': '含有此項目的庫存',
'Inventories with Items': '含有項目的庫存',
'Inventories': '庫存',
'Inventory Item Details': '庫存項目詳細資料',
'Inventory Item added': '添加庫存項目',
'Inventory Item deleted': '庫存項目已刪除',
'Inventory Item updated': '庫存項目更新',
'Inventory Item': '庫存項目',
'Inventory Items include both consumable supplies & those which will get turned into Assets at their destination.': '庫存項目包括耗材,以及那些在目的地會轉為資產的物品。',
'Inventory Items': '配備盤點項目',
'Inventory Location Details': '庫存位置詳細資料',
'Inventory Location added': '新增庫存位置',
'Inventory Location updated': '庫存位置更新',
'Inventory Location': '庫存位置',
'Inventory Locations': '庫存位置',
'Inventory Management': '庫存管理',
'Inventory Stock Position': '庫存存量狀況',
'Inventory Store Details': '庫存倉庫詳細資料',
'Inventory Store added': '已新增庫存倉庫',
'Inventory Store deleted': '已刪除庫存倉庫',
'Inventory Store updated': '已更新庫存倉庫',
'Inventory Store': '庫存倉庫',
'Inventory Stores': '庫存倉庫',
'Inventory functionality is available for:': '庫存功能可用於:',
'Inventory of Effects': '個人物品清冊',
'Inventory': '庫存',
'Inventory/Ledger': '庫存/分類賬',
'Is adequate food and water available for these institutions?': '這些機構是否有足夠的食物和水?',
'Is editing level L%d locations allowed?': '是否允許編輯層次L%d的位置?',
'Is it safe to collect water?': '取水是否安全?',
'Is there any industrial or agro-chemical production close to the affected area/village?': '受影響地區/村落附近是否有任何工業或農業化學生產?',
'Is this a strict hierarchy?': '這是嚴格階層嗎?',
'Issuing Authority': '發出單位',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '它不僅擷取他們活動的地點,也擷取他們在各區域所提供專案範圍的資訊。',
'It gives four options: No Sync, Newer Timestamp, Keep All, Replace All': '它提供四個選項:不同步、較新的時間戳記、全部保留、全部取代',
'It is built using the Template agreed by a group of NGOs working together as the': '它使用由一群共同合作的NGO所認可的範本建置,這群NGO稱為',
'It is suggested to open the 2 locations into new tabs so that it can be decided which is the best one to keep out of the 2.': '建議在新分頁中開啟這兩個位置,以便判斷哪一個最值得保留。',
'Item Added to Shipment': '新增項目至出貨',
'Item Catalog Categories': '項目型錄種類',
'Item Catalog Category Details': '項目型錄種類詳細資料',
'Item Catalog Category added': '型錄項目新增種類',
'Item Catalog Category deleted': '項目刪除型錄種類',
'Item Catalog Category updated': '項目型錄種類更新',
'Item Catalog Category': '項目型錄種類',
'Item Catalog Details': '型錄項目詳細資料',
'Item Catalog added': '型錄項目新增',
'Item Catalog deleted': '型錄項目刪除',
'Item Catalog updated': '型錄項目更新',
'Item Catalogs': '型錄項目',
'Item Categories': '項目種類',
'Item Category Details': '項目種類明細',
'Item Category added': '項目新增種類',
'Item Category deleted': '刪除項目種類',
'Item Category updated': '更新項目種類',
'Item Category': '項目種類',
'Item Details': '項目明細',
'Item Pack Details': '項目套件詳細資料',
'Item Pack added': '項目套件新增',
'Item Pack deleted': '項目套件刪除',
'Item Pack updated': '項目更新套件',
'Item Packs': '項目套件',
'Item Sub-Categories': '項目子種類',
'Item Sub-Category Details': '項目子種類明細',
'Item Sub-Category added': '項目子新增種類',
'Item Sub-Category deleted': '項目子類別刪除',
'Item Sub-Category updated': '項目子類別更新',
'Item Sub-Category': '項目子種類',
'Item added to Inventory': '項目新增至庫存',
'Item added to shipment': '新增項目至出貨',
'Item added': '已新增項目',
'Item already in Bundle!': '項目已在軟體組中!',
'Item already in Kit!': '項目已在套件!',
'Item already in budget!': '項目已在預算!',
'Item deleted': '已刪除項目',
'Item removed from Inventory': '從庫存移除的項目',
'Item updated': '更新項目',
'Item': '項目',
'Items in Category can be Assets': '此種類中的項目可以是資產',
'Items': '項目',
'Japanese': '日文',
'Jerry can': '儲水桶',
'Jew': '猶太教',
'Job Role Catalog': '工作角色型錄',
'Job Role Details': '工作角色詳細資料',
'Job Role added': '工作角色新增',
'Job Role deleted': '工作角色刪除',
'Job Role updated': '工作角色更新',
'Job Role': '職位',
'Job Roles': '職務',
'Job Title': '工作職稱',
'Jobs': '工作',
'Journal Entry Details': '日誌項目詳細資料',
'Journal entry added': '新增日誌項目',
'Journal entry deleted': '日誌項目刪除',
'Journal entry updated': '日誌項目更新',
'Journal': '日誌',
'Just Once': '只要一次',
'KPIs': 'KPI',
'Keep All': '全部保留',
'Keep Local': '保留本地',
'Key Details': '索引鍵詳細資料',
'Key added': '新增金鑰',
'Key deleted': '已刪除金鑰',
'Key updated': '已更新金鑰',
'Key': '金鑰',
'Keys': '索引鍵',
'Kit Contents': '套件內容',
'Kit Details': '套件明細',
'Kit Updated': '更新套件',
'Kit added': '新增套件',
'Kit deleted': '刪除套件',
'Kit updated': '更新套件',
'Kit': '套件',
'Kits': '配套',
'Known Identities': '已知身分',
'Known incidents of violence against women/girls': '已知針對婦女/女孩的暴力事件',
'Known incidents of violence since disaster': '災後已知的暴力事件',
'LICENCE': '授權',
'LICENSE': '軟體使用權',
'LMS Administration': 'LMS管理',
'Label': '標籤(Label)',
'Lack of material': '缺乏物料',
'Lack of school uniform': '缺乏學校制服',
'Lack of supplies at school': '學校缺乏用品',
'Lack of transport to school': '缺乏上學的交通工具',
'Lactating women': '哺乳期婦女',
'Landmark Details': '里程碑詳細資料',
'Landmark added': '新增里程碑',
'Landmark deleted': '刪除里程碑',
'Landmark updated': '里程碑更新',
'Landmarks': '里程碑',
'Language': '語言',
'Last Name': '姓',
'Last known location': '前次已知位置',
'Last name': '姓',
'Last synchronization on': '上次同步化',
'Last synchronization time': '上次同步化時間',
'Last updated by': '上次更新的人',
'Last updated on': '上次更新於',
'Last updated': '前次更新',
'Latitude & Longitude': '緯度和經度',
'Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度是南北方向(上下)。緯度在赤道為零,北半球為正,南半球為負。',
'Latitude is North-South (Up-Down).': '緯度是南北方向(上下)。',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': '緯度在赤道為零,北半球為正,南半球為負。',
'Latitude of Map Center': '地圖中心的緯度',
'Latitude of far northern end of the region of interest.': '相關區域最北端的緯度。',
'Latitude of far southern end of the region of interest.': '相關區域最南端的緯度。',
'Latitude should be between': '緯度必須介於',
'Latitude': '緯度',
'Law enforcement, military, homeland and local/private security': '執法、軍事、國土與地方/私人保全',
'Layer Details': '層詳細資料',
'Layer added': '新增層',
'Layer deleted': '刪除層',
'Layer updated': '更新層',
'Layer': '層',
'Layers updated': '層更新',
'Layers': '層',
'Layout': '配置',
'Leader': '領導人',
'Left-to-Right': '由左至右',
'Legend Format': '圖例格式',
'Length (m)': '長度(公尺)',
'Length': '長度',
'Level 1 Assessment Details': '層次一評量詳細資料',
'Level 1 Assessment added': '層次一評量新增',
'Level 1 Assessment deleted': '層次一評量刪除',
'Level 1 Assessment updated': '層次一評量更新',
'Level 1 Assessments': '層次一評量',
'Level 1': '層次 1',
'Level 2 Assessment Details': '層次二評量詳細資料',
'Level 2 Assessment added': '層次二評量新增',
'Level 2 Assessment deleted': '層次二評量刪除',
'Level 2 Assessment updated': '層次二評量更新',
'Level 2 Assessments': '層次二評量',
'Level 2 or detailed engineering evaluation recommended': '建議進行層次2或詳細工程評估',
'Level 2': '層次 2',
'Level': '層次',
'Library support not available for OpenID': '沒有可用於OpenID的程式庫支援',
'Line': '明細行',
'LineString': '線串',
'Link Item & Shipment': '鏈結項目&出貨',
'Link an Item & Shipment': '鏈結項目與出貨',
'Linked Records': '鏈結記錄',
'Linked records': '鏈結記錄',
'List / Add Baseline Types': '清單/新增基準線類型',
'List / Add Impact Types': '清單/新增影響類型',
'List / Add Services': '清單/新增服務',
'List / Add Types': '清單/新增類型',
'List Activities': '列出活動',
'List Aid Requests': '需求列表',
'List All Assets': '所有資產清單',
'List All Catalog Items': '列出所有型錄項目',
'List All Commitments': '列出所有Commitments',
'List All Entries': '所有項目清單',
'List All Item Categories': '列出所有項目種類',
'List All Memberships': '顯示所有組員',
'List All Received Shipments': '列出所有接收出貨',
'List All Records': '所有記錄清單',
'List All Reports': '列示全部報告',
'List All Requested Items': '列出所有要求的項目',
'List All Requests': '所有要求清單',
'List All Sent Shipments': '列出所有傳送出貨',
'List All': '列示全部',
'List Alternative Items': '替代清單項目',
'List Assessment Summaries': '清單評量摘要',
'List Assessments': '評量清單',
'List Asset Assignments': '列示資產分派',
'List Assets': '列出資產',
'List Availability': '清單可用性',
'List Baseline Types': '列舉基準線類型',
'List Baselines': '列舉基準線',
'List Brands': '列舉品牌',
'List Budgets': '列舉預算',
'List Bundles': '列舉捆綁',
'List Camp Services': '營地服務清單',
'List Camp Types': '營地類型清單',
'List Camps': '營地清單',
'List Catalog Items': '列舉目錄項目',
'List Catalogs': '目錄清單',
'List Category<>Sub-Category<>Catalog Relation': '種類<>子種類<>型錄關係清單',
'List Certificates': '憑證清單',
'List Certifications': '認證清單',
'List Checklists': '核對清單',
'List Cluster Subsectors': '叢集子部門清單',
'List Clusters': '叢集清單',
'List Commitment Items': '清單項目承諾',
'List Commitments': '清單Commitments',
'List Competencies': '清單能力',
'List Competency Ratings': '清單能力等級',
'List Configs': '配置清單',
'List Conflicts': '衝突清單',
'List Contact Information': '聯絡資訊清單',
'List Contacts': '列出聯絡人',
'List Course Certicates': '課程證書清單',
'List Courses': '課程清單',
'List Credentials': '認證清單',
'List Current': '現行清單',
'List Distribution Items': '配送清單項目',
'List Distributions': '配送清單',
'List Documents': '清單文件',
'List Donors': '捐贈者清單',
'List Events': '事件清單',
'List Facilities': '設備清單',
'List Feature Layers': '清單功能層',
'List Flood Reports': '水災報告清單',
'List Groups': '顯示群組',
'List Groups/View Members': '列示群組成員/檢視',
'List Hospitals': '醫院清單',
'List Human Resources': '人力資源清單',
'List Identities': '身分清單',
'List Images': '影像清單',
'List Impact Assessments': '清單影響評量',
'List Impact Types': '影響清單類型',
'List Impacts': '影響清單',
'List Incident Reports': '事件報告清單',
'List Incidents': '事件清單',
'List Inventory Items': '庫存項目清單',
'List Inventory Locations': '清單庫存位置',
'List Inventory Stores': '清單儲存庫存',
'List Item Catalog Categories': '型錄種類清單項目',
'List Item Catalogs': '清單項目型錄',
'List Item Categories': '項目種類清單',
'List Item Packs': '清單項目套件',
'List Item Sub-Categories': '清單項目子種類',
'List Items in Inventory': '清單中項目的庫存',
'List Items': '清單項目',
'List Job Roles': '列出工作角色',
'List Keys': '列出金鑰',
'List Kits': '套件清單',
'List Landmarks': '清單里程碑',
'List Layers': '層清單',
'List Level 1 Assessments': '清單層次一評量',
'List Level 1 assessments': '清單層次一評量',
'List Level 2 Assessments': '清單層次二評量',
'List Level 2 assessments': '清單層次二評量',
'List Locations': '列示位置',
'List Log Entries': '日誌項目清單',
'List Map Profiles': '對映配置清單',
'List Markers': '標記清單',
'List Members': '列示成員',
'List Memberships': '成員資格清單',
'List Messages': '列出訊息',
'List Metadata': 'meta資料清單',
'List Missing Persons': '失蹤災民列表',
'List Missions': '列出任務清單',
'List Need Types': '清單需要類型',
'List Needs': '需求清單',
'List Notes': '清單附註',
'List Offices': '辦公室清單',
'List Organizations': '組織清單',
'List Partners': '夥伴清單',
'List Peers': '對等清單',
'List Personal Effects': '列出個人效果',
'List Persons': '人員清單',
'List Photos': '清單照片',
'List Population Statistics': '列出人口統計資料',
'List Positions': '位置清單',
'List Problems': '問題清單',
'List Projections': '預測清單',
'List Projects': '專案清單',
'List Rapid Assessments': '快速評量清單',
'List Received Items': '清單接收項目',
'List Received Shipments': '清單收到出貨',
'List Records': '記錄清單',
'List Registrations': '登錄清單',
'List Relatives': '關係列表',
'List Relief Items': '救援物資項目清單',
'List Reports': '清單報告',
'List Request Items': '要求清單項目',
'List Requested Skills': '需求技能列表',
'List Requests': '要求清單',
'List Resources': '列出資源',
'List Responses': '清單回應',
'List Rivers': '河流清單',
'List Roles': '列出角色',
'List Rooms': '列出會談室清單',
'List Scenarios': '範例情節清單',
'List School Districts': '清單學校行政區',
'List School Reports': '學校清單報告',
'List Sections': '清單區段',
'List Sectors': '磁區清單',
'List Sent Items': '傳送的項目清單',
'List Sent Shipments': '寄出出貨清單',
'List Service Profiles': '服務設定檔清單',
'List Settings': '清單設定',
'List Shelter Services': '列表庇護服務',
'List Shelter Types': '列表收容所類型',
'List Shelters': '列表收容所',
'List Shipment Transit Logs': '列表過境貨物日誌',
'List Shipment/Way Bills': '清單出貨/方式賬單',
'List Shipment<>Item Relation': '列表運費<>物品關係',
'List Shipments': '列表裝運',
'List Sites': '站點清單',
'List Skill Equivalences': '技能等值清單',
'List Skill Provisions': '技能供應清單',
'List Skill Types': '技能類型清單',
'List Skill': '技能清單',
'List Skills': '技能清單',
'List Solutions': '解決方案清單',
'List Sources': '來源清單',
'List Staff Types': '人員類型清單',
'List Staff': '列出人員清單',
'List Status': '清單狀態',
'List Storage Bin Type(s)': '儲存箱類型清單',
'List Storage Bins': '儲存箱清單',
'List Storage Location': '儲存位置清單',
'List Subscriptions': '清單訂閱',
'List Subsectors': '子部門清單',
'List Support Requests': '清單支援要求',
'List Survey Answers': '清單調查答案',
'List Survey Questions': '調查問題清單',
'List Survey Sections': '清單調查區段',
'List Survey Series': '清單調查系列',
'List Survey Templates': '清單調查範本',
'List Tasks': '列出作業',
'List Teams': '小組清單',
'List Themes': '佈景主題清單',
'List Tickets': '問題單清單',
'List Tracks': '追蹤清單',
'List Trainings': '培訓清單',
'List Units': '單位清單',
'List Updates': '更新清單',
'List Users': '列出使用者',
'List Vehicle Details': '交通工具詳細資料清單',
'List Vehicles': '交通工具清單',
'List Volunteers': '志願者清單',
'List Warehouses': '倉庫清單',
'List all': '列示全部',
'List available Scenarios': '列出可用的實務範例',
'List of CSV files uploaded': '已上傳CSV 檔案列表',
'List of CSV files': 'CSV 檔案列表',
'List of Items': '項目清單',
'List of Missing Persons': '失蹤人員清單',
'List of Peers': '對等點清單',
'List of Reports': '報告清單',
'List of Requests': '要求清單',
'List of Spreadsheets uploaded': '已上傳的試算表清單',
'List of Spreadsheets': '試算表清單',
'List of Volunteers for this skill set': '具備此技能組合的志工清單',
'List of Volunteers': '志願者清單',
'List of addresses': '地址清單',
'List unidentified': '未識別者清單',
'List': '清單',
'List/Add': '顯示/新增群組',
'Lists "who is doing what & where". Allows relief agencies to coordinate their activities': '列出"誰正在做什麼& "where"。可釋放機構來協調它們的活動',
'Live Help': '即時說明',
'Livelihood': '生計',
'Load Cleaned Data into Database': '將已清理的資料載入資料庫',
'Load Details': '載入詳細資料',
'Load Raw File into Grid': '載入原始檔案到網格',
'Load the details to help decide which is the best one to keep out of the 2.': '載入詳細資料,以協助判斷這兩者中哪一個最值得保留。',
'Loading Locations...': '正在載入位置...',
'Loading': '載入中',
'Local Language': '本地語言',
'Local Name': '當地名稱',
'Local Names': '當地名稱',
'Location 1': '位置一',
'Location 2': '位置二',
'Location De-duplicated': '位置取消重複',
'Location Details': '地點明細',
'Location Hierarchy Level 0 Name': '位置階層層次0名稱',
'Location Hierarchy Level 1 Name': '位置階層層次1名稱',
'Location Hierarchy Level 2 Name': '位置階層層次2名稱',
'Location Hierarchy Level 3 Name': '位置階層層次3名稱',
'Location Hierarchy Level 4 Name': '位置階層層次4名稱',
'Location Hierarchy Level 5 Name': '位置階層層次5名稱',
'Location added': '新增位置',
'Location cannot be converted into a group.': '位置不能轉換成一個群組。',
'Location deleted': '位置已移除',
'Location details': '地點明細',
'Location group cannot be a parent.': '位置群組不能是母項。',
'Location group cannot have a parent.': '位置群組不能有一個母項。',
'Location groups can be used in the Regions menu.': '位置群組可用於區域的功能表。',
'Location groups may be used to filter what is shown on the map and in search results to only entities covered by locations in the group.': '位置群組可能用來過濾顯示的內容在地圖上和在搜尋結果中只能實體所涵蓋的位置群組。',
'Location updated': '更新位置',
'Location': '地點',
'Location: ': '地點: ',
'Location:': '位置:',
'Locations De-duplicator': '位置去重複工具',
'Locations of this level need to have a parent of level': '此層次的位置必須有以下層次的母項',
'Locations should be different!': '應該是不同的位置!',
'Locations': '位置',
'Lockdown': '鎖定',
'Log Entry Details': '日誌項目詳細資料',
'Log entry added': '新增日誌項目',
'Log entry deleted': '已刪除日誌項目',
'Log entry updated': '日誌項目更新',
'Log': '日誌',
'Logged in': '已登入',
'Logged out': '登出',
'Login': '登入',
'Logistics Management System': '物流管理系統',
'Logistics Management': '管理物流',
'Logistics': '物流',
'Logo file %s missing!': '缺少標誌檔案 %s!',
'Logo': '標誌',
'Logout': '登出',
'Long Text': '長文字',
'Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. These need to be added in Decimal Degrees.': '經度是東西方向(左右)。緯度是南北方向(上下)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治標準時間)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。這些值需以十進位度數輸入。',
'Longitude is West - East (sideways). Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度是東西方向(左右)。經度在本初子午線(格林威治標準時間)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。',
'Longitude is West - East (sideways).': '經度是東西方向(左右)。',
'Longitude is West-East (sideways).': '經度是東西方向(左右)。',
'Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度在本初子午線(格林威治標準時間)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': '經度在本初子午線(通過英國格林威治)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。',
'Longitude of Map Center': '地圖中心的經度',
'Longitude of far eastern end of the region of interest.': '相關區域最東端的經度。',
'Longitude of far western end of the region of interest.': '相關區域最西端的經度。',
'Longitude should be between': '經度必須介於',
'Longitude': '經度',
'Lost Password': '忘記密碼',
'Lost': '遺失',
'Low': '低',
'Machine with which data was exchanged.': '與之交換資料的機器。',
'Magnetic Storm': '磁暴',
'Main cash source': '主要現金來源',
'Main income sources before disaster': '災前的主要收入來源',
'Major Damage': '主要損壞',
'Major expenses': '主要費用',
'Major outward damage': '主要外部損壞',
'Make Commitment': '做出承諾',
'Make New Commitment': '做出新承諾',
'Make Pledge': '做出認捐',
'Make Request': '提出請求',
'Make a Request for Aid': '提出援助請求',
'Make a Request': '提出請求',
'Make a request': '提出請求',
'Make preparations per the <instruction>': '依照<instruction>進行準備',
'Male': '男性',
'Malnutrition present prior to disaster': '災前即存在營養不良',
'Manage Category': '管理種類',
'Manage Events': '管理事件',
'Manage Item catalog': '管理項目型錄',
'Manage Kits': '管理套件',
'Manage Relief Item Catalogue': '管理救援物資項目型錄',
'Manage Sub-Category': '管理子種類',
'Manage Users & Roles': '管理使用者與角色',
'Manage Warehouses/Sites': '管理倉庫/站點',
'Manage Your Facilities': '管理您的設備',
'Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.': '管理對物資、資產、人員或其他資源的請求。將物資請求與庫存進行比對。',
'Manage requests of hospitals for assistance.': '管理醫院的援助請求。',
'Manage volunteers by capturing their skills, availability and allocation': '透過記錄志工的技能、可用時間與派遣狀況來管理志工',
'Manage': '管理',
'Manager': '管理者',
'Managing Office': '主管辦公室',
'Managing, Storing and Distributing Relief Items': '管理、儲存與發放救援物資',
'Managing, Storing and Distributing Relief Items.': '管理、儲存與發放救援物資。',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': '必要的。 在GeoServer,這是層的名稱。 在WFS getCapabilities,這是FeatureType名稱部分之後,冒號(:)。',
'Mandatory. The URL to access the service.': '必要的。 的URL來存取服務。',
'Manual Synchronization': '手動同步化',
'Manual': '手動',
'Many': '許多',
'Map Center Latitude': '對映中心緯度',
'Map Center Longitude': '對映中心經度',
'Map Profile Details': '對映配置詳細資料',
'Map Profile added': '新增對映配置',
'Map Profile deleted': '刪除對映配置',
'Map Profile removed': '移除對映配置',
'Map Profile updated': '對映配置更新',
'Map Profile': '對映配置',
'Map Profiles': '對映配置',
'Map Height': '對映高度',
'Map Service Catalogue': '地圖服務型錄',
'Map Settings': '對映設定',
'Map Viewing Client': '地圖檢視用戶端',
'Map Width': '地圖寬度',
'Map Zoom': '對映縮放',
'Map of Hospitals': '醫院地圖',
'Map': '地圖',
'Mapping': '地圖模組',
'Marine Security': '海運安全',
'Marital Status': '婚姻狀況',
'Marker Details': '標記詳細資料',
'Marker added': '新增標記',
'Marker deleted': '標記刪除',
'Marker updated': '更新標記',
'Marker': '標記',
'Markers': '標記',
'Master Message Log to process incoming reports & requests': '主要訊息日誌以處理進入的報告和要求',
'Master Message Log': '主要訊息日誌',
'Match Percentage': '符合百分比',
'Match Requests': '符合要求',
'Match percentage indicates the % match between these two records': '相符百分比表示這兩筆記錄之間的符合程度(%)',
'Match?': '相符?',
'Matching Catalog Items': '相符的型錄項目',
'Matching Items': '相符的項目',
'Matching Records': '相符記錄',
'Matrix of Choices (Multiple Answers)': '選項矩陣(多個答案)',
'Matrix of Choices (Only one answer)': '選項矩陣(僅一個答案)',
'Matrix of Text Fields': '文字欄位矩陣',
'Max Persons per Dwelling': '每住宅最大人數',
'Maximum Location Latitude': '最大位置緯度',
'Maximum Location Longitude': '最大位置經度',
'Maximum Weight': '最大重量',
'Maximum weight capacity of the Storage Location followed by choosing the unit from the drop down list.': '儲存位置的最大承重能力,然後從下拉清單中選擇單位。',
'Maximum weight capacity of the items the storage bin can contain. followed by choosing the unit from the drop down list.': '儲存箱可容納項目的最大重量,然後從下拉清單中選擇單位。',
'Measure Area: Click the points around the polygon & end with a double-click': '測量面積:點擊多邊形周圍的點和雙擊結束',
'Measure Length: Click the points along the path & end with a double-click': '測量長度:沿路徑點擊各點,並以雙擊結束',
'Measures': '測量',
'Medical Attention': '醫療照護',
'Medical Staff': '醫療人員',
'Medical Supplies': '醫療補給品',
'Medical and public health': '醫療及公共健康',
'Medicine': '醫藥',
'Medium': '中',
'Megabytes per Month': '每月(MB)',
'Member removed from Group': '組員已刪除',
'Members': '成員',
'Membership Details': '組員內容',
'Membership updated': '組員已更新',
'Membership': '成員資格',
'Memberships': '群組設定',
'Mental': '精神',
'Message Details': '訊息詳細資料',
'Message Variable': '訊息變數',
'Message added': '新增訊息',
'Message deleted': '訊息已刪除',
'Message sent to outbox': '訊息傳送至寄件匣',
'Message updated': '更新訊息',
'Message variable': '訊息變數',
'Message': '訊息',
'Messages': '訊息',
'Messaging settings updated': '傳訊設定更新',
'Messaging': '傳訊模組',
'Metadata Details': 'Meta 資料的詳細資料',
'Metadata added': '新增meta資料',
'Metadata can be supplied here to be applied to all uploaded photos, if desired.': 'meta資料可以提供來套用至所有上傳的照片, (如果想要)。',
'Metadata deleted': '刪除meta資料',
'Metadata updated': '更新meta資料',
'Metadata': 'meta 資料 (metadata)',
'Meteorological (inc. flood)': '氣象(含水災)',
'Method used': '使用方法',
'Micronutrient malnutrition prior to disaster': '災前微量營養素營養不良',
'Middle Name': '中間名',
'Migrants or ethnic minorities': '移民或少數族裔',
'Military': '軍事',
'Minimum Bounding Box': '最小外框',
'Minimum Location Latitude': '最小位置緯度',
'Minimum Location Longitude': '最小位置經度',
'Minimum shift time is 6 hours': '最短輪班時間為六小時',
'Minor Damage': '次要損壞',
'Minor/None': '次要/無',
'Minorities participating in coping activities': '參與應對活動的少數族裔',
'Minute': '分鐘',
'Minutes must be a number between 0 and 60': '分鐘必須是 0 到 60 之間的數字',
'Minutes must be between 0 and 60': '分鐘必須在 0 到 60 之間',
'Minutes per Month': '每月分鐘數',
'Minutes should be a number greater than 0 and less than 60': '分鐘應該是大於 0 且小於 60 的數字',
'Minutes should be greater than 0 and less than 60': '分鐘應該大於 0 且小於 60',
'Miscellaneous': '雜項',
'Missing Person Details': '失蹤人員詳細資料',
'Missing Person Registry': '失蹤人員登錄',
'Missing Person Reports': '失蹤人員報表',
'Missing Person': '失蹤人員',
'Missing Persons Registry': '失蹤人員登錄',
'Missing Persons Report': '失蹤人員報表',
'Missing Persons': '失蹤人員',
'Missing Report': '失蹤報告',
'Missing Senior Citizen': '失蹤年長者',
'Missing Vulnerable Person': '失蹤弱勢人員',
'Missing': '失蹤',
'Mission Details': '任務詳細資料',
'Mission Record': '任務記錄',
'Mission added': '任務已新增',
'Mission deleted': '任務已刪除',
'Mission updated': '任務已更新',
'Missions': '任務',
'Mobile Assess.': '行動評估',
'Mobile Basic Assessment': '行動基本評量',
'Mobile Phone': '行動電話',
'Mobile': '行動電話',
'Mode': '模式',
'Model/Type': '型號/類型',
'Modem Settings': '數據機設定',
'Modem settings updated': '數據機設定更新',
'Modem': '數據機',
'Moderate': '普通',
'Moderator': '主持人',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': '修改功能:選取您要變形的功能,然後拖曳其中一個點,以您選擇的方式變形該功能',
'Modify Information on groups and individuals': '修改群組及個人的相關資訊',
'Modifying data in spreadsheet before importing it to the database': '在匯入資料庫之前於試算表中修改資料',
'Module Administration': '模組管理',
'Module disabled!': '模組已停用!',
'Module provides access to information on current Flood Levels.': '本模組可讓您存取目前水災水位的資訊。',
'Module stores structured reports done by Professional Organizations - currently data includes WFP Assessments.': '本模組儲存由專業組織完成的結構化報告,目前資料包括 WFP 評估。',
'Module': '模組',
'Monday': '星期一',
'Monthly Cost': '每月成本',
'Monthly Salary': '每月薪資',
'Months': '月數',
'More about OpenID': '更多關於 OpenID',
'Morgue Status': '太平間狀態',
'Morgue Units Available': '可用的太平間單位',
'Motorcycle': '摩托車',
'Move Feature: Drag feature to desired location': '移動功能:將功能拖曳至所需位置',
'Movements (Filter In/Out/Lost)': '移動(篩選 進/出/遺失)',
'MultiPolygon': '多個多邊形',
'Multiple Choice (Multiple Answers)': '多個選項(多個答案)',
'Multiple Choice (Only One Answer)': '多個選項(僅一回答)',
'Multiple Matches': '多個相符的項目',
'Multiple Text Fields': '多個文字欄位',
'Multiple': '多個',
'Muslim': '回教',
'Must a location have a parent location?': '位置是否必須有母項位置?',
'My Current function': '我目前的職務',
'My Tasks': '我的任務',
'My Volunteering': '我的志工任務',
'N/A': '不適用',
'NO': '否',
'NZSEE Level 1': 'NZSEE層次一',
'NZSEE Level 2': 'NZSEE層次二',
'Name and/or ID Label': '名稱和/或識別號碼標籤',
'Name and/or ID': '名稱和/或識別號碼',
'Name of School': '學校的名稱',
'Name of Storage Bin Type.': '存儲箱類型名稱。',
'Name of the file (& optional sub-path) located in static which should be used for the background of the header.': '位於 static 中的檔案名稱(及選用的子路徑),用作頁首的背景。',
'Name of the file (& optional sub-path) located in static which should be used for the top-left image.': '位於 static 中的檔案名稱(及選用的子路徑),用作左上方影像。',
'Name of the file (& optional sub-path) located in views which should be used for footer.': '位於 views 中的檔案名稱(及選用的子路徑),用作頁尾。',
'Name of the person in local language and script (optional).': '以當地語言及文字書寫的人員姓名(選用)。',
'Name of the unit or department this report refers to. Leave empty if your hospital has no subdivisions.': '這份報告所指的單位或部門名稱。如果您的醫院沒有子部門,請保留空白。',
'Name': '名稱',
'Name, Org and/or ID': '名稱,組織及/或ID',
'Name/Model/Type': '名稱/模型/類型',
'Names can be added in multiple languages': '名稱可以添加多語言',
'National ID Card': '國家ID卡',
'National NGO': '國家級 NGO',
'National Staff': '國家人員',
'National': '國家',
'Nationality of the person.': '聯絡人的國籍.',
'Nationality': '國籍',
'Nautical Accident': '海事意外',
'Nautical Hijacking': '海上劫持',
'Need Type Details': '需求類型詳細資料',
'Need Type added': '需求類型已新增',
'Need Type deleted': '需求類型已刪除',
'Need Type updated': '需求類型已更新',
'Need Type': '需求類型',
'Need Types': '需求類型',
'Need added': '需求已新增',
'Need deleted': '需求已刪除',
'Need to be logged-in to be able to submit assessments': '需要登入,才能提交評量',
'Need to configure Twitter Authentication': '需要配置Twitter鑒別',
'Need to select 2 Locations': '需要選取二個位置',
'Need to specify a Budget!': '需要指定的預算!',
'Need to specify a Kit!': '需要指定一個套件!',
'Need to specify a Resource!': '必須指定資源!',
'Need to specify a bundle!': '需要指定軟體組!',
'Need to specify a feature group!': '需要指定一項特性群組!',
'Need to specify a group!': '需要指定"群組"!',
'Need to specify a location to search for.': '需要指定一個位置來搜尋。',
'Need to specify a role!': '需要指定一個角色!',
'Need to specify a table!': '需要指定一個表格!',
'Need to specify a user!': '需要指定一個使用者!',
'Need updated': '需要更新',
'Needs Details': '需求詳細資料',
'Needs Maintenance': '需要維護',
'Needs to reduce vulnerability to violence': '需要減少遭受暴力的脆弱性',
'Needs': '需要',
'Negative Flow Isolation': '負壓隔離',
'Neighborhood': '鄰居',
'Neighbourhood': '鄰居',
'Neighbouring building hazard': '鄰近建築物危害',
'Network': '網路',
'Neurology': '神經內科',
'New Assessment reported from': '新的評量報告從',
'New Certificate': '新建憑證',
'New Checklist': '新核對清單',
'New Entry': '新建文章',
'New Event': '新建事件',
'New Item Category': '新項目種類',
'New Job Role': '新工作角色',
'New Location Group': '新位置群組',
'New Location': '新位置',
'New Patient': '新病患',
'New Peer': '新的同層級',
'New Record': '新建記錄',
'New Report': '新建報告',
'New Request': '新要求',
'New Scenario': '新方案',
'New Skill': '新技能',
'New Solution Choice': '新解決方案選項',
'New Staff Member': '新人員成員',
'New Support Request': '新申請',
'New Synchronization Peer': '新的同層級同步化',
'New Team': '新團隊',
'New Training Course': '新的培訓課程',
'New Volunteer': '新志工',
'New cases in the past 24h': '過去 24 小時的新案例',
'New': '新建',
'Newer Timestamp': '更新時間戳記',
'News': '新聞',
'Next View': '下一頁',
'Next': '下一頁(N)',
'No Activities Found': '沒有找到的活動',
'No Activities currently registered in this event': '在本事件中,並無已登錄的活動',
'No Addresses currently registered': '沒有位址目前登錄',
'No Aid Requests currently registered': '目前沒有任何已登錄的需求',
'No Alternative Items currently registered': '沒有替代項目目前登錄',
'No Assessment Summaries currently registered': '沒有評估目前已登錄摘要',
'No Assessments currently registered': '沒有評估目前登錄',
'No Asset Assignments currently registered': '沒有資產指派目前已登錄',
'No Assets currently registered in this event': '沒有資產目前登錄在此事件',
'No Assets currently registered in this scenario': '沒有資產目前已登錄在這個實務中',
'No Assets currently registered': '沒有資產目前已登錄',
'No Baseline Types currently registered': '沒有基準線類型目前登錄',
'No Baselines currently registered': '沒有基準線目前登錄',
'No Brands currently registered': '沒有品牌目前登錄',
'No Budgets currently registered': '目前沒有預算登錄',
'No Bundles currently registered': '目前沒有軟體組登錄',
'No Camp Services currently registered': '目前沒有已登錄的營地服務',
'No Camp Types currently registered': '目前沒有已登錄的營地類型',
'No Camps currently registered': '目前沒有已登錄的營地',
'No Catalog Items currently registered': '沒有型錄項目目前已登錄',
'No Catalogs currently registered': '任何型錄目前已登錄',
'No Category<>Sub-Category<>Catalog Relation currently registered': '目前沒有已登錄的 Category<>Sub-Category<>Catalog 關係',
'No Checklist available': '沒有可用的核對清單',
'No Cluster Subsectors currently registered': '目前沒有已登錄的叢集子部門',
'No Clusters currently registered': '目前沒有已登錄的叢集',
'No Commitment Items currently registered': '目前沒有已登錄的承諾項目',
'No Commitments': '沒有承諾',
'No Configs currently defined': '沒有配置目前定義',
'No Credentials currently set': '目前沒有認證設定',
'No Details currently registered': '沒有詳細資料目前已登錄',
'No Distribution Items currently registered': '沒有分配項目目前登錄',
'No Distributions currently registered': '目前沒有已登錄的配送',
'No Documents found': '找不到文件',
'No Donors currently registered': '目前沒有已登錄的捐贈者',
'No Events currently registered': '目前登錄任何事件',
'No Facilities currently registered in this event': '沒有設備目前登錄在此事件',
'No Facilities currently registered in this scenario': '沒有設備目前登錄在這個實務中',
'No Feature Layers currently defined': '沒有功能層目前定義',
'No Flood Reports currently registered': '目前沒有已登錄的水災報告',
'No GPS data currently registered': '目前無GPS資料被登錄',
'No Groups currently defined': '目前沒有群組定義',
'No Groups currently registered': '目前沒有群組',
'No Hospitals currently registered': '沒有醫院目前登錄',
'No Human Resources currently registered in this event': '沒有人力資源目前已在這個事件',
'No Human Resources currently registered in this scenario': '沒有人力資源目前已登錄在這個實務中',
'No Identification Report Available': '沒有可用的識別報告',
'No Identities currently registered': '沒有目前登錄身分',
'No Image currently defined': '沒有映射檔目前定義',
'No Image': '沒有影像',
'No Images currently registered': '沒有影像目前登錄',
'No Impact Types currently registered': '沒有目前有登記的影響類型',
'No Impacts currently registered': '沒有影響目前已登錄',
'No Import Files currently uploaded': '目前沒有已上傳的匯入檔案',
'No Incident Reports currently registered': '目前沒有事件報告記錄',
'No Incidents currently registered': '目前沒有事件記錄',
'No Incoming Shipments': '沒有進貨',
'No Inventories currently have suitable alternative items in stock': '目前沒有合適的替代品尚有庫存',
'No Inventories currently have this item in stock': '目前沒有庫存存有此項目',
'No Inventory Items currently registered': '目前沒有登記的庫存項目',
'No Inventory Locations currently registered': '目前沒有登記的庫存地點',
'No Inventory Stores currently registered': '目前沒有已登錄的庫存儲存處',
'No Item Catalog Category currently registered': '沒有項目型錄種類目前已登錄',
'No Item Catalog currently registered': '沒有項目型錄目前已登錄',
'No Item Categories currently registered': '沒有項目種類目前登錄',
'No Item Packs currently registered': '沒有項目套件目前已登錄',
'No Item Sub-Category currently registered': '項目沒有子類別目前已登錄',
'No Item currently registered': '沒有項目目前已登錄',
'No Items currently registered in this Inventory': '此庫存中目前沒有已登錄的項目',
'No Items currently registered': '沒有項目目前登錄',
'No Items currently requested': '沒有項目目前要求',
'No Keys currently defined': '目前未定義任何金鑰',
'No Kits currently registered': '目前沒有已登錄的套件',
'No Landmarks currently defined': '沒有目前定義里程碑',
'No Level 1 Assessments currently registered': '沒有層次一評量目前已登錄',
'No Level 2 Assessments currently registered': '沒有層次二評量目前已登錄',
'No Locations currently available': '目前沒有可用的位置',
'No Locations currently registered': '目前沒有已登錄的位置',
'No Map Profiles currently defined': '沒有對映配置目前定義',
'No Map Profiles currently registered in this event': '沒有對映配置目前登錄在此事件',
'No Map Profiles currently registered in this scenario': '沒有對映配置目前登錄在這個實務中',
'No Markers currently available': '沒有當前可用標記',
'No Match': '沒有相符的項目',
'No Matching Catalog Items': '沒有相符的型錄項目',
'No Matching Items': '沒有相符的項目',
'No Matching Records': '沒有相符的記錄',
'No Members currently registered': '沒有成員目前登錄',
'No Memberships currently defined': '目前沒有已定義的成員資格',
'No Messages currently in Outbox': '沒有訊息目前在寄件匣',
'No Metadata currently defined': '目前沒有meta資料定義',
'No Need Types currently registered': '目前沒有已登錄的需求類型',
'No Needs currently registered': '目前沒有登錄需要',
'No Offices currently registered': '沒有辦公室目前登錄',
'No Offices found!': '沒有辦公室找到!',
'No Organizations currently registered': '目前沒有已登錄的組織',
'No Organizations registered!': '沒有組織登錄!',
'No Packs for Item': '此品項無包裝',
'No Partners currently registered': '沒有夥伴目前登錄',
'No Patients currently registered': '目前沒有病人登錄',
'No Peers currently registered': '沒有同層級目前登錄',
'No People currently committed': '目前無人承諾',
'No People currently registered in this camp': '這個營地目前沒有已登錄的人員',
'No People currently registered in this shelter': '這個避難所目前沒有已登錄的人員',
'No Persons currently registered': '沒有人員目前已登錄',
'No Persons currently reported missing': '沒有人員目前報告遺漏',
'No Persons found': '沒有找到人員',
'No Photos found': '沒有找到照片',
'No Picture': '沒有圖片',
'No Population Statistics currently registered': '目前沒有已登錄的人口統計資料',
'No Presence Log Entries currently registered': '目前沒有已登錄的存在日誌項目',
'No Problems currently defined': '目前沒有問題定義',
'No Projections currently defined': '沒有估算目前定義',
'No Projects currently registered': '沒有專案目前已登錄',
'No Rapid Assessments currently registered': '沒有快速評估目前登錄',
'No Received Items currently registered': '沒有收到項目目前登錄',
'No Received Shipments': '沒有收到出貨',
'No Records currently available': '沒有記錄當前可用',
'No Records matching the query': '沒有符合查詢的記錄',
'No Reports currently registered': '沒有報告目前登錄',
'No Request Items currently registered': '目前沒有已登錄的要求項目',
'No Requests have been made yet': '尚未提出任何要求',
'No Requests match this criteria': '沒有要求符合此準則',
'No Requests': '沒有要求',
'No Responses currently registered': '沒有回應目前已登錄',
'No Rivers currently registered': '目前沒有已登錄的河流',
'No Roles currently defined': '目前未定義任何角色',
'No Rooms currently registered': '沒有會談室目前登錄',
'No Scenarios currently registered': '沒有目前登錄實務',
'No School Districts currently registered': '沒有學校行政區目前登錄',
'No School Reports currently registered': '沒有學校報告目前登錄',
'No Sections currently registered': '沒有區段目前登錄',
'No Sectors currently registered': '目前沒有已註冊部門',
'No Sent Items currently registered': '目前沒有已發送項目',
'No Sent Shipments': '沒有已發送貨物',
'No Settings currently defined': '目前沒有定義設置',
'No Shelter Services currently registered': '目前沒有註冊住房服務',
'No Shelter Types currently registered': '目前沒有已登錄的避難所類型',
'No Shelters currently registered': '目前沒有註冊住房',
'No Shipment Transit Logs currently registered': '目前沒有已登錄的出貨運輸日誌',
'No Shipment/Way Bills currently registered': '目前沒有已登錄的出貨/運貨單',
'No Shipment<>Item Relation currently registered': '沒有Shipment<>Item關系目前登錄',
'No Sites currently registered': '沒有站點目前登錄',
'No Skill Types currently set': '目前沒有已設定的技能類型',
'No Solutions currently defined': '沒有解決方案目前定義',
'No Sources currently registered': '沒有來源目前登錄',
'No Staff Types currently registered': '沒有人員類型目前登錄',
'No Staff currently registered': '沒有人員目前已登錄',
'No Storage Bin Type currently registered': '目前沒有已登錄的儲存箱類型',
'No Storage Bins currently registered': '目前沒有已登錄的儲存箱',
'No Storage Locations currently registered': '沒有儲存體位置目前登錄',
'No Subscription available': '沒有可用的訂閱',
'No Subsectors currently registered': '目前沒有已登錄界別分組',
'No Support Requests currently registered': '目前沒有已登錄的支援要求',
'No Survey Answers currently entered.': '目前沒有已輸入調查答案。',
'No Survey Answers currently registered': '沒有意見調查答案目前登錄',
'No Survey Questions currently registered': '沒有調查問題目前登錄',
'No Survey Sections currently registered': '沒有調查區段目前登錄',
'No Survey Series currently registered': '沒有調查系列目前登錄',
'No Survey Template currently registered': '沒有調查範本目前已登錄',
'No Sync': '沒有同步',
'No Tasks currently registered in this event': '在此事件中,無任何登錄的任務',
'No Tasks currently registered in this scenario': '在此情境中,無任何登錄的任務',
'No Tasks with Location Data': '沒有含位置資料的任務',
'No Teams currently registered': '目前沒有已登錄團隊',
'No Themes currently defined': '沒有主題目前定義',
'No Tickets currently registered': '目前沒有已登錄的問題單',
'No Tracks currently available': '沒有追蹤目前可用',
'No Units currently registered': '目前沒有已登錄的單位',
'No Updates currently registered': '沒有更新目前已登錄',
'No Users currently registered': '沒有使用者目前已登錄',
'No Volunteers currently registered': '沒有目前志願者註冊',
'No Warehouses currently registered': '沒有目前登錄倉庫',
'No access at all': '完全沒有存取權',
'No access to this record!': '無法存取這個記錄!',
'No action recommended': '沒有建議的動作',
'No conflicts logged': '沒有記錄到衝突',
'No contact information available': '沒有可用的聯絡資訊',
'No contacts currently registered': '沒有聯絡人目前登錄',
'No data in this table - cannot create PDF!': '此表格中沒有資料,無法建立 PDF!',
'No databases in this application': '這個應用程式中沒有資料庫',
'No dead body reports available': '沒有可用的遺體報告',
'No entries found': '找不到項目',
'No entries matching the query': '沒有符合查詢的項目',
'No entry available': '沒有可用的項目',
'No import jobs': '沒有匯入工作',
'No linked records': '沒有鏈結記錄',
'No location known for this person': '不知道此人的位置',
'No location known of this person.': '不知道此人的位置。',
'No locations found for members of this team': '找不到此小組成員的位置',
'No locations registered at this level': '這個層次沒有已登錄的位置',
'No log entries matching the query': '沒有符合查詢的日誌項目',
'No matching records found.': '沒有找到相符的記錄。',
'No messages in the system': '系統中沒有訊息',
'No notes available': '沒有可用的筆記',
'No of Families Settled in the Schools': '安置於學校的家庭數',
'No of Families to whom Food Items are Available': '可獲得食品項目的家庭數',
'No of Families to whom Hygiene is Available': '可獲得衛生用品的家庭數',
'No of Families to whom Non-Food Items are Available': '可獲得非食品項目的家庭數',
'No of Female Students (Primary To Higher Secondary) in the Total Affectees': '受災總人數中的女性學生數(小學至高中)',
'No of Female Teachers & Other Govt Servants in the Total Affectees': '受災總人數中的女性教師及其他公務人員數',
'No of Male Students (Primary To Higher Secondary) in the Total Affectees': '受災總人數中的男性學生數(小學至高中)',
'No of Male Teachers & Other Govt Servants in the Total Affectees': '受災總人數中的男性教師及其他公務人員數',
'No of Rooms Occupied By Flood Affectees': '被水災受災者佔用的房間數',
'No peers currently registered': '沒有同層級目前登錄',
'No pending registrations found': '沒有擱置的登錄找到',
'No pending registrations matching the query': '沒有擱置符合查詢的登錄',
'No person record found for current user.': '找不到現行使用者的人員記錄。',
'No positions currently registered': '沒有位置目前登錄',
'No problem group defined yet': '尚未定義問題群組',
'No records matching the query': '沒有符合查詢的記錄',
'No records to delete': '沒有要刪除的記錄',
'No recovery reports available': '沒有可用的回復報告',
'No report available.': '沒有可用的報告。',
'No reports available.': '沒有可用的報告。',
'No reports currently available': '目前沒有可用的報告',
'No requests currently registered': '沒有要求目前已登錄',
'No requests found': '找不到要求',
'No resources currently registered': '沒有資源目前已登錄',
'No resources currently reported': '沒有資源目前報告',
'No service profile available': '沒有可用的服務配置',
'No skills currently set': '目前沒有已設定的技能',
'No staff or volunteers currently registered': '目前沒有已登錄人員或志願者',
'No status information available': '沒有可用的狀態資訊',
'No sync permitted!': '不允許同步!',
'No synchronization': '沒有同步化',
'No tasks currently assigned': '沒有任務被指派',
'No tasks currently registered': '沒有作業目前已登錄',
'No template found!': '沒有找到範本!',
'No units currently registered': '目前沒有已登錄的單位',
'No volunteer availability registered': '沒有已登錄的志工可用時間',
'No volunteer information registered': '沒有已登錄的志工資訊',
'No': '否',
'Non-medical Staff': '非醫療工作人員',
'Non-structural Hazards': '非結構危害',
'None (no such record)': '無(無記錄)',
'None': '無',
'Normal food sources disrupted': '正常食品來源中斷',
'Normal': '正常',
'Not Applicable': '不適用',
'Not Authorised!': '未獲授權!',
'Not Possible': '不可能',
'Not Set': '未設定',
'Not Authorized': '未獲授權',
'Not installed or incorrectly configured.': '未安裝或配置不正確。',
'Not supported': '不支援',
'Not yet a Member of any Group': '尚未加入任何群組',
'Note Details': '附註詳細資料',
'Note Status': '附註狀態',
'Note Type': '附註類型',
'Note added': '已新增附註',
'Note deleted': '刪除附註',
'Note that this list only shows active volunteers. To see all people registered in the system, do a search from the home screen instead': '注意,這份清單只顯示現役志工。若要查看系統中登錄的所有人,請改從首頁畫面搜尋',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': '注意,這份清單只顯示現役志工。若要查看系統中登錄的所有人,請改從這個畫面搜尋',
'Note updated': '附註更新',
'Note': '附註',
'Notes': '附註',
'Notice to Airmen': '飛航公告',
'Number of Columns': '直欄數',
'Number of Patients': '病人數',
'Number of People Affected': '受影響的人數',
'Number of People Deceased': '死亡人數',
'Number of People Injured': '受傷人數',
'Number of Rows': '橫列數',
'Number of Vehicles': '車輛數目',
'Number of additional beds of that type expected to become available in this unit within the next 24 hours.': '此單位在未來 24 小時內預期可增加的該類型病床數。',
'Number of alternative places for studying': '可供讀書的替代場所數',
'Number of available/vacant beds of that type in this unit at the time of reporting.': '報告時此單位該類型的可用/空置病床數。',
'Number of deaths during the past 24 hours.': '過去 24 小時內的死亡人數。',
'Number of discharged patients during the past 24 hours.': '過去 24 小時內的出院病人數。',
'Number of doctors actively working': '實際工作中的醫師數',
'Number of doctors': '醫師數',
'Number of houses damaged, but usable': '損壞但可使用的房屋數',
'Number of houses destroyed/uninhabitable': '損毀/無法居住的房屋數',
'Number of in-patients at the time of reporting.': '報告時的住院病人數。',
'Number of latrines': '廁所數',
'Number of midwives actively working': '實際工作中的助產士數',
'Number of newly admitted patients during the past 24 hours.': '過去 24 小時內新收治的病人數。',
'Number of non-medical staff': '非醫療人員數',
'Number of nurses actively working': '實際工作中的護士數',
'Number of nurses': '護士數',
'Number of private schools': '私立學校數',
'Number of public schools': '公立學校數',
'Number of religious schools': '宗教學校數',
'Number of residential units not habitable': '無法居住的住宅單位數',
'Number of residential units': '住宅單位數',
'Number of schools damaged but usable': '損壞但可使用的學校數',
'Number of schools destroyed/uninhabitable': '損毀/無法使用的學校數',
'Number of schools open before disaster': '災前開設的學校數',
'Number of schools open now': '現在開設的學校數',
'Number of teachers affected by disaster': '受災難影響的教師數',
'Number of teachers before disaster': '災前教師人數',
'Number of vacant/available beds in this hospital. Automatically updated from daily reports.': '這家醫院空置/可用病床數。 自動更新從每日報告。',
'Number of vacant/available units to which victims can be transported immediately.': '受害人可立即運送空置/可用單位數。',
'Number or Label on the identification tag this person is wearing (if any).': '聯絡人配戴的識別證編號或符號 (如果有).',
'Number or code used to mark the place of find, e.g. flag code, grid coordinates, site reference number or similar (if available)': '用以標記要搜尋的地方的號碼或代碼,如標誌代碼,網格坐標,場地參考號碼或類似(如果有)',
'Number': '號碼',
'Number/Percentage of affected population that is Female & Aged 0-5': '受影響人口中女性 0-5 歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 13-17': '受影響人口中女性 13-17 歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 18-25': '受影響人口中女性 18-25 歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 26-60': '受影響人口中女性 26-60 歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 6-12': '受影響人口中女性 6-12 歲的人數/百分比',
'Number/Percentage of affected population that is Female & Aged 61+': '受影響人口中女性 61 歲以上的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 0-5': '受影響人口中男性 0-5 歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 13-17': '受影響人口中男性 13-17 歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 18-25': '受影響人口中男性 18-25 歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 26-60': '受影響人口中男性26到60歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 6-12': '受影響人口中男性6到12歲的人數/百分比',
'Number/Percentage of affected population that is Male & Aged 61+': '受影響人口中男性61歲以上的人數/百分比',
'Numbers Only': '只能填數字',
'Nurse': '護士',
'Nursing Information Manager': '看護資訊管理程式',
'Nutrition problems': '營養問題',
'Nutrition': '營養',
'OK': '確定',
'OR Reason': '手術室原因',
'OR Status Reason': '手術室狀態原因',
'OR Status': '手術室狀態',
'Observer': '觀察程式 (observer)',
'Obsolete': '已作廢',
'Office Address': '辦公室地址',
'Office Details': '辦公室詳細資料',
'Office Phone': '辦公室電話',
'Office added': '辦公室新增',
'Office deleted': '辦公室刪除',
'Office updated': '辦公室更新',
'Office': '辦公室',
'Offices & Warehouses': '辦公室與倉庫',
'Offices': '辦公室',
'Offline Sync (from USB/File Backup)': '離線同步(從USB/檔案備份)',
'Offline Sync': '離線同步',
'Old': '舊',
'Older people as primary caregivers of children': '作為兒童主要照顧者的年長者',
'Older people in care homes': '安養院中的年長者',
'Older people participating in coping activities': '參與應對活動的年長者',
'Older people with chronical illnesses': '患有慢性疾病的年長者',
'Older person (>60 yrs)': '年長者(60 歲以上)',
'On by default? (only applicable to Overlays)': '依預設值嗎? (僅適用於覆蓋)',
'On by default?': '依預設值嗎?',
'On-site Hospitalization': '現場住院',
'One Time Cost': '單次成本',
'One time cost': '單次成本',
'One-time costs': '單次成本',
'One-time': '一次',
'Oops! Something went wrong...': '糟糕! 發生錯誤。',
'Oops! something went wrong on our side.': '糟糕!我們這邊發生錯誤。',
'Opacity (1 for opaque, 0 for fully-transparent)': '不透明度(1 為不透明,0 為完全透明)',
'Open Map': '開啟地圖',
'Open area': '開啟區域',
'Open in New Tab': '在新標籤中開啟',
'Open recent': '開啟最近檔',
'Open': '開啟',
'OpenID Login': 'OpenID 登入',
'OpenID authenticated successfully.': 'OpenID順利鑒別。',
'Operating Rooms': '作業房間',
'Operation': '作業',
'Optional link to an Incident which this Assessment was triggered by.': '選用:鏈結至觸發此評估的事件。',
'Optional': '選用',
'Optional. If you wish to style the features based on values of an attribute, select the attribute to use here.': '選用。如果您想根據屬性值設定功能的樣式,請在此選取要使用的屬性。',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。在 GeoServer 中,這是工作區名稱空間 URI(不是名稱!)。在 WFS getCapabilities 中,這是 FeatureType 名稱中冒號(:)之前的部分。',
'Optional. In GeoServer, this is the Workspace Namespace URI. Within the WFS getCapabilities, this is the FeatureType Name part before the colon(:).': '選用。在 GeoServer 中,這是工作區名稱空間 URI。在 WFS getCapabilities 中,這是 FeatureType 名稱中冒號(:)之前的部分。',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': '選用。其內容應為影像檔 URL 並放入蹦現視窗的元素名稱。',
'Optional. The name of an element whose contents should be put into Popups.': '選用。其內容應放入蹦現視窗的元素名稱。',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': '選用。 綱目的名稱。 在Geoserver這個格式為http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name。',
'Options': '選項',
'Organization Details': '組織明細',
'Organization Registry': '組織登錄',
'Organization added': '新增組織',
'Organization deleted': '刪除組織',
'Organization updated': '更新組織',
'Organization': '組織',
'Organizations': '組織',
'Origin of the separated children': '失散兒童的來源地',
'Origin': '源點',
'Other (describe)': '其他(說明)',
'Other (specify)': '其他(請說明)',
'Other Evidence': '其他證據',
'Other Faucet/Piped Water': '其他水龍頭/管線供水',
'Other Isolation': '其他隔離',
'Other Name': '其他名稱',
'Other activities of boys 13-17yrs before disaster': '災前 13-17 歲男孩的其他活動',
'Other activities of boys 13-17yrs': '13-17 歲男孩的其他活動',
'Other activities of boys <12yrs before disaster': '災前 12 歲以下男孩的其他活動',
'Other activities of boys <12yrs': '12 歲以下男孩的其他活動',
'Other activities of girls 13-17yrs before disaster': '災前 13-17 歲女孩的其他活動',
'Other activities of girls 13-17yrs': '13-17 歲女孩的其他活動',
'Other activities of girls<12yrs before disaster': '災前 12 歲以下女孩的其他活動',
'Other activities of girls<12yrs': '12 歲以下女孩的其他活動',
'Other alternative infant nutrition in use': '其他使用中的嬰兒營養品',
'Other alternative places for study': '其他可供讀書的場所',
'Other assistance needed': '其他需要的恊助',
'Other assistance, Rank': '其他協助,等級',
'Other current health problems, adults': '其他目前健康問題,成人',
'Other current health problems, children': '其他目前健康問題,小孩',
'Other events': '其他事件',
'Other factors affecting school attendance': '影響學校出席率的其他因素',
'Other major expenses': '其他主要費用',
'Other non-food items': '其他非食品項目',
'Other recommendations': '其他建議',
'Other residential': '其他居住地',
'Other school assistance received': '已收到的其他學校協助',
'Other school assistance, details': '其他協助學校,詳細資料',
'Other school assistance, source': '其他協助學校,來源',
'Other settings can only be set by editing a file on the server': '其他設定只能透過編輯伺服器上的檔案來設定',
'Other side dishes in stock': '庫存中的其他配菜',
'Other types of water storage containers': '其他類型的儲水容器',
'Other ways to obtain food': '其他方式來取得食品',
'Other': '其他',
'Outbound Mail settings are configured in models/000_config.py.': '外寄郵件設定在 models/000_config.py 中配置。',
'Outbox': '寄件匣',
'Outgoing SMS Handler': 'SMS送出的處理常式',
'Outgoing SMS handler': 'SMS送出的處理常式',
'Overall Hazards': '整體危害',
'Overhead falling hazard': '高處墜落物危害',
'Overland Flow Flood': '地表漫流水災',
'Overlays': '重疊',
'Owned Resources': '擁有的資源',
'PF Number': 'PF號碼',
'PIN number': 'PIN 碼',
'PIN': 'PIN 碼',
'PL Women': '懷孕/哺乳期婦女',
'Pack': '包',
'Packs': '套件',
'Pan Map: keep the left mouse button pressed and drag the map': '平移地圖:按住滑鼠左鍵並拖曳地圖',
'Parameters': '參數',
'Parent Office': '母項辦公室',
'Parent needs to be of the correct level': '母項必須是正確的層次',
'Parent needs to be set for locations of level': '此層次的位置需要設定母項',
'Parent needs to be set': '母項需要設定',
'Parent': '母項',
'Parents/Caregivers missing children': '尋找失蹤兒童的父母/照顧者',
'Partial Database Synchronization': '部分資料庫同步化',
'Partial': '局部',
'Participant': '參與者',
'Partner Details': '夥伴詳細資料',
'Partner added': '新增夥伴',
'Partner deleted': '已刪除夥伴',
'Partner updated': '已更新夥伴',
'Partners': '夥伴',
'Pashto': '普什圖文',
'Pass': '通過 (pass)',
'Passport': '護照',
'Password for authentication at the peer. Note that only HTTP Basic authentication is supported.': '在同層級進行鑒別所用的密碼。注意,只支援 HTTP 基本鑒別。',
'Password': '密碼',
'Path': '路徑',
'Patients': '病患',
'Peer Details': '同層級詳細資料',
'Peer Registration Details': '同層級註冊詳細資料',
'Peer Registration Request': '同層級註冊申請',
'Peer Registration': '同層級註冊',
'Peer Type': '同層級類型',
'Peer UID': '同層級UID',
'Peer added': '新增同層級',
'Peer deleted': '刪除同層級',
'Peer not allowed to push': '同層級不允許推送',
'Peer registration request added': '已加入之同層級註冊申請',
'Peer registration request deleted': '已刪除之同層級註冊申請',
'Peer registration request updated': '已更新之同層級註冊申請',
'Peer updated': '更新同層級',
'Peer': '對等',
'Peers': '同層級',
'Pending Requests': '擱置要求',
'Pending': '擱置中',
'People Needing Food': '需要食品的人員',
'People Needing Shelter': '需要避難所的人員',
'People Needing Water': '需要水的人員',
'People Trapped': '受困人員',
'People with chronical illnesses': '患有慢性疾病的人員',
'People': '個人',
'Performance Rating': '效能等級',
'Person 1': '人員 1',
'Person 1, Person 2 are the potentially duplicate records': '人員 1、人員 2 是可能重複的記錄',
'Person 2': '人員 2',
'Person Data': '人員資料',
'Person De-duplicator': '人員重複記錄排除器',
'Person Details': '人員明細',
'Person Finder': '人員搜尋器',
'Person Registry': '人員登錄',
'Person added to Group': '群組成員已新增',
'Person added to Team': '群組成員已新增',
'Person added': '已新增人員',
'Person deleted': '人員刪除',
'Person details updated': '人員詳細資料更新',
'Person found': '找到人員',
'Person interviewed': '人員受訪',
'Person missing': '遺漏人員',
'Person reporting': '人員報告',
'Person who has actually seen the person/group.': '實際見過該人員/群組的人。',
'Person who is reporting about the presence.': '報告該存在情況的人。',
'Person who observed the presence (if different from reporter).': '觀察到該存在情況的人(如果與報告者不同)。',
'Person': '聯絡人',
'Person/Group': '人員/群組',
'Personal Data': '個人資料',
'Personal Effects Details': '個人效果詳細資料',
'Personal Effects': '個人效果',
'Personal Map': '個人對映',
'Personal Profile': '個人設定檔',
'Personal impact of disaster': '災難對個人的影響',
'Persons in institutions': '機構中的人員',
'Persons per Dwelling': '每戶住宅人數',
'Persons with disability (mental)': '身心障礙人士(精神)',
'Persons with disability (physical)': '身心障礙人士(肢體)',
'Persons': '人員',
'Phone 1': '電話一',
'Phone 2': '電話二',
'Phone': '電話',
'Phone/Business': '電話/商業',
'Phone/Emergency': '電話/緊急',
'Phone/Exchange (Switchboard)': '電話/交換(switchboard)',
'Phone/Exchange': '電話/交換',
'Photo Details': '照片詳細資料',
'Photo Taken?': '已拍照?',
'Photo added': '新增照片',
'Photo deleted': '刪除照片',
'Photo updated': '更新照片',
'Photo': '照片',
'Photograph': '照片',
'Photos': '照片',
'Physical Description': '實體說明',
'Physical Safety': '實體安全',
'Physical': '實體',
'Picture upload and finger print upload facility': '圖像上傳和指紋上載機能',
'Picture': '圖片',
'Place for solid waste disposal': '固體廢棄物處理場所',
'Place of Recovery': '尋獲地點',
'Place': '地點',
'Places for defecation': '排便場所',
'Places the children have been sent to': '兒童被送往的地點',
'Planning': '規劃',
'Playing': '播放',
'Please correct all errors.': '請更正所有錯誤。',
'Please enter a First Name': '請輸入名字',
'Please enter a Google Key if you wish to use Google Layers': '如果你想使用穀歌層,請輸入穀歌的關鍵鑰',
'Please enter a Yahoo Key if you wish to use Yahoo Layers': '如果你想使用雅虎層,請輸入雅虎的關鍵鑰',
'Please enter a first name': '請輸入名字',
'Please enter a site OR a location': '請輸入一個站點或一個位置',
'Please enter a valid email address': '請輸入一個有效的電子郵件位址',
'Please enter the first few letters of the Person/Group for the autocomplete.': '請輸入人員/群組名稱的前幾個字母以使用自動完成。',
'Please enter the recipient': '請輸入收件者',
'Please fill this!': '請填寫這個!',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened. If a ticket was issued then please provide the Ticket ID.': '請提供您所指頁面的 URL,並說明您預期發生的情況與實際發生的情況。如果已產生問題單,請提供問題單 ID。',
'Please provide the URL of the page you are referring to, a description of what you expected to happen & what actually happened.': '請提供您所指頁面的 URL,並說明您預期發生的情況與實際發生的情況。',
'Please report here where you are:': '請在此報告您的位置:',
'Please select another level': '請選取另一個層次',
'Please select': '請選取',
'Please sign-up with your Cell Phone as this allows us to send you Text messages. Please include full Area code.': '請註冊您的行動電話,因為這可讓我們向您傳送的文字訊息。 請包括完整區域碼。',
'Please specify any problems and obstacles with the proper handling of the disease, in detail (in numbers, where appropriate). You may also add suggestions the situation could be improved.': '請詳細說明妥善處理該疾病時遇到的任何問題與障礙(適當時以數字表示)。您也可以新增改善狀況的建議。',
'Please use this field to record any additional information, including a history of the record if it is updated.': '請使用這個欄位來記錄任何其他資訊,包括記錄更新時的歷史記錄。',
'Please use this field to record any additional information, including any Special Needs.': '請使用這個欄位來記錄任何其他資訊,包括任何特殊需求。',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': '請使用這個欄位來記錄任何其他資訊,例如 Ushahidi 實例 ID。若記錄更新,請包含其歷史記錄。',
'Pledge Aid to match these Requests': '認捐援助以符合這些要求',
'Pledge Aid': '認捐援助',
'Pledge Status': '認捐狀態',
'Pledge Support': '認捐支援',
'Pledge': '認捐',
'Pledged': '已認捐',
'Pledges': '認捐',
'Poisonous Gas': '有毒瓦斯',
'Police': '警察',
'Policy': '政策',
'Pollution and other environmental': '汙染和其他環境',
'Polygon reference of the rating unit': '評級單位的多邊形參照',
'Polygon': '多邊形',
'Poor': '差',
'Population Statistic Details': '人口統計資料詳細資料',
'Population Statistic added': '人口統計資料新增',
'Population Statistic deleted': '人口統計資料刪除',
'Population Statistic updated': '人口統計資料更新',
'Population Statistics': '人口統計',
'Population and number of households': '人口與戶數',
'Population': '人口',
'Popup Fields': '蹦現欄位',
'Popup Label': '蹦現標籤',
'Porridge': '稀飯',
'Port Closure': '港口關閉',
'Port': '埠',
'Position Catalog': '位置型錄',
'Position Details': '位置詳細資料',
'Position added': '新增位置',
'Position deleted': '刪除位置',
'Position type': '位置類型',
'Position updated': '更新位置',
'Position': '位置',
'Positions': '職位',
'Postcode': '郵遞區號',
'Poultry restocking, Rank': '家禽補充,等級',
'Pounds': '英鎊',
'Power Failure': '電源故障',
'Powered by Sahana': '採用Sahana',
'Pre-cast connections': '預鑄接頭',
'Preferred Name': '暱稱',
'Pregnant women': '孕婦',
'Preliminary': '初步的',
'Presence Condition': '存在條件',
'Presence Log': '存在日誌',
'Presence': '存在',
'Previous View': '前一頁',
'Previous': '前一頁(P)',
'Primary Name': '主要名稱',
'Primary Occupancy': '主要佔用',
'Priority Level': '優先順序層次',
'Priority from 1 to 9. 1 is most preferred.': '優先順序從 1 到 9,1 為最優先。',
'Priority': '優先順序',
'Private': '專用',
'Problem Administration': '問題管理',
'Problem Details': '問題明細',
'Problem Group': '問題群組',
'Problem Title': '問題標題',
'Problem added': '新增問題',
'Problem connecting to twitter.com - please refresh': '連線 twitter.com 時發生問題,請重新整理',
'Problem deleted': '問題已刪除',
'Problem updated': '問題已更新',
'Problem': '問題',
'Problems': '問題',
'Procedure': '程序',
'Process Received Shipment': '處理收到的出貨',
'Process Shipment to Send': '處理要傳送的出貨',
'Procurements': '採購',
'Product Description': '產品說明',
'Product Name': '產品名稱',
'Profile updated': '更新設定檔',
'Profile': '設定檔',
'Project Activities': '專案活動',
'Project Details': '專案詳細資料',
'Project Management': '專案管理',
'Project Status': '項目狀態',
'Project Tracking': '項目追蹤',
'Project added': '新增專案',
'Project deleted': '已刪除專案',
'Project has no Lat/Lon': '專案沒有緯度/經度',
'Project updated': '項目更新',
'Project': '專案 (project)',
'Projection Details': '預測明細',
'Projection added': '新增投射',
'Projection deleted': '投射刪除',
'Projection updated': '預測更新',
'Projection': '投射',
'Projections': '預測',
'Projects': '項目',
'Property reference in the council system': '議會制度中的物業參考',
'Protected resource': '受保護的資源',
'Protection': '保護',
'Provide Metadata for your media files': '提供meta資料的媒體檔案',
'Provide a password': '提供一個密碼。',
'Provide an optional sketch of the entire building or damage points. Indicate damage points.': '可選擇提供整棟建築物或損壞點的草圖。標示損壞點。',
'Province': '省',
'Proxy-server': 'Proxy伺服器',
'Psychiatrics/Adult': '精神科/成人',
'Public Event': '公用事件',
'Public and private transportation': '公共和私有運輸',
'Public assembly': '公眾集會',
'Public': '公用',
'Pull tickets from external feed': '從外部來源拉取問題單',
'Punjabi': '旁遮普文',
'Purchase Date': '採購日期',
'Push tickets to external system': '將問題單推送至外部系統',
'Put a choice in the box': '在方框中填入選項',
'Pyroclastic Flow': 'Pyroclastic流程',
'Pyroclastic Surge': 'Pyroclastic突波',
'Python Serial module not available within the running Python - this needs installing to activate the Modem': '執行中的 Python 沒有 Python Serial 模組,需要安裝才能啟用數據機',
'Python needs the ReportLab module installed for PDF export': 'Python 需要安裝 ReportLab 模組才能匯出 PDF',
'Quantity Committed': '確定數量',
'Quantity Fulfilled': '履行數量',
'Quantity in Transit': '在途數量',
'Quantity': '數量',
'Quarantine': '隔離',
'Queries': '查詢',
'Query Feature': '查詢功能',
'Query': '查詢 (query)',
'Queryable?': '查詢?',
'RC frame with masonry infill': '砌體填充的 RC 框架',
'RECORD A': '記錄 A',
'RECORD B': '記錄B',
'RESPONSE': '回應',
'RPC Service URL': 'RPC服務URL',
'Race': '種族',
'Radio Callsign': '電臺呼號',
'Radiological Hazard': '輻射危害',
'Railway Accident': '鐵路事故',
'Railway Hijacking': '鐵路劫持',
'Rain Fall': '降雨',
'Rapid Assessment Details': '快速評估詳細資料',
'Rapid Assessment added': '快速新增評量',
'Rapid Assessment deleted': '快速評估刪除',
'Rapid Assessment updated': '快速更新評量',
'Rapid Assessment': '快速評估',
'Rapid Assessments & Flexible Impact Assessments': '快速評估與彈性影響評估',
'Rapid Assessments': '快速評估',
'Rapid Close Lead': '快速關閉商機',
'Rapid Data Entry': '快速數據輸入',
'Rating Scale': '評分',
'Raw Database access': '原始資料庫存取',
'Read-Only': '唯讀',
'Read-only': '唯讀',
'Real World Arbitrary Units': '真實世界任意單位',
'Receive Items': '接收項目',
'Receive New Shipment': '接收新出貨',
'Receive Shipment': '接收貨物',
'Receive this shipment?': '接收此出貨?',
'Receive': '接收',
'Received By Person': '接收人',
'Received By': '接收',
'Received Item Details': '接收項目詳細資料',
'Received Item deleted': '接收項目刪除',
'Received Item updated': '接收更新項目',
'Received Shipment Details': '收到出貨詳細資料',
'Received Shipment canceled and items removed from Inventory': '接收貨物取消和項目從庫存移除',
'Received Shipment canceled': '接收貨物取消',
'Received Shipment updated': '接收貨物更新',
'Received Shipments': '收到出貨',
'Received': '已接收',
'Receiving and Sending Items': '接收和發送項目',
'Recipient': '收件者',
'Recipients': '收件人',
'Recommendations for Repair and Reconstruction or Demolition': '維修和重建或拆除的建議',
'Record %(id)s created': '記錄 %(id)s 已建立',
'Record %(id)s updated': '記錄 %(id)s 已更新',
'Record Details': '記錄詳細資料',
'Record ID': '記錄 ID',
'Record Saved': '儲存記錄',
'Record added': '已新增記錄',
'Record any restriction on use or entry': '記錄任何使用或進入的限制',
'Record deleted': '刪除記錄',
'Record last updated': '記錄前次更新',
'Record not found!': '記錄未找到!',
'Record not found': '找不到記錄',
'Record updated': '更新記錄',
'Record': '記錄',
'Recording and Assigning Assets': '錄制及指派資產',
'Records': '記錄',
'Recovery Reports': '回復報告',
'Recovery Request added': '新增回復要求',
'Recovery Request deleted': '回復刪除要求',
'Recovery Request updated': '回復要求更新',
'Recovery Request': '回復要求',
'Recovery Requests': '回復要求',
'Recovery report added': '復原報告新增',
'Recovery report deleted': '復原報告刪除',
'Recovery report updated': '回復更新報告',
'Recovery': '回復',
'Recurring Cost': '循環成本',
'Recurring cost': '循環成本',
'Recurring costs': '循環成本',
'Recurring': '重複出現',
'Red Cross / Red Crescent': '紅十字會/紅新月會',
'Red': '紅色',
'Reference Document': '參考文件',
'Refers to default syncronization policy adopted if data entry recieved from other machine is already present in your machine.': '指從其他機器收到的資料項目已存在於您的機器時,所採用的預設同步化原則。',
'Refresh Rate (seconds)': '更新頻率(秒)',
'Region Location': '區域位置',
'Region': '區域',
'Regional': '地區',
'Regions': '地區',
'Register Person into this Camp': '將人員登錄到這個營地',
'Register Person into this Shelter': '將人員登錄到這個避難所',
'Register Person': '登錄人員',
'Register them as a volunteer': '將他們登錄為志工',
'Register': '註冊',
'Registered People': '註冊的人員',
'Registered users can': '註冊使用者可以',
'Registering ad-hoc volunteers willing to contribute': '登錄願意貢獻的臨時志工',
'Registration Details': '註冊詳細資料',
'Registration added': '新增登錄',
'Registration entry deleted': '刪除登錄項目',
'Registration is pending approval': '申請等候核准中',
'Registration is still pending approval from Approver (%s) - please wait until confirmation received.': '登錄仍在擱置核准從核准者 (%s) -請稍候直到收到確認。',
'Registration key': '登錄索引鍵',
'Registration successful': '登錄成功',
'Registration updated': '更新登錄',
'Registration': '登錄',
'Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '登錄系統追蹤所有在災區工作的救援組織。它不僅記錄這些組織活躍的地點,也記錄它們在各區域提供的專案範圍。',
'Rehabilitation/Long Term Care': '複健/長期照護',
'Reinforced masonry': '強化砌體',
'Rejected': '已拒絕',
'Reliable access to sanitation/hygiene items': '可靠取得衛生設施/衛生用品',
'Relief Item Catalog': '救援物資型錄',
'Relief Item Details': '救援物資詳細資料',
'Relief Item': '救援物資',
'Relief Items stored in Inventories in different locations': '儲存在不同地點庫存中的救援物資',
'Relief Items': '救援物資',
'Relief Team': '救難隊',
'Relief': '救援',
'Religion': '宗教',
'Religious Leader': '宗教領導者',
'Religious': '宗教',
'Relocate as instructed in the <instruction>': '依照 <instruction> 中的指示重新安置',
'Remove Activity from this event': '從這個事件中移除活動',
'Remove Asset from this event': '從這個事件中移除資產',
'Remove Asset from this scenario': '從這個情境中移除資產',
'Remove Document from this request': '從這個需求中移除文件',
'Remove Facility from this event': '從這個事件中移除設備',
'Remove Facility from this scenario': '從這個情境中移除設備',
'Remove Feature: Select the feature you wish to remove & press the delete key': '移除功能:選取您要移除的功能,然後按下刪除鍵',
'Remove Human Resource from this event': '從這個事件中移除人力資源',
'Remove Human Resource from this scenario': '從這個情境中移除人力資源',
'Remove Item from Inventory': '從庫存移除項目',
'Remove Map Profile from this event': '從這個事件中移除地圖配置',
'Remove Map Profile from this scenario': '從這個情境中移除地圖配置',
'Remove Person from Group': '刪除組員',
'Remove Person from Team': '刪除組員',
'Remove Skill from Request': '從需求中移除技能',
'Remove Skill': '移除技能',
'Remove Task from this event': '從事件中移除任務',
'Remove Task from this scenario': '從情境中移除此任務',
'Remove this asset from this event': '從這個事件中移除這個資產',
'Remove this asset from this scenario': '從這個情境中移除這個資產',
'Remove this facility from this event': '從這個活動中移除此設備',
'Remove this facility from this scenario': '從這個情境中移除此設備',
'Remove this human resource from this event': '從這個活動中移除此人力資源',
'Remove this human resource from this scenario': '從這個情境中移除此人力資源',
'Remove this task from this event': '從這個活動中移除此任務',
'Remove this task from this scenario': '從這個情境中移除此任務',
'Remove': '移除',
'Removed from Group': '組員已刪除',
'Removed from Team': '組員已刪除',
'Repair': '修復',
'Repaired': '修復',
'Repeat your password': '重複您的密碼',
'Replace All': '全部取代',
'Replace if Master': '若為主要,則取代',
'Replace if Newer': '若較新,則取代',
'Replace with Remote': '以遠端取代',
'Replace': '取代',
'Replace/Master': '取代/主要',
'Replace/Newer': '取代/更新',
'Report Another Assessment...': '報告另一個評估...',
'Report Details': '報告詳細資料',
'Report Resource': '報告資源',
'Report Type': '報告類型',
'Report Types Include': '報告類型包括',
'Report a Problem with the Software': '回報軟體問題',
'Report added': '已新增報告',
'Report deleted': '已刪除報告',
'Report my location': '報告我的位置',
'Report that person missing': '報告該人員失蹤',
'Report the contributing factors for the current EMS status.': '報告造成現行 EMS 狀態的因素。',
'Report the contributing factors for the current OR status.': '報告造成現行手術室狀態的因素。',
'Report the person as found': '報告該人員已尋獲',
'Report them as found': '報告他們已尋獲',
'Report them missing': '報告他們失蹤',
'Report updated': '報告已更新',
'Report': '報告',
'Reported By': '報告者',
'Reporter Name': '報告者姓名',
'Reporter': '報告者',
'Reporter:': '報告者:',
'Reporting on the projects in the region': '報告區域內的專案',
'Reports': '報告',
'Request Added': '新增要求',
'Request Aid': '輔助請求',
'Request Canceled': '已取消申請',
'Request Detail': '要求詳細資料',
'Request Details': '要求的詳細資料',
'Request From': '要求來源',
'Request Item Details': '要求項目詳細資料',
'Request Item added': '要求項目已新增',
'Request Item deleted': '要求項目已刪除',
'Request Item from Available Inventory': '從可用庫存要求項目',
'Request Item updated': '要求項目已更新',
'Request Item': '申請項目',
'Request Items': '申請項目',
'Request Status': '要求狀態',
'Request Type': '要求類型',
'Request Updated': '要求已更新項目',
'Request added': '新增要求',
'Request deleted': '已刪除要求',
'Request for Role Upgrade': '請求的角色升級',
'Request updated': '要求已更新項目',
'Request': '要求',
'Request, Response & Session': '要求,回應及階段作業',
'Requested By Facility': '所要求的機能',
'Requested By Site': '所要求的網站',
'Requested By Warehouse': '所要求的倉儲',
'Requested By': '申請者',
'Requested From': '要求從',
'Requested Items': '所要求的項目',
'Requested Skill Details': '所需技能細節',
'Requested Skill updated': '所需技能更新',
'Requested Skill': '所需技能',
'Requested Skills': '所需技能',
'Requested by': '要求者',
'Requested on': '要求上',
'Requested': '已要求',
'Requester': '要求者',
'Requestor': '要求者',
'Requests Management': '要求管理',
'Requests for Item': '要求的項目',
'Requests': '需求',
'Required Skill': '所需技能',
'Required by other servers.': '其他伺服器所需。',
'Requires Login!': '需要登入!',
'Requires login': '需要登入',
'Rescue and recovery': '應急與恢復系統 (Rescue and Recovery)',
'Reset Password': '重設密碼',
'Reset form': '重設表單',
'Reset': '重設',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': '調整功能:選擇您希望調整的功能,然後拖動相關的點到你想要的大小',
'Resolve Conflict': '解決衝突',
'Resolve link brings up a new screen which helps to resolve these duplicate records and update the database.': '解決鏈接啟動一個新的畫面,有助於解決這些重複記錄和更新數據庫',
'Resolve': '解決',
'Resource Details': '資源詳細資料',
'Resource added': '新增資源',
'Resource deleted': '資源已刪除',
'Resource updated': '資源已更新',
'Resource': '資源',
'Resources': '資源',
'Respiratory Infections': '呼吸感染',
'Response Details': '回應明細',
'Response added': '新增回應',
'Response deleted': '刪除回應',
'Response updated': '回應已更新',
'Response': '回應',
'Responses': '回應',
'Restricted Access': '受限存取權',
'Restricted Use': '使用限制',
'Restrictions': '限制',
'Results': '結果',
'Resume Sync': '恢復同步',
'Retail Crime': '零售犯罪',
'Retrieve Password': '擷取密碼',
'Return to Request': '回到要求',
'Return': '返回',
'Returned From': '傳回從',
'Returned': '已返回',
'Review Incoming Shipment to Receive': '檢閱送入出貨以接收',
'Rice': '米',
'Right now, your system is set default synchronization scheme. You are currently able to synchronize your server with other servers.': '目前,您的系統設定為預設同步化配置。您目前可以將您的伺服器與其他伺服器同步化。',
'Right-hand headline': '右手標題',
'Right-to-Left': '由右至左',
'Riot': '暴動',
'River Details': '河流詳細資料',
'River added': '河流已新增',
'River deleted': '河流已刪除',
'River updated': '河流已更新',
'River': '河流',
'Rivers': '河流',
'Road Accident': '道路事故',
'Road Closed': '道路關閉',
'Road Conditions': '道路條件',
'Road Delay': '道路延遲',
'Road Hijacking': '道路劫持',
'Road Usage Condition': '道路使用條件',
'Role Details': '角色詳細資料',
'Role Required': '需要角色',
'Role Updated': '更新角色',
'Role added': '已新增角色',
'Role deleted': '已刪除角色',
'Role updated': '更新角色',
'Role': '角色',
'Role-based': '以角色為基礎',
'Roles Permitted': '允許的角色',
'Roles': '角色',
'Roof tile': '屋瓦',
'Roofs, floors (vertical load)': '屋頂、地板(垂直載重)',
'Room Details': '教室詳細資料',
'Room added': '新增室',
'Room deleted': '教室已刪除',
'Room updated': '更新室',
'Rooms': '會議室',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': '旋轉功能:選取您要旋轉的功能,然後拖曳相關的點,旋轉至所需的位置',
'Row Choices (One Per Line)': '列選項(每行一個)',
'Rows in table': '表格中的橫列',
'Rows selected': '已選取的列數',
'Run Functional Tests': '執行功能測試',
'Run Interval': '執行間隔',
'Running Cost': '執行成本',
'Russian': '俄文',
'SITUATION': '狀況',
'Safe environment for vulnerable groups': '弱勢群體的安全環境',
'Safety Assessment Form': '安全評量表單',
'Safety of children and women affected by disaster': '受災難影響的兒童與婦女的安全',
'Safety of children and women affected by disaster?': '受災難影響的兒童與婦女的安全?',
'Sahana Administrator': 'Sahana管理者',
'Sahana Blue': 'Sahana藍色',
'Sahana Community Chat': 'Sahana 社群聊天室',
'Sahana Eden <= Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=其他同步化(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=>其他(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other sync (Sahana Agasti, Ushahidi, etc.)': 'Sahana Eden <=>其他同步化(Sahana Agasti, Ushahidi,等等。 )',
'Sahana Eden <=> Other': 'Sahana Eden <=>其他',
'Sahana Eden <=> Sahana Eden sync': 'Sahana Eden <=> Sahana Eden同步',
'Sahana Eden Disaster Management Platform': 'Sahana Eden災難管理平臺',
'Sahana Eden Humanitarian Management Platform': 'Sahana Eden Humanitarian管理平臺',
'Sahana Eden Website': 'Sahana Eden 網站',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management. The following modules are available': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。 下列模組可用',
'Sahana Eden is a family of applications that provide solutions to coordination and collaboration for organisations working in disaster management.': 'Sahana Eden 是一套救災管理網站系統,可協助救援單位進行災難管理的分工合作。',
'Sahana FOSS Disaster Management System': 'Sahana FOSS災難管理系統',
'Sahana Green': 'Sahana綠色',
'Sahana Login Approval Pending': 'Sahana登入擱置核准',
'Sahana Steel': 'Sahana鋼',
'Sahana access granted': 'Sahana授予的存取權',
'Sahana has to hook to a network port other than port being used by website (normally port 80). If your firewall blocks this port you have change it to any other free port. For information on eligible ports, see': 'Sahana 必須連結至網站所用埠(通常是埠 80)以外的網路埠。如果您的防火牆封鎖這個埠,您可以將它變更為任何其他可用的埠。有關合適埠的資訊,請參閱',
'Sahana: new request has been made. Please login to see if you can fulfil the request.': 'Sahana:新的請求已完成。 請登入以查看您是否可以滿足要求。',
'Salted Fish': '鹹魚',
'Salvage material usable from destroyed houses': '可從損毀房屋回收使用的物料',
'Salvage material usable from destroyed schools': '可從損毀學校回收使用的物料',
'Sanitation problems': '消毒問題',
'Satellite Layer': '衛星層',
'Satellite Office': '衛星辦公室',
'Satellite': '衛星',
'Saturday': '星期六',
'Save any Changes in the one you wish to keep': '在您要保留的那筆記錄中儲存任何變更',
'Save': '儲存',
'Save: Default Lat, Lon & Zoom for the Viewport': '儲存:檢視區的預設緯度、經度與縮放',
'Saved.': '已儲存。',
'Saving...': '正在儲存...',
'Scale of Results': '結果量表',
'Scanned File': '掃描檔案',
'Scenario Details': '實務詳細資料',
'Scenario added': '新增實務',
'Scenario deleted': '刪除情境',
'Scenario updated': '更新情境',
'Scenario': '實務',
'Scenarios': '實務',
'Schedule': '排程',
'Schema': '綱目',
'School Closure': '學校關閉',
'School Code': '學校代碼',
'School District Details': '學校特區詳細資料',
'School District added': '學校特區新增',
'School District deleted': '學校特區刪除',
'School District updated': '學校特區更新',
'School District': '學校特區',
'School Districts': '學校行政區',
'School Lockdown': '學校鎖定',
'School Report Details': '學校報告詳細資料',
'School Report added': '學校報告新增',
'School Report deleted': '學校報告刪除',
'School Report updated': '學校報告更新',
'School Reports will be moved to Shelter Registry as this is what they are. Rapid Assessments will be added here.': '學校報告將移至避難所登錄,因為它們本質上就是避難所報告。快速評估將在此新增。',
'School Reports': '學校報告',
'School Teacher': '學校老師',
'School activities': '學校活動',
'School assistance received/expected': '已收到/預期的學校協助',
'School assistance': '學校協助',
'School attendance': '學校出席率',
'School destroyed': '學校損毀',
'School heavily damaged': '學校大量損壞',
'School tents received': '已收到學校帳篷',
'School tents, source': '學校帳篷,來源',
'School used for other purpose': '學校用於其他用途',
'School': '學校',
'School/studying': '學校/研究',
'Schools': '學校',
'Search & List Bin Types': '搜尋與列出儲存箱類型',
'Search & List Bins': '搜尋與列出儲存箱',
'Search & List Catalog': '搜尋與列出型錄',
'Search & List Category': '搜尋與列出種類',
'Search & List Items': '搜尋與列出項目',
'Search & List Locations': '搜尋與列出位置',
'Search & List Site': '搜尋與列出站點',
'Search & List Sub-Category': '搜尋與列出子類別',
'Search & List Unit': '搜尋與列出單位',
'Search Activities': '搜尋活動',
'Search Activity Report': '搜尋活動報告',
'Search Addresses': '搜尋位址',
'Search Aid Requests': '搜尋輔助要求',
'Search Alternative Items': '搜尋替代項目',
'Search Assessment Summaries': '搜尋評量摘要',
'Search Assessments': '搜尋評量',
'Search Asset Assignments': '搜尋資產分派',
'Search Asset Log': '搜尋資產日誌',
'Search Assets': '搜尋資產',
'Search Baseline Type': '搜尋基準線類型',
'Search Baselines': '搜尋基準線',
'Search Brands': '搜尋品牌',
'Search Budgets': '搜尋預算',
'Search Bundles': '搜尋軟體組',
'Search Camp Services': 'Camp搜尋服務',
'Search Camp Types': 'Camp搜尋類型',
'Search Camps': '搜尋Camps',
'Search Catalog Items': '搜尋型錄項目',
'Search Catalogs': '搜尋型錄',
'Search Category<>Sub-Category<>Catalog Relation': '搜尋Category<>Sub-Category<>Catalog關系',
'Search Certificates': '搜尋憑證',
'Search Certifications': '認證搜尋',
'Search Checklists': '核對搜尋',
'Search Cluster Subsectors': '搜尋叢集Subsectors',
'Search Clusters': '搜尋叢集',
'Search Commitment Items': '搜尋項目承諾',
'Search Commitments': '搜尋Commitments',
'Search Competencies': '搜尋能力',
'Search Competency Ratings': '搜尋能力等級',
'Search Configs': '搜尋配置',
'Search Contact Information': '搜尋聯絡資訊',
'Search Contacts': '搜尋聯絡人',
'Search Course Certicates': '搜尋進程憑證',
'Search Courses': '搜尋課程',
'Search Credentials': '認證搜尋',
'Search Distribution Items': '搜尋項目分配',
'Search Distributions': '搜尋配送',
'Search Documents': '搜尋文件',
'Search Donors': '搜尋Donors',
'Search Entries': '搜尋項目',
'Search Events': '搜尋事件',
'Search Facilities': '搜尋機能',
'Search Feature Layers': '搜尋功能層',
'Search Flood Reports': '搜尋水災報告',
'Search Geonames': '搜尋GeoNames',
'Search Groups': '搜尋群組',
'Search Homes': '搜尋家庭',
'Search Hospitals': '搜尋醫院',
'Search Human Resources': '搜尋人力資源',
'Search Identity': '搜尋身分',
'Search Images': '搜尋影像',
'Search Impact Type': '搜尋影響類型',
'Search Impacts': '搜尋影響',
'Search Import Files': '搜尋匯入檔案',
'Search Incident Reports': '搜尋事件報告',
'Search Incidents': '搜尋事件',
'Search Inventory Items': '搜尋庫存項目',
'Search Inventory Stores': '儲存搜尋庫存',
'Search Inventory items': '搜尋庫存項目',
'Search Item Catalog Category(s)': '搜尋項目型錄分類',
'Search Item Catalog(s)': '搜尋項目目錄',
'Search Item Categories': '搜尋項目類別',
'Search Item Packs': '搜尋項目套件',
'Search Item Sub-Category(s)': '搜尋項目子類別(S)',
'Search Items': '搜尋項目',
'Search Job Roles': '搜尋工作角色',
'Search Keys': '搜尋關鍵字',
'Search Kits': '搜尋套件',
'Search Landmarks': '搜尋里程碑',
'Search Layers': '搜尋層',
'Search Level 1 Assessments': '搜尋層次一評量',
'Search Level 2 Assessments': '搜尋層次二評量',
'Search Level': '搜尋層級',
'Search Locations': '搜尋位置',
'Search Log Entry': '搜尋日誌項目',
'Search Map Profiles': '搜尋對映配置',
'Search Markers': '搜尋標記',
'Search Members': '搜尋成員',
'Search Membership': '搜尋成員資格',
'Search Memberships': '搜尋成員資格',
'Search Metadata': '搜尋meta資料',
'Search Missions': '搜尋任務',
'Search Need Type': '搜尋需要類型',
'Search Needs': '搜尋需求',
'Search Notes': '搜尋Notes',
'Search Offices': '搜尋辦公室',
'Search Organizations': '搜尋組織',
'Search Partners': '搜尋夥伴',
'Search Patients': '查詢病人',
'Search Peer': '搜尋同層級',
'Search Peers': '搜尋對等',
'Search Personal Effects': '搜尋個人效果',
'Search Persons': '搜尋人員',
'Search Photos': '搜尋照片',
'Search Population Statistics': '搜尋人口統計資料',
'Search Positions': '搜尋位置',
'Search Problems': '搜尋問題',
'Search Projections': '搜尋估算',
'Search Projects': '搜尋專案',
'Search Rapid Assessments': '快速搜尋評量',
'Search Received Items': '搜尋接收項目',
'Search Received Shipments': '搜尋收到出貨',
'Search Records': '搜尋記錄',
'Search Recovery Reports': '搜尋回復報告',
'Search Registations': '搜尋登錄',
'Search Registration Request': '搜尋登錄要求',
'Search Report': '搜尋報告',
'Search Reports': '搜尋報告',
'Search Request Items': '搜尋要求項目',
'Search Request': '搜尋要求',
'Search Requested Items': '搜尋所要求的項目',
'Search Requested Skills': '查詢要求技能',
'Search Requests': '搜尋需求',
'Search Resources': '搜尋資源',
'Search Responses': '搜尋回應',
'Search Rivers': '搜尋Rivers',
'Search Roles': '搜尋角色',
'Search Rooms': '搜尋檔案室',
'Search Scenarios': '搜尋實務',
'Search School Districts': '搜尋學校行政區',
'Search School Reports': '搜尋學校報告',
'Search Sections': '搜尋區段',
'Search Sectors': '搜尋部門',
'Search Sent Items': '傳送搜尋項目',
'Search Sent Shipments': '傳送搜尋出貨',
'Search Service Profiles': '搜尋服務設定檔',
'Search Settings': '搜尋設定',
'Search Shelter Services': '搜尋Shelter服務',
'Search Shelter Types': '搜尋Shelter類型',
'Search Shelters': '搜尋Shelters',
'Search Shipment Transit Logs': '搜尋出貨傳輸日誌',
'Search Shipment/Way Bills': '出貨/搜尋方式清單',
'Search Shipment<>Item Relation': '搜尋Shipment<>Item關系',
'Search Site(s)': '搜尋站點',
'Search Skill Equivalences': '搜尋技能同等',
'Search Skill Provisions': '搜尋技能條款',
'Search Skill Type': '搜尋技能類型',
'Search Skill Types': '搜尋技能類型',
'Search Skill': '搜尋技能',
'Search Skills': '搜尋技能',
'Search Solutions': '搜尋解決方案',
'Search Sources': '搜尋來源',
'Search Staff Types': '搜尋人員類型',
'Search Staff or Volunteer': '搜尋人員或主動參與者',
'Search Staff': '搜尋人員',
'Search Status': '搜尋狀態',
'Search Storage Bin Type(s)': '搜尋儲存箱類型',
'Search Storage Bin(s)': '搜尋儲存箱',
'Search Storage Location(s)': '搜尋儲存位置',
'Search Subscriptions': '搜尋訂閱',
'Search Subsectors': '搜尋Subsectors',
'Search Support Requests': '搜尋支援要求',
'Search Tasks': '搜尋作業',
'Search Teams': '搜尋團隊',
'Search Themes': '搜尋主題',
'Search Tickets': '搜尋摘記卷',
'Search Tracks': '搜尋追蹤',
'Search Trainings': '搜尋訓練',
'Search Twitter Tags': '搜尋Twitter標籤',
'Search Units': '搜尋單位',
'Search Updates': '搜尋更新',
'Search Users': '搜尋使用者',
'Search Vehicle Details': '查詢交通工具細節',
'Search Vehicles': '查詢交通工具',
'Search Volunteer Availability': '搜尋自願可用性',
'Search Volunteer Registrations': '搜尋自願登錄',
'Search Volunteers': '搜尋志願者',
'Search Warehouses': '搜尋倉庫',
'Search and Edit Group': '搜尋及編輯群組',
'Search and Edit Individual': '搜尋及編輯個別',
'Search by ID Tag': '依ID標籤搜尋',
'Search for Items': '搜尋項目',
'Search for Staff or Volunteers': '搜尋人員或志願者',
'Search for a Hospital': '搜尋一個醫院',
'Search for a Location by name, including local names.': '依名稱搜尋位置,包括當地名稱。',
'Search for a Location': '搜尋位置',
'Search for a Person': '人員查詢',
'Search for a Project': '搜尋一個專案',
'Search for a Request': '搜尋要求',
'Search for a shipment by looking for text in any field.': '在任何欄位中以文字搜尋出貨。',
'Search for a shipment received between these dates': '搜尋在這些日期之間收到的出貨',
'Search for a vehicle by text.': '以文字查詢交通工具.',
'Search for an Organization by name or acronym': '依名稱或縮寫搜尋組織',
'Search for an Organization by name or acronym.': '依名稱或縮寫搜尋組織。',
'Search for an asset by text.': '以文字搜尋資產。',
'Search for an item by category.': '依類別搜尋項目。',
'Search for an item by Year of Manufacture.': '以製造年份查詢項目。',
'Search for an item by text.': '以文字搜尋項目。',
'Search for asset by country.': '依國家搜尋資產。',
'Search for office by country.': '依國家/地區搜尋辦公室。',
'Search for office by organization.': '依組織搜尋辦公室。',
'Search for office by text.': '以文字搜尋辦公室。',
'Search for warehouse by country.': '依國家/地區搜尋倉儲。',
'Search for warehouse by organization.': '依組織搜尋倉儲。',
'Search for warehouse by text.': '以文字搜尋倉儲。',
'Search here for a person record in order to:': '在此搜尋個人記錄,以便:',
'Search messages': '搜尋訊息',
'Search': '搜尋',
'Searching for different groups and individuals': '搜尋不同的群組及個體',
'Secondary Server (Optional)': '次要伺服器(選用)',
'Seconds must be a number between 0 and 60': '秒必須是0到60之間的數字',
'Seconds must be between 0 and 60': '秒必須介於0到60之間',
'Section Details': '區段詳細資料',
'Section added': '新增區段',
'Section deleted': '刪除區段',
'Section updated': '更新區段',
'Sections': '區段',
'Sector Details': '行業詳細資料',
'Sector added': '新增行業',
'Sector deleted': '已刪除行業',
'Sector updated': '行業更新',
'Sector': '行業',
'Sector(s)': '行業',
'Sector(s):': '行業:',
'Sectors': '行業',
'Security Policy': '安全原則 (security policy)',
'Security Status': '安全狀態',
'Security problems': '安全問題',
'Security': '安全',
'See All Entries': '查看所有項目',
'See all': '請參閱全部',
'See unassigned recovery requests': '查看未指派的回復要求',
'Seen': '看到',
'Select 2 potential locations from the dropdowns.': '從下拉清單中選取2個可能的位置。',
'Select Items from the Request': '從要求中選取項目',
'Select Items from this Inventory': '從這個庫存中選取項目',
'Select Language': '選取語言',
'Select Photos': '選取照片',
'Select a location': '選取位置',
'Select a question from the list': '從清單中選取一個問題',
'Select a range for the number of total beds': '選取總床位數的範圍',
'Select all that apply': '選取所有適用的',
'Select an Organization to see a list of offices': '選取組織以查看其辦公室清單',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': '選取與每項需求相關的評量與活動重疊圖層,以找出缺口。',
'Select the person assigned to this role for this project.': '選取在此專案中指派給這個角色的人員。',
'Select the person associated with this scenario.': '選擇相關的聯絡人.',
'Select to show this configuration in the Regions menu.': '選取以在「區域」功能表中顯示此配置。',
'Selects whether to use a Modem, Tropo or other Gateway for sending out SMS': '選取是否要使用數據機, Tropo或其他的閘道傳送SMS',
'Selects whether to use the gateway or the Modem for sending out SMS': '選取要使用閘道或數據機傳送SMS',
'Self Registration': '自我登記',
'Self-care': '自我管理',
'Self-registration': '自我登記',
'Send Alerts using Email &/or SMS': '透過電子郵件和/或簡訊發送通知',
'Send Commitment as Shipment': '將承諾作為出貨傳送',
'Send Items': '傳送項目',
'Send Mail': '傳送郵件',
'Send Message': '傳送訊息',
'Send New Shipment': '傳送新出貨',
'Send Notification': '傳送通知',
'Send Shipment': '傳送出貨',
'Send a message to this person': '傳送訊息給這個人',
'Send a message to this team': '傳送訊息至這個團隊',
'Send from %s': '從 %s 傳送',
'Send message': '傳送訊息',
'Send new message': '傳送新訊息',
'Send': '傳送',
'Sends & Receives Alerts via Email & SMS': '透過電子郵件和簡訊收發通知',
'Senior (50+)': '年長者 (50+)',
'Sensitivity': '靈敏度',
'Sent By Person': '傳送人員',
'Sent By': '寄件者',
'Sent Item Details': '傳送項目詳細資料',
'Sent Item deleted': '傳送項目刪除',
'Sent Item updated': '傳送更新項目',
'Sent Shipment Details': '傳送出貨詳細資料',
'Sent Shipment canceled and items returned to Inventory': '已取消傳送出貨,項目已退回庫存',
'Sent Shipment canceled': '傳送出貨取消',
'Sent Shipment updated': '傳送更新出貨',
'Sent Shipments': '傳送出貨',
'Sent': '已送出',
'Separate latrines for women and men': '男女分開的廁所',
'Separated children, caregiving arrangements': '失散兒童、照護安排',
'Serial Number': '序號',
'Series': '系列',
'Server': '伺服器 (server)',
'Service Catalogue': '服務型錄',
'Service or Facility': '服務或機能',
'Service profile added': '新增服務設定檔',
'Service profile deleted': '服務設定檔刪除',
'Service profile updated': '服務設定檔更新',
'Service': '服務',
'Services Available': '服務可用',
'Services': '服務',
'Set Base Site': '設定基本網站',
'Set By': '設定者',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': '設為 True 可允許非 MapAdmin 的使用者編輯位置階層的這個層級。',
'Setting Details': '設定明細',
'Setting added': '新增設定',
'Setting deleted': '設定已經刪除',
'Setting updated': '更新設定',
'Settings updated': '已更新設定',
'Settings were reset because authenticating with Twitter failed': '設定已重設,因為鑒別Twitter失敗',
'Settings which can be configured through the web interface are available here.': '設定可配置透過Web介面可用在這裡。',
'Settings': '設定',
'Severe': '嚴重',
'Severity': '嚴重性',
'Share a common Marker (unless over-ridden at the Feature level)': '共用同一個標記(除非在特性層次被置換)',
'Shelter & Essential NFIs': '庇護所與重要NFIs',
'Shelter Details': '庇護所詳細資料',
'Shelter Name': '庇護所名稱',
'Shelter Registry': '庇護所登錄',
'Shelter Service Details': '庇護所服務詳細資料',
'Shelter Service added': '已新增庇護所服務',
'Shelter Service deleted': '已刪除庇護所服務',
'Shelter Service updated': '已更新庇護所服務',
'Shelter Service': '庇護所服務',
'Shelter Services': '庇護所服務',
'Shelter Type Details': '庇護所類型詳細資料',
'Shelter Type added': '已新增庇護所類型',
'Shelter Type deleted': '已刪除庇護所類型',
'Shelter Type updated': '已更新庇護所類型',
'Shelter Type': '庇護所類型',
'Shelter Types and Services': '庇護所類型和服務',
'Shelter Types': '庇護所類型',
'Shelter added': '已新增庇護所',
'Shelter deleted': '已刪除庇護所',
'Shelter updated': '已更新庇護所',
'Shelter': '庇護所',
'Shelter/NFI Assistance': '庇護所/NFI協助',
'Shelter/NFI assistance received/expected': '已接收/預期的庇護所/NFI協助',
'Shelters': '庇護所',
'Shipment Created': '出貨建立',
'Shipment Details': '貨運詳細資料',
'Shipment Items received by Inventory': '庫存已接收的出貨項目',
'Shipment Items sent from Inventory': '從庫存送出的出貨項目',
'Shipment Items': '貨運項目',
'Shipment Transit Log Details': '出貨傳輸日誌詳細資料',
'Shipment Transit Log added': '出貨傳輸日誌添加',
'Shipment Transit Log deleted': '出貨傳輸日誌刪除',
'Shipment Transit Log updated': '出貨運送日誌更新',
'Shipment Transit Logs': '出貨運送日誌',
'Shipment to Send': '出貨以傳送',
'Shipment/Way Bill added': '已新增出貨/提單',
'Shipment/Way Bills Details': '出貨/提單詳細資料',
'Shipment/Way Bills deleted': '已刪除出貨/提單',
'Shipment/Way Bills updated': '已更新出貨/提單',
'Shipment/Way Bills': '出貨/提單',
'Shipment<>Item Relation added': '貨運<>新增項目關係',
'Shipment<>Item Relation deleted': '貨運<>刪除項目關係',
'Shipment<>Item Relation updated': '貨運<>更新項目關係',
'Shipment<>Item Relations Details': '貨運<>項目關係詳細資料',
'Shipment<>Item Relations': '貨運<>項目關係',
'Shipments To': '出貨至',
'Shipments': '貨物',
'Short Assessment': '短評量',
'Short Description': '簡要說明',
'Show Checklist': '顯示清單',
'Show Details': '顯示詳細資料',
'Show Map': '顯示地圖',
'Show Region in Menu?': '在功能表中顯示區域?',
'Show on Map': '顯示在地圖上',
'Show on map': '顯示在地圖上',
'Sign in': '登入',
'Sign-in with OpenID:': '以 OpenID 登入:',
'Sign-up as a volunteer': '註冊成為志願者',
'Sign-up for Account': '註冊帳戶',
'Sign-up succesful - you should hear from us soon!': '註冊成功-我們很快會與你聯絡!',
'Sindhi': '信德文',
'Single PDF File': '單一 PDF 檔案',
'Site Address': '場所地址',
'Site Administration': '網站管理',
'Site Description': '場所說明',
'Site Details': '網站詳細資料',
'Site ID': '網站 ID',
'Site Location Description': '網站位置說明',
'Site Location Name': '場所位置名稱',
'Site Manager': '網站管理',
'Site Name': '網站名稱',
'Site added': '新增網站',
'Site deleted': '刪除站點',
'Site updated': '更新站點',
'Site': '網站',
'Site/Warehouse': '網站/倉儲',
'Sites': '場所',
'Situation Awareness & Geospatial Analysis': '情勢感知與地理空間分析',
'Situation Report': '情勢報告',
'Situation': '狀況',
'Sketch': '概略圖',
'Skill Catalog': '技能型錄',
'Skill Details': '技能詳細資料',
'Skill Equivalence Details': '技能等值詳細資料',
'Skill Equivalence added': '技能新增等值',
'Skill Equivalence deleted': '技能刪除等值',
'Skill Equivalence updated': '技能等值更新',
'Skill Equivalence': '等值技能',
'Skill Equivalences': '同等技能',
'Skill Provision Catalog': '技能供應型錄',
'Skill Provision Details': '技能供應詳細資料',
'Skill Provision added': '技能供應新增',
'Skill Provision deleted': '技能供應刪除',
'Skill Provision updated': '技能供應更新',
'Skill Provision': '供應技能',
'Skill Provisions': '技術條款',
'Skill Status': '技能狀態',
'Skill TYpe': '技術類型',
'Skill Type Catalog': '型錄技術類型',
'Skill Type Details': '技術類型詳細資料',
'Skill Type added': '添加技術類型',
'Skill Type deleted': '刪除技術類型',
'Skill Type updated': '更新技術類型',
'Skill Type': '技術類型',
'Skill Types': '技能類型',
'Skill added to Request': '技能已新增至需求',
'Skill added': '添加技能',
'Skill deleted': '刪除技能',
'Skill removed from Request': '技能已從需求中移除',
'Skill removed': '技能已移除',
'Skill updated': '技術更新',
'Skill': '技能',
'Skills Catalog': '技能型錄',
'Skills Management': '技能管理',
'Skills': '技術',
'Slope failure, debris': '邊坡崩塌、土石',
'Small Trade': '小型交易',
'Smoke': '煙霧',
'Snapshot Report': 'Snapshot 報告',
'Snapshot': '快照',
'Snow Fall': '降雪',
'Social': '社會',
'Soil bulging, liquefaction': '土壤隆起、液化',
'Solid waste': '固體廢棄物',
'Solution Details': '解決方案明細',
'Solution Item': '解決方案項目',
'Solution added': '新增解決方案',
'Solution deleted': '刪除解決方案',
'Solution updated': '更新解決方案',
'Solution': '解決方案',
'Solutions': '解決方案',
'Some': '部分',
'Sorry - the server has a problem, please try again later.': '抱歉-伺服器發生問題,請稍後再試一次。',
'Sorry that location appears to be outside the area of the Parent.': '抱歉,該位置似乎在母項區域之外。',
'Sorry that location appears to be outside the area supported by this deployment.': '抱歉,該位置似乎在此部署支援的區域之外。',
'Sorry, I could not understand your request': '抱歉,我不瞭解您的請求',
'Sorry, only users with the MapAdmin role are allowed to create location groups.': '抱歉,只有在使用者與MapAdmin角色允許建立位置群組。',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': '抱歉,只有在使用者與MapAdmin角色允許編輯位置',
'Sorry, something went wrong.': '很抱歉,發生錯誤。',
'Sorry, that page is forbidden for some reason.': '抱歉,由於某些原因,該頁面禁止存取。',
'Sorry, that service is temporary unavailable.': '抱歉,該服務暫時無法使用。',
'Sorry, there are no addresses to display': '抱歉,沒有可顯示的地址',
'Source Details': '來源詳細資料',
'Source ID': '來源 ID',
'Source Time': '來源時間',
'Source Type': '來源類型',
'Source added': '新增來源',
'Source deleted': '刪除來源',
'Source of Information': '資訊來源',
'Source updated': '更新來源',
'Source': '來源',
'Sources of income': '收入來源',
'Sources': '來源',
'Space Debris': '空間碎片',
'Spanish': '西班牙文',
'Special Ice': '特殊ICE',
'Special Marine': '特殊MARINE',
'Special needs': '特殊需求',
'Specialized Hospital': '特殊化醫院',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': '此人員/群組被看到之位置內的特定區域(例如建築物/房間)。',
'Specific locations need to have a parent of level': '特定位置需要有此層級的母項',
'Specify a descriptive title for the image.': '為影像指定敘述性標題。',
'Specify the bed type of this unit.': '指定此單位的床位類型。',
'Specify the minimum sustainability in weeks or days.': '以週或日指定最低持續能力。',
'Specify the number of available sets': '指定可用的組數',
'Specify the number of available units (adult doses)': '指定可用的單位數(成人劑量)',
'Specify the number of available units (litres) of Ringer-Lactate or equivalent solutions': '指定Ringer-Lactate或同等溶液可用的單位數(公升)',
'Specify the number of sets needed per 24h': '指定每24小時所需的組數',
'Specify the number of units (adult doses) needed per 24h': '指定每24小時所需的單位數(成人劑量)',
'Specify the number of units (litres) of Ringer-Lactate or equivalent solutions needed per 24h': '指定每24小時所需之Ringer-Lactate或同等溶液的單位數(公升)',
'Spherical Mercator?': '球面麥卡托投影?',
'Spreadsheet Importer': '試算表匯入工具',
'Spreadsheet uploaded': '已上傳試算表',
'Staff & Volunteers': '人員和志願者',
'Staff 2': '員工二',
'Staff Details': '人員明細',
'Staff ID': '人員ID',
'Staff Member Details': '人員成員詳細資料',
'Staff Members': '人員成員',
'Staff Record': '人員記錄',
'Staff Type Details': '員工類型詳細資料',
'Staff Type added': '已新增人員類型',
'Staff Type deleted': '已刪除人員類型',
'Staff Type updated': '已更新人員類型',
'Staff Types': '員工類型',
'Staff added': '新增人員',
'Staff and Volunteers': '人員和志願者',
'Staff deleted': '人員刪除',
'Staff member added': '新增人員',
'Staff present and caring for residents': '有人員在場照顧居民',
'Staff updated': '更新人員',
'Staff': '人員',
'Staff2': '員工 2',
'Staffing': '人員配置',
'Stairs': '階梯',
'Start Date': '開始日期',
'Start date': '開始日期 (start date)',
'Start of Period': '期間開始',
'Start using your OpenID': '開始使用您的 OpenID',
'State': '省 (縣)',
'Stationery': '信箋',
'Status Report': '狀態報告',
'Status Update': '狀態更新',
'Status Updated': '狀態更新',
'Status added': '新增狀態',
'Status deleted': '刪除狀態',
'Status of clinical operation of the facility.': '此機構臨床作業的狀態。',
'Status of general operation of the facility.': '此機構一般作業的狀態。',
'Status of morgue capacity.': '停屍間容量的狀態。',
'Status of operations of the emergency department of this hospital.': '此醫院急診部門的作業狀態。',
'Status of security procedures/access restrictions in the hospital.': '此醫院安全程序/進出管制的狀態。',
'Status of the operating rooms of this hospital.': '此醫院手術室的作業狀態。',
'Status updated': '狀態更新',
'Status': '狀態',
'Steel frame': '鋼框架',
'Stolen': '已遭竊',
'Storage Bin Details': '存儲Bin詳細資料',
'Storage Bin Number': '存儲Bin號碼',
'Storage Bin Type Details': '存儲Bin類型詳細資料',
'Storage Bin Type added': '存儲Bin新增類型',
'Storage Bin Type deleted': '存儲Bin類型刪除',
'Storage Bin Type updated': '存儲Bin更新類型',
'Storage Bin Type': 'Bin存儲類型',
'Storage Bin Types': '存儲Bin類型',
'Storage Bin added': '存儲Bin新增',
'Storage Bin deleted': '存儲Bin刪除',
'Storage Bin updated': '儲存更新bin',
'Storage Bin': '儲存體 Bin',
'Storage Bins': '存儲Bin',
'Storage Location Details': '儲存體位置詳細資料',
'Storage Location ID': '儲存體位置ID',
'Storage Location Name': '儲存體位置名稱',
'Storage Location added': '儲存體位置新增',
'Storage Location deleted': '儲存體位置刪除',
'Storage Location updated': '儲存體位置更新',
'Storage Location': '儲存體位置',
'Storage Locations': '儲存體位置',
'Store spreadsheets in the Eden database': '儲存試算表中的Eden資料庫',
'Storeys at and above ground level': '地面及以上的樓層數',
'Storm Force Wind': '暴風級強風',
'Storm Surge': '風暴潮',
'Street (continued)': '街道(續)',
'Street Address': '地址',
'Street': '街道',
'Strong Wind': '強風',
'Structural Hazards': '結構性危害',
'Structural': '結構性',
'Style Field': '樣式欄位',
'Style Values': '樣式值',
'Sub Category': '子種類',
'Sub-type': '子類型',
'SubType': '子類型',
'Subject': '主旨',
'Submission successful - please wait': '提交成功-請稍候',
'Submission successful - please wait...': '提交成功-請稍候...',
'Submit New (full form)': '提交新的(完整形式)',
'Submit New (triage)': '提交新(分類)',
'Submit New': '提交新的',
'Submit a request for recovery': '提交要求的回復',
'Submit new Level 1 assessment (full form)': '提交新的層次一評量(完整形式)',
'Submit new Level 1 assessment (triage)': '提交新層次一評量(分類)',
'Submit new Level 2 assessment': '提交新層次二評量',
'Submit': '確認送出',
'Submitting information about the individual such as identification numbers, physical appearance, last seen location, status, etc': '提交個人的相關資訊(例如識別號碼),實體外觀,前次看到的位置,狀態等',
'Subscription Details': '訂閱詳細資料',
'Subscription added': '已新增訂閱',
'Subscription deleted': '刪除訂閱',
'Subscription updated': '更新訂閱',
'Subscriptions': '訂閱',
'Subsector Details': 'Subsector詳細資料',
'Subsector added': '界別分組已新增',
'Subsector deleted': '界別分組已刪除',
'Subsector updated': 'Subsector更新',
'Subsector': '界別分組',
'Subsistence Cost': '補貼成本',
'Suburb': '郊區',
'Sufficient care/assistance for chronically ill': '對慢性病患者有足夠的照護/協助',
'Suggest not changing this field unless you know what you are doing.': '除非您清楚自己在做什麼,否則建議不要變更這個欄位。',
'Summary by Administration Level': '依管理層級摘要',
'Summary': '摘要',
'Sunday': '星期日',
'Supervisor': '監督者',
'Supply Chain Management': '供應鏈管理',
'Support Request': '支援要求',
'Support Requests': '支援要求',
'Supports the decision making of large groups of Crisis Management Experts by helping the groups create ranked list.': '透過協助群組建立排序清單,支援大型危機管理專家群組的決策。',
'Sure you want to delete this object?': '確定要刪除嗎?',
'Survey Answer Details': '調查回答詳細資料',
'Survey Answer added': '調查回答新增',
'Survey Answer deleted': '調查回答刪除',
'Survey Answer updated': '調查回答更新',
'Survey Answer': '調查回答',
'Survey Module': '調查模組',
'Survey Name': '意見調查名稱',
'Survey Question Details': '調查問題詳細資料',
'Survey Question Display Name': '調查問題顯示名稱',
'Survey Question added': '調查問題添加',
'Survey Question deleted': '調查問題刪除',
'Survey Question updated': '調查問題更新',
'Survey Question': '調查問題',
'Survey Section Details': '調查區段詳細資料',
'Survey Section Display Name': '調查區段顯示名稱',
'Survey Section added': '調查區段新增',
'Survey Section deleted': '調查刪除區段',
'Survey Section updated': '調查區段更新',
'Survey Section': '意見調查區段',
'Survey Series Details': '調查系列詳細資料',
'Survey Series Name': '調查系列名稱',
'Survey Series added': '調查系列新增',
'Survey Series deleted': '調查系列刪除',
'Survey Series updated': '調查系列更新',
'Survey Series': '調查系列',
'Survey Template Details': '調查範本詳細資料',
'Survey Template added': '調查範本新增',
'Survey Template deleted': '調查刪除範本',
'Survey Template updated': '調查更新範本',
'Survey Template': '意見調查範本',
'Survey Templates': '意見調查範本',
'Switch this on to use individual CSS/Javascript files for diagnostics during development.': '開啟此選項可在開發期間使用個別的CSS/Javascript檔案進行診斷。',
'Symbology': '符號系統',
'Sync Conflicts': '同步衝突',
'Sync History': '同步歷程',
'Sync Now': '立即同步',
'Sync Partners are instances or peers (SahanaEden, SahanaAgasti, Ushahidi, etc.) that you want to sync information with. Click on the link on the right to go the page where you can add sync partners, search for sync partners and modify them.': '同步夥伴是您要與之同步資訊的實例或對等(SahanaEden、SahanaAgasti、Ushahidi等)。按一下右邊的鏈結前往頁面,您可以在其中新增、搜尋及修改同步夥伴。',
'Sync Partners': '同步夥伴',
'Sync Password': '同步密碼',
'Sync Policy': '同步原則',
'Sync Pools are groups of peers (SahanaEden & SahanaAgasti instances) willing to sync with each other. You can subscribe to different groups, define new groups and dicsover the existing ones. Click the link on the right to go to Sync Pools page.': '同步儲存區是願意彼此同步的對等(SahanaEden和SahanaAgasti實例)群組。您可以訂閱不同的群組、定義新群組及探索現有群組。按一下右邊的鏈結前往同步儲存區頁面。',
'Sync Pools': '同步儲存區',
'Sync Schedule': '同步排程',
'Sync Schedules': '同步排程',
'Sync Settings updated': '同步更新設定',
'Sync Settings': '同步設定',
'Sync Username': '同步使用者名稱',
'Sync process already started on': '同步程序已啟動於',
'Synchronisation - Sync Now': '同步化-立即同步',
'Synchronisation History': '同步化歷程',
'Synchronisation': '同步化',
'Synchronization Conflicts': '同步化衝突',
'Synchronization Details': '同步化詳細資料',
'Synchronization History': '同步化歷程',
'Synchronization Peers': '同步化對等',
'Synchronization Settings': '同步化設定',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the automatic synchronization feature of SahanaEden': '同步化可讓您與其他人共用資料,並以來自其他對等的最新資料更新您自己的資料庫。這個頁面提供如何使用SahanaEden自動同步功能的相關資訊',
'Synchronization allows you to share data that you have with others and update your own database with latest data from other peers. This page provides you with information about how to use the synchronization features of Sahana Eden': '同步化可讓您與其他人共用資料,並以來自其他對等的最新資料更新您自己的資料庫。這個頁面提供如何使用Sahana Eden同步功能的相關資訊',
'Synchronization not configured': '未配置同步化',
'Synchronization not configured.': '未配置同步化。',
'Synchronization settings updated': '同步化設定更新',
'Synchronization': '同步化',
'Syncronisation History': '同步歷程',
'Syncronisation Schedules': '同步排程',
'System allows the General Public to Report Incidents & have these Tracked.': '系統可讓一般大眾報告事件並追蹤這些事件。',
'System allows the tracking & discovery of Items stored in Locations.': '系統可追蹤及探索儲存於各位置的項目。',
'System is a central online repository where all relief organizations, relief workers, government agents and camp sites for displaced personnel can coordinate the supply of aid with their demand. It allows users to allocate the available resources to fulfill the demands effectively and efficiently.': '系統是一個集中式線上資料庫,所有救援組織、救援人員、政府人員及安置流離失所者的營地都可在此協調援助的供給與需求。它可讓使用者配置可用資源,以有效且高效地滿足需求。',
'System keeps track of all Volunteers working in the disaster region. It captures not only the places where they are active, but also captures information on the range of services they are providing in each area.': '系統追蹤所有在災區工作的志願者。它不僅記錄他們活動的地點,也記錄他們在各區域提供之服務範圍的資訊。',
'Table name': '表格名稱',
'Tags': '標籤',
'Take shelter in place or per <instruction>': '就地避難或依<instruction>行動',
'Task Details': '作業詳細資料',
'Task List': '作業清單',
'Task Status': '作業狀態',
'Task added': '新增作業',
'Task deleted': '作業已刪除',
'Task status': '作業狀態',
'Task updated': '作業已更新',
'Tasks': '作業',
'Team Description': '團隊說明',
'Team Details': '團隊詳細資料',
'Team Head': '團隊負責人',
'Team ID': '團隊 ID',
'Team Id': '團隊 ID',
'Team Leader': '團隊領導人',
'Team Member added': '新增團隊成員',
'Team Members': '團隊成員',
'Team Name': '團隊名稱',
'Team Type': '團隊類型',
'Team added': '新增團隊',
'Team deleted': '已刪除團隊',
'Team updated': '更新團隊',
'Team': '團隊',
'Teams': '團隊',
'Technical testing only, all recipients disregard': '僅為技術測試,所有收件者請忽略',
'Telecommunications': '電信',
'Telephone': '電話',
'Telephony': '電話系統',
'Temp folder %s not writable - unable to apply theme!': '暫存資料夾 %s 無法寫入-無法套用佈景主題!',
'Template file %s not readable - unable to apply theme!': '範本檔案 %s 無法讀取-無法套用佈景主題!',
'Templates': '範本',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': '國內第五級行政區劃的用語(例如投票區或郵遞區號分區)。這個層級不常使用。',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': '國內第四級行政區劃的用語(例如村落、鄰里或選區)。',
'Term for the primary within-country administrative division (e.g. State or Province).': '國內主要行政區劃的用語(例如州或省)。',
'Term for the secondary within-country administrative division (e.g. District or County).': '國內第二級行政區劃的用語(例如地區或縣)。',
'Term for the third-level within-country administrative division (e.g. City or Town).': '國內第三級行政區劃的用語(例如城市或鄉鎮)。',
'Term for the top-level administrative division (i.e. Country).': '最上層行政區劃的用語(即國家)。',
'Territorial Authority': '地方政府機關',
'Terrorism': '恐怖主義',
'Tertiary Server (Optional)': '第三伺服器(選用)',
'Test Results': '測試結果',
'Text Colour for Text blocks': '文字區塊的文字顏色',
'Text Direction': '文字方向',
'Text before each Text Field (One per line)': '每個文字欄位前的文字(每行一個)',
'Text in Message': '訊息中的文字',
'Text in Message:': '訊息文字:',
'Text': '文字',
'Thank you for validating your email. Your user account is still pending for approval by the system administator (%s).You will get a notification by email when your account is activated.': '感謝您驗證您的電子郵件。您的使用者帳戶仍待系統管理者 (%s) 核准。當您的帳戶啟用時,您會收到電子郵件通知。',
'Thanks for your assistance': '感謝您的恊助',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1 == db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1 == db.table2.field2 results in a SQL JOIN.",
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': "The query is a condition like db.table1.field1=='value'. Something like db.table1.field1==db.table2.field2 results in a SQL JOIN.",
'The Area which this Site is located within.': '此場所所在的區域。',
'The Assessments module allows field workers to send in assessments.': '評量模組可讓現場工作人員送回評量。',
'The Author of this Document (optional)': '這份文件的作者(選填)',
'The Building Asssesments module allows building safety to be assessed, e.g. after an Earthquake.': '建築物評量模組可評估建築物安全,例如在地震之後。',
'The Camp this Request is from': '提出這個要求的營地',
'The Camp this person is checking into.': '此人員正在登記進入的營地。',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員/群組的現行位置,可以是概略的(供報告用)或精確的(供地圖顯示用)。輸入幾個字元以從可用位置中搜尋。',
'The District for this Report.': '這份報告所屬的地區。',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': '核准要求所寄送的電子郵件地址(通常是群組信箱而非個人信箱)。如果此欄位空白,則網域相符時要求會自動核准。',
'The Group whose members can edit data in this record.': '其成員可以編輯此記錄資料的群組。',
'The Incident Reporting System allows the General Public to Report Incidents & have these Tracked.': '事件報告系統可讓一般大眾報告事件並追蹤這些事件。',
'The Location of this Site, which can be general (for Reporting) or precise (for displaying on a Map).': '此場所的位置,可以是概略的(供報告用)或精確的(供地圖顯示用)。',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員的來源位置,可以是概略的(供報告用)或精確的(供地圖顯示用)。輸入幾個字元以從可用位置中搜尋。',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': '人員要前往的位置,可以是概略的(供報告用)或精確的(供地圖顯示用)。輸入幾個字元以從可用位置中搜尋。',
'The Media Library provides a catalogue of digital media.': '媒體庫提供數位媒體的型錄。',
'The Messaging Module is the main communications hub of the Sahana system. It is used to send alerts and/or messages using SMS & Email to various groups and individuals before, during and after a disaster.': '傳訊模組是Sahana系統的主要通訊中心。它用於在災難之前、期間和之後,以SMS和電子郵件傳送警示及/或訊息給各群組及個人。',
'The Office this record is associated with.': '此記錄所關聯的辦公室。',
'The Organization Registry keeps track of all the relief organizations working in the area.': '組織登錄追蹤在此區域工作的所有救援組織。',
'The Organization Registry keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '組織登錄追蹤在災區工作的所有救援組織。它不僅記錄組織活動的地點,也記錄組織在各區域提供之專案範圍的資訊。',
'The Organization this record is associated with.': '此記錄所關聯的組織。',
'The Organization which is funding this Activity.': '資助此活動的組織。',
'The Person currently filling this Role.': '目前擔任這個角色的人員。',
'The Project Tracking module allows the creation of Activities to meet Gaps in Needs Assessments.': '專案追蹤模組可建立活動,以填補需求評量中的缺口。',
'The Rapid Assessments Module stores structured reports done by Professional Organizations.': '快速評量模組儲存由專業組織完成的結構化報告。',
'The Request this record is associated with.': '此記錄所關聯的要求。',
'The Role this person plays within this Office/Project.': '此人員在這個辦事處/專案中扮演的角色。',
'The Role this person plays within this hospital.': '此人員在這個醫院中扮演的角色。',
'The Role to which this Role reports.': '這個角色所回報的上級角色。',
'The Shelter Registry tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '庇護所登錄追蹤所有庇護所並儲存其基本資料。它與其他模組協作,以追蹤與庇護所相關的人員、可用的服務等。',
'The Shelter this Request is from (optional).': '提出這個要求的庇護所(選填)。',
'The Shelter this Request is from': '提出這個要求的庇護所',
'The Shelter this person is checking into.': '此人員正在登記進入的庇護所。',
'The Source this information came from.': '這項資訊的來源。',
'The URL for the GetCapabilities of a WMS Service whose layers you want accessible via the Map.': '您要透過地圖存取其圖層之WMS服務的GetCapabilities URL。',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': '您要透過地圖上瀏覽器面板使用其圖層之網路地圖服務(WMS)的GetCapabilities頁面URL。',
'The URL of your web gateway without the post parameters': '不含POST參數的Web閘道URL',
'The URL to access the service.': '用來存取服務的URL。',
'The Unique Identifier (UUID) as assigned to this facility by the government.': '政府指派給這個機構的唯一ID (UUID)。',
'The area is': '區域是',
'The attribute which is used for the title of popups.': '用於蹦現視窗標題的屬性。',
'The attribute within the KML which is used for the title of popups.': 'KML中用於蹦現視窗標題的屬性。',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'KML中用於蹦現視窗內文的屬性。(屬性之間以空格分隔)',
'The body height (crown to heel) in cm.': '身高(頭頂至腳跟),以公分為單位。',
'The category of the Item.': '項目的種類。',
'The contact person for this organization.': '此組織的聯絡人。',
'The country the person usually lives in.': '聯絡人日常居住的國家.',
'The default Organization for whom this person is acting.': '此人員預設代表的組織。',
'The default Organization for whom you are acting.': '您預設代表的組織。',
'The default policy for data import from this peer.': '從這個對等匯入資料的預設原則。',
'The descriptive name of the peer.': '同層級的敘述名稱。',
'The duplicate record will be deleted': '重複的記錄會被刪除',
'The entered unit links to this unit. For e.g. if you are entering m for meter then choose kilometer(if it exists) and enter the value 0.001 as multiplicator.': '輸入的單位會鏈結至此單位。例如:如果您輸入 m 代表公尺,則選擇公里(如果存在),並輸入 0.001 作為乘數。',
'The first or only name of the person (mandatory).': '聯絡人的名字 (必填).',
'The following modules are available': '下列模組可用',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'URL 的格式為 http://your/web/map/service?service=WMS&request=GetCapabilities,其中 your/web/map/service 代表 WMS 的 URL 路徑。',
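# Note for maintainers: an example of the GetCapabilities URL form described
# in the entry above, using a hypothetical host and path:
#     http://example.org/geoserver/wms?service=WMS&request=GetCapabilities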
'The hospital this record is associated with.': '此記錄所關聯的醫院。',
'The item is designated to be sent for specific project, population, village or other earmarking of the donation such as a Grant Code.': '此項目被指定用於特定專案、人口、村落,或捐獻的其他指定用途,例如補助代碼(Grant Code)。',
'The language to use for notifications.': '通知所使用的語言。',
'The language you wish the site to be displayed in.': '您希望網站顯示的語言。',
'The last known location of the missing person before disappearance.': '失蹤者失蹤前最後已知的位置。',
'The last known location of the missing person.': '失蹤者最後已知的位置。',
'The length is': '長度是',
'The list of Brands are maintained by the Administrators.': '品牌清單由管理者維護。',
'The list of Catalogs are maintained by the Administrators.': '型錄清單由管理者維護。',
'The list of Item categories are maintained by the Administrators.': '項目類別清單由管理者維護。',
'The map will be displayed initially with this latitude at the center.': '地圖初始顯示時會以此緯度為中心。',
'The map will be displayed initially with this longitude at the center.': '地圖初始顯示時會以此經度為中心。',
'The minimum number of features to form a cluster.': '形成叢集所需的最少特徵數。',
'The name to be used when calling for or directly addressing the person (optional).': '致電或直呼聯絡人時所用的名字 (非必填).',
'The next screen will allow you to detail the number of people here & their needs.': '下一個畫面可讓你詳細記載這裡的人數以及他們的需要',
'The next screen will allow you to enter a detailed list of items and quantities, if appropriate...': '下一個畫面可讓您輸入詳細的項目與數量清單(如果適用)...',
'The number of Units of Measure of the Alternative Items which is equal to One Unit of Measure of the Item': '等於此項目一個測量單位的替代項目測量單位數量',
'The number of pixels apart that features need to be before they are clustered.': '特徵之間需相距多少像素,否則會被叢集。',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': '要下載之可見地圖周圍的圖磚數。0表示第一頁載入較快;數字越高,後續平移越快。',
'The person at the location who is reporting this incident (optional)': '在現場報告此事件的人員(選填)',
'The person reporting about the missing person.': '報告失蹤者相關資訊的人員。',
'The person reporting the missing person.': '報告失蹤者的人員。',
'The post variable containing the phone number': '含有電話號碼的POST變數',
'The post variable on the URL used for sending messages': 'URL上用於傳送訊息的POST變數',
'The post variables other than the ones containing the message and the phone number': '除含有訊息及電話號碼者以外的POST變數',
'The request this record is associated with.': '此記錄所關聯的要求。',
'The scanned copy of this document.': '這份文件的掃描副本。',
'The serial port at which the modem is connected - /dev/ttyUSB0, etc on linux and com1, com2, etc on Windows': '連接數據機的序列埠-Linux上為/dev/ttyUSB0等,Windows上為com1、com2等',
'The server did not receive a timely response from another server that it was accessing to fill the request by the browser.': '伺服器未從其存取的另一台伺服器及時收到回應,無法完成瀏覽器的要求。',
'The server received an incorrect response from another server that it was accessing to fill the request by the browser.': '伺服器從其存取的另一台伺服器收到不正確的回應,無法完成瀏覽器的要求。',
'The simple policy allows anonymous users to Read & registered users to Edit. The full security policy allows the administrator to set permissions on individual tables or records - see models/zzz.py.': '「簡易」原則允許匿名使用者讀取、註冊使用者編輯。完整安全原則則允許管理者對個別表格或記錄設定許可權-請參閱 models/zzz.py。',
'The site where this position is based.': '此職位所在的場所。',
'The staff responsibile for Facilities can make Requests for assistance. Commitments can be made against these Requests however the requests remain open until the requestor confirms that the request is complete.': '負責設施的人員可以提出協助要求。可以針對這些要求做出承諾,但要求會保持開啟,直到要求者確認要求已完成。',
'The subject event no longer poses a threat or concern and any follow on action is described in <instruction>': '該事件不再構成威脅或疑慮,任何後續行動請參閱<instruction>',
'The time at which the Event started.': '事件開始的時間。',
'The title of the WMS Browser panel in the Tools panel.': '「工具」面板中WMS瀏覽器面板的標題。',
'The token associated with this application on': '與這個應用程式相關聯的記號,位於',
'The unique identifier of the peer. Leave blank if the peer is no Sahana Eden instance, it will be auto-assigned in that case.': '對等的唯一ID。如果對等不是Sahana Eden實例,請保留空白,此時將會自動指派。',
'The unique identifier which identifies this instance to other instances.': '向其他實例識別此實例的唯一ID。',
'The way in which an item is normally distributed': '項目通常的配送方式',
'The weight in kg.': '重量,以公斤為單位。',
'The': '此',
'Theme Details': '佈景主題詳細資料',
'Theme added': '新增佈景主題',
'Theme deleted': '刪除佈景主題',
'Theme updated': '更新佈景主題',
'Theme': '佈景主題',
'Themes': '佈景主題',
'There are errors': '有錯誤',
'There are insufficient items in the Inventory to send this shipment': '庫存中沒有足夠的項目可傳送這份出貨',
'There are multiple records at this location': '這個位置有多個記錄',
'There are not sufficient items in the Inventory to send this shipment': '庫存中沒有足夠的項目可傳送這份出貨',
'There are several ways which you can use to select the Location.': '有幾種方法可用來選取位置。',
'There is no Sahana account associated with that OpenID. Would you like to create one?': '沒有與該OpenID相關聯的Sahana帳戶。您要建立一個嗎?',
'There is no address for this person yet. Add new address.': '此人員尚無地址。新增地址。',
'There was a problem, sorry, please try again later.': '很抱歉,發生問題,請稍後再試一次。',
'These are settings for Inbound Mail.': '這些是入埠郵件的設定。',
'These are the Incident Categories visible to normal End-Users': '這些是一般使用者可見的事件類別',
'These are the default settings for all users. To change settings just for you, click': '這些是所有使用者的預設設定。若只要變更您自己的設定,請按一下',
'These need to be added in Decimal Degrees.': '這些需要以十進位度數輸入。',
'They': '他們',
'This Group has no Members yet': '這個群組尚無成員',
'This Team has no Members yet': '這個團隊尚無成員',
'This appears to be a duplicate of': '這似乎與以下項目重複',
'This email address is already in use': '這個email已經被使用',
'This file already exists on the server as': '這個檔案已存在於伺服器上為',
'This form allows the administrator to remove a duplicate location by 1st updating all references to it by a different location.': '此表單可讓管理者移除重複的位置:先將所有對它的參照更新為另一個位置。',
'This form allows the administrator to remove a duplicate location.': '此表單可讓管理者移除重複的位置。',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': '這適用於這個層級仍在建置中的情況。為防止這個層級完成後被意外修改,可將其設為False。',
'This is the way to transfer data between machines as it maintains referential integrity.': '這是在機器之間傳送資料的方式,因為它會維護參照完整性。',
'This is the way to transfer data between machines as it maintains referential integrity...duplicate data should be removed manually 1st!': '這是在機器之間傳送資料的方式,因為它會維護參照完整性...請先手動移除重複的資料!',
'This level is not open for editing.': '這個層級未開放編輯。',
'This might be due to a temporary overloading or maintenance of the server.': '這可能是由於伺服器暫時超載或正在維護。',
'This module allows Inventory Items to be Requested & Shipped between the Inventories of Facilities.': '此模組可讓庫存項目在各設施的庫存之間進行要求及運送。',
'This module allows you to manage Events - whether pre-planned (e.g. exercises) or Live Incidents. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '這個單元可管理事件,不論事前計畫(如預演)或事件發生時。你可以安排適當的資源,如人力、物資、設備等,使其能更容易被動員',
'This module allows you to plan scenarios for both Exercises & Events. You can allocate appropriate Resources (Human, Assets & Facilities) so that these can be mobilized easily.': '此模組可讓您為演習和事件規劃實務場景。您可以配置適當的資源(人力、資產和設備),使其能更容易被動員。',
'This module assists the management of fatalities and the identification of the deceased.': '這個模組協助管理罹難者及辨識死者身分。',
'This page provides you with information about how to use the automatic synchronization feature of Sahana': '這個頁面提供如何使用Sahana自動同步功能的相關資訊',
'This page shows you logs of past syncs. Click on the link below to go to this page.': '這個頁面顯示過去同步的日誌。按一下下面的鏈結前往此頁面。',
'This screen allows you to upload a collection of photos to the server.': '這個畫面可讓您上傳一組照片至伺服器。',
'This setting can only be controlled by the Administrator.': '這項設定只能由管理者控制。',
'This shipment has already been received.': '貨物已收到。',
'This shipment has already been sent.': '貨物已送出。',
'This shipment has not been received - it has NOT been canceled because it can still be edited.': '此出貨尚未接收-並未將其取消,因為仍然可以編輯。',
'This shipment has not been sent - it has NOT been canceled because it can still be edited.': '此出貨尚未傳送-並未將其取消,因為仍然可以編輯。',
'This shipment will be confirmed as received.': '此出貨將被確認為已接收。',
'Thursday': '星期四',
'Ticket Details': '問題單詳細內容',
'Ticket ID': '問題單 ID',
'Ticket added': '新增問題單',
'Ticket deleted': '已刪除問題單',
'Ticket updated': '已更新問題單',
'Ticket': '問題單',
'Ticketing Module': '問題單模組',
'Tickets': '問題單',
'Tilt-up concrete': '立牆澆置混凝土',
'Timber frame': '木構架',
'Time Stamp': '時間戳記',
'Time at which data was exchanged.': '資料交換的時間。',
'Time needed to collect water': '取水所需的時間',
'Time of Request': '要求時間',
'Timeline Report': '時間軸報告',
'Timeline': '時間表',
'Timestamp': '時間戳記',
'Title to show for the Web Map Service panel in the Tools panel.': '「工具」面板中網路地圖服務面板顯示的標題。',
'Title': '標題',
'To Location': '終點位置',
'To Organization': '目標組織',
'To Person': '將人員',
'To Site': '目標場所',
'To begin the sync process, click the button on the right =>': '開始同步程式,請按一下右邊的按鈕=>',
'To begin the sync process, click this button =>': '開始同步程式,請按一下這個按鈕=>',
'To create a personal map configuration, click ': '若要建立個人化地圖設定,請點選 ',
'To create a personal map configuration, click': '若要建立個人地圖配置,請按一下',
'To delete': '要刪除',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in models/000_config.py': '若要編輯OpenStreetMap,您必須編輯OpenStreetMap設定模型/000_config.. py',
'To search by job title, enter any portion of the title. You may use % as wildcard.': '搜尋工作職稱,輸入任何部分的標題。 您可以使用%作為萬用字元。',
'To submit a new job, use the': '提交一個新的工作,使用',
'To variable': '到變數',
'To': '至',
'Tools': '工具',
'Total # of Beneficiaries Reached': '已觸及的受益人總數',
'Total # of Target Beneficiaries': '目標受益人總數',
'Total # of households of site visited': '訪視場所的家戶總數',
'Total Beds': '總床位數',
'Total Beneficiaries': '總受益人',
'Total Cost per Megabyte': '每MB成本總計',
'Total Cost per Minute': '每分鐘的總成本',
'Total Households': '總家庭',
'Total Monthly Cost': '每月成本總計',
'Total Monthly Cost:': '每月成本總計:',
'Total Monthly': '每月總計',
'Total No of Affectees (Including Students, Teachers & Others)': '受影響者總數(包括學生、教師及其他)',
'Total No of Female Affectees (Including Students, Teachers & Others)': '女性受影響者總數(包括學生、教師及其他)',
'Total No of Male Affectees (Including Students, Teachers & Others)': '男性受影響者總數(包括學生、教師及其他)',
'Total No of Students (Primary To Higher Secondary) in the Total Affectees': '受影響者總數中的學生人數(小學至高中)',
'Total No of Teachers & Other Govt Servants in the Total Affectees': '受影響者總數中的教師及其他政府雇員人數',
'Total One-time Costs': '一次性成本總計',
'Total Persons': '總人員',
'Total Recurring Costs': '總循環成本',
'Total Unit Cost': '總單位成本',
'Total Unit Cost:': '總單位成本:',
'Total Units': '裝置總計',
'Total gross floor area (square meters)': '總樓地板面積(平方公尺)',
'Total number of beds in this hospital. Automatically updated from daily reports.': '此醫院的總床位數。自動從每日報告更新。',
'Total number of houses in the area': '該區域的房屋總數',
'Total number of schools in affected area': '受影響區域的學校總數',
'Total population of site visited': '訪視場所的總人口',
'Total': '總計',
'Totals for Budget:': '預算的總計:',
'Totals for Bundle:': '總額的軟體組:',
'Totals for Kit:': '總額的套件:',
'Tourist Group': '觀光團',
'Town': '鄉鎮',
'Traces internally displaced people (IDPs) and their needs': '追蹤國內流離失所者(IDPs)及其需求',
'Tracing': '追蹤',
'Track Details': '追蹤詳細資料',
'Track deleted': '刪除跟蹤',
'Track updated': '更新跟蹤',
'Track uploaded': '跟蹤上傳',
'Track with this Person?': '跟蹤與此人員嗎?',
'Track': '追蹤',
'Tracking of Projects, Activities and Tasks': '追蹤專案、活動和任務',
'Tracking of basic information on the location, facilities and size of the Shelters': '追蹤庇護所的位置、設施與規模等基本資訊',
'Tracks requests for aid and matches them against donors who have pledged aid': '追蹤援助要求,並與已認捐援助的捐助者配對',
'Tracks the location, distibution, capacity and breakdown of victims in Shelters': '追蹤庇護所中受災者的位置、分布、容量與組成',
'Tracks': '軌跡',
'Traffic Report': '資料流量報告',
'Training Course Catalog': '訓練課程型錄',
'Training Details': '訓練詳細資料',
'Training added': '新增訓練',
'Training deleted': '刪除訓練',
'Training updated': '訓練更新',
'Training': '訓練',
'Trainings': '訓練',
'Transit Status': '傳輸狀態',
'Transit': '運輸',
'Transit. Status': '運輸狀態',
'Transition Effect': '轉場效果',
'Transparent?': '透明?',
'Transport': '傳輸',
'Transportation assistance, Rank': '交通協助、等級',
'Trauma Center': '創傷中心',
'Travel Cost': '旅行成本',
'Treatments': '治療',
'Tree': '樹狀結構',
'Tropical Storm': '熱帶風暴',
'Tropo Messaging Token': 'Tropo記號傳訊',
'Tropo Settings': 'Tropo設定',
'Tropo Voice Token': 'Tropo語音記號',
'Tropo settings updated': 'Tropo更新設定',
'Truck': '卡車',
'Try checking the URL for errors, maybe it was mistyped.': '請檢查URL是否有錯誤,可能是輸入錯誤。',
'Try hitting refresh/reload button or trying the URL from the address bar again.': "嘗試按重新整理/載入按鈕或試著的URL從'網址'列。",
'Try refreshing the page or hitting the back button on your browser.': '請嘗試重新整理頁面,或按"上一頁"按鈕的瀏覽器。',
'Tsunami': '海嘯',
'Tuesday': '星期二',
'Twitter ID or #hashtag': 'Twitter ID或#hashtag',
'Twitter Settings': 'Twitter設定',
'Twitter': '推特',
'Type of Construction': '建築類型',
'Type of cause': '原因類型',
'Type of place for defecation': '如廁地點的類型',
'Type of water source before the disaster': '災前的水源類型',
'Type': '類型',
'Type:': '類型:',
'Types of health services available': '可用的健康服務類型',
'Types of water storage containers available': '可用的儲水容器類型',
'Types': '類型',
'URL of the Ushahidi instance': 'Ushahidi實例的網址',
'URL': '網址',
'UTC Offset': '世界標準時間時差',
'UUID of foreign Sahana server': 'UUID的外部Sahana伺服器',
'Un-Repairable': '無法修復',
'Unable to parse CSV file!': '無法剖析CSV檔!',
'Unidentified': '身分不明',
'Union Council': '聯集委員會',
'Unit Bed Capacity': '單位床位容量',
'Unit Cost': '單位成本',
'Unit Details': '單元詳細資料',
'Unit Name': '單元名稱',
'Unit Set': '單元設定',
'Unit Short Code for e.g. m for meter.': '單位簡碼,例如 m 代表公尺。',
'Unit added': '新增單元',
'Unit deleted': '刪除單元',
'Unit of Measure': '測量單位',
'Unit updated': '單元更新',
'Unit': '裝置',
'Units of Measure': '測量單位',
'Units': '單元',
'Unknown Peer': '不明的同層級',
'Unknown type of facility': '不明類型的機能',
'Unknown': '不明',
'Unresolved Conflicts': '尚未解決的衝突',
'Unselect to disable the modem': '取消選取以停用數據機',
'Unsent': '未傳送',
'Unsupported data format!': '不受支援的資料格式!',
'Unsupported method!': '不受支援的方法!',
'Update Activity Report': '更新活動報告',
'Update Cholera Treatment Capability Information': '更新霍亂治療能力資訊',
'Update Details': '更新詳細資料',
'Update Import Job': '更新匯入工作',
'Update Request': '更新要求',
'Update Service Profile': '更新服務設定檔',
'Update Status': '更新狀態',
'Update Task Status': '更新作業狀態',
'Update Unit': '更新單元',
'Update added': '新增更新',
'Update deleted': '刪除更新',
'Update if Master': '若為主要則更新',
'Update if Newer': '若較新則更新',
'Update updated': '更新更新',
'Update your current ordered list': '更新您現行排序清單',
'Update': '更新項目',
'Update/Master': '更新/主要',
'Update/Newer': '更新/新',
'Updated By': '更新者',
'Updates': '更新項目',
'Upload Photos': '上傳照片',
'Upload Spreadsheet': '上傳試算表',
'Upload Track': '上傳跟蹤',
'Upload a Spreadsheet': '上傳一個試算表',
'Upload an image file (bmp, gif, jpeg or png), max. 300x300 pixels!': '上傳影像檔案(BMP, GIF, JPEG或PNG),最大 300x300圖元!',
'Upload an image file here.': '上傳影像檔案在這裡。',
'Upload an image, such as a photo': '上傳影像,例如圖片',
'Upload': '上傳',
'Urban Fire': '都市火災',
'Urban area': '都市區域',
'Urdu': '烏都文',
'Urgent': '緊急',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用 (...)&(...) 表示 AND,(...)|(...) 表示 OR,~(...) 表示 NOT,以建置更複雜的查詢。',
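# Note for maintainers (not a translatable string): the entry above documents
# combining web2py DAL queries. A minimal sketch using hypothetical fields:
#     q = (db.t.a == 1) & (db.t.b == 2)   # AND
#     q = (db.t.a == 1) | (db.t.b == 2)   # OR
#     q = ~(db.t.a == 1)                  # NOT
#     rows = db(q).select()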
'Use Geocoder for address lookups?': '使用地理編碼程式的位址查閱嗎?',
'Use default from feature class': '使用特性類別的預設值',
'Use default': '使用預設值',
'Use these links to download data that is currently in the database.': '使用這些鏈結來下載資料中的現行資料庫。',
'Use this link to review the situation.': '請利用這個鏈結來檢視情勢。',
'Use this space to add a description about the Bin Type.': '使用此空間新增關於Bin類型的說明。',
'Use this space to add a description about the site location.': '使用此空間新增關於站點位置的說明。',
'Use this space to add a description about the warehouse/site.': '使用此空間新增關於倉儲/場所的說明。',
'Use this space to add additional comments and notes about the Site/Warehouse.': '使用此空間新增關於站點/倉儲的其他註解和附註。',
'Use this to indicate that the person has been found.': '使用此項來表示此人已被找到。',
'Used by IRS & Assess': '由IRS(事件報告)及Assess(評估)模組使用',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': '用於onHover工具提示及叢集蹦現視窗,以區分類型。',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': '用於建置onHover工具提示;第1個欄位也用於叢集蹦現視窗,以區分記錄。',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '用來檢查輸入位置的緯度是否合理。可用來過濾具有位置的資源清單。',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': '用來檢查輸入位置的經度是否合理。可用來過濾具有位置的資源清單。',
'Used to import data from spreadsheets into the database': '用來將試算表資料匯入資料庫',
'Used within Inventory Management, Request Management and Asset Management': '用於庫存管理、要求管理及資產管理',
'User %(id)s Logged-in': '使用者 %(id)s 已登入',
'User %(id)s Logged-out': '使用者 %(id)s 已登出',
'User %(id)s Profile updated': '使用者 %(id)s 設定檔更新',
'User %(id)s Registered': '使用者 %(id)s 登錄',
'User Account has been Disabled': '使用者帳戶已停用',
'User Details': '使用者詳細資料',
'User ID': '使用者 ID',
'User Management': '使用者管理',
'User Profile': '使用者設定檔',
'User Requests': '使用者要求',
'User Updated': '已更新使用者',
'User added': '已新增使用者',
'User already has this role': '使用者已具有此角色',
'User deleted': '已刪除使用者',
'User updated': '已更新使用者',
'User': '使用者',
'Username & Password': '使用者名稱和密碼',
'Username for authentication at the peer. Note that only HTTP Basic authentication is supported.': '在對等進行鑒別的使用者名稱。注意,只支援HTTP基本鑒別。',
'Username': '使用者名稱',
'Users can collaboratively add markers of what is occuring.': '使用者可以協作新增標記,標示正在發生的事情。',
'Users removed': '已移除使用者',
'Users': '使用者',
'Uses the REST Query Format defined in': '使用REST查詢格式,定義於',
'Usual food sources in the area': '該區域常見的食物來源',
'Utilities': '公用程式',
'Utility, telecommunication, other non-transport infrastructure': '公用事業、電信及其他非運輸基礎設施',
'Value': '值',
'Various Reporting functionalities': '各種報告功能',
'Vehicle Crime': '車輛犯罪',
'Vehicle Types': '車輛類型',
'Vehicle': '車輛',
'Vendor': '供應商',
'Verification Status': '驗證狀態',
'Verified': '已驗證',
'Verified?': '驗證?',
'Verify Password': '驗證密碼',
'Verify password': '驗證密碼',
'Version': '版本',
'Very Good': '非常良好',
'Very High': '非常高',
'View & Edit Pledges': '檢視和編輯認捐',
'View Alerts received using either Email or SMS': '檢視透過電子郵件或簡訊收到的通知',
'View All': '全部檢視',
'View Error Tickets': '檢視錯誤問題單',
'View Fullscreen Map': '檢視全螢幕地圖',
'View Image': '檢視影像',
'View Items': '檢視項目',
'View Map': '檢視地圖',
'View On Map': '在地圖上檢視',
'View Outbox': '檢視寄件匣',
'View Picture': '檢視圖片',
'View Requests & Pledge Aid': '檢視要求並認捐援助',
'View Requests for Aid': '檢視援助要求',
'View Settings': '檢視設定',
'View Situation Map': '檢視情勢地圖',
'View Tickets': '檢視問題單',
'View and/or update their details': '檢視及/或更新其詳細資料',
'View or update the status of a hospital.': '檢視或更新醫院的狀態。',
'View pending requests and pledge support.': '檢視擱置中的要求並認捐支援。',
'View the hospitals on a map.': '在地圖上檢視醫院。',
'View/Edit the Database directly': '直接檢視/編輯資料庫',
'Village Leader': '村落領導者',
'Village': '村落',
'Visible?': '可見嗎?',
'Visual Recognition': '視覺化辨識',
'Volcanic Ash Cloud': '火山灰雲',
'Volcanic Event': '火山事件',
'Volume (m3)': '體積(立方公尺)',
'Volume - Fluids': '容量-液體',
'Volume - Solids': '容量-固體',
'Volume Capacity': '容積容量',
'Volume/Dimensions': '體積/尺寸',
'Volunteer Availability': '自願可用性',
'Volunteer Data': '自願資料',
'Volunteer Details': '自願詳細資料',
'Volunteer Information': '志工資訊',
'Volunteer Management': '主動管理',
'Volunteer Project': '志願者專案',
'Volunteer Record': '志工記錄',
'Volunteer Registration': '志願者登錄',
'Volunteer Registrations': '自願登錄',
'Volunteer Request': '自願要求',
'Volunteer added': '新增志工',
'Volunteer availability added': '自願可用性新增',
'Volunteer availability deleted': '自願可用性刪除',
'Volunteer availability updated': '自願可用性更新',
'Volunteer deleted': '志工刪除',
'Volunteer details updated': '更新志願者詳細資料',
'Volunteer location': '志願者位置',
'Volunteer registration added': '已新增志願者登記',
'Volunteer registration deleted': '已刪除志願者登記',
'Volunteer registration updated': '自願登錄更新',
'Volunteers were notified!': '已通知志願者!',
'Volunteers': '志工',
'Vote': '表決',
'Votes': '表決',
'WASH': '水、環境衛生與個人衛生(WASH)',
'WFP Assessments': '世界糧食計劃組織的評估',
'WMS Browser Name': 'WMS瀏覽器名稱',
'WMS Browser URL': 'WMS瀏覽器URL',
'Walking Only': '僅能步行',
'Walking time to the health service': '步行至健康服務的時間',
'Wall or other structural damage': '牆面或其他結構損壞',
'Warehouse Details': '倉儲詳細資料',
'Warehouse Management': '倉儲管理',
'Warehouse added': '新增倉儲',
'Warehouse deleted': '刪除倉庫',
'Warehouse updated': '更新倉儲',
'Warehouse': '倉儲',
'Warehouse/Sites Registry': '倉庫/站點登錄',
'Warehouses': '倉庫',
'Water Sanitation Hygiene': '水、環境衛生與個人衛生',
'Water collection': '取水',
'Water gallon': '水(加侖)',
'Water storage containers available for HH': '家戶可用的儲水容器',
'Water storage containers in households': '家戶中的儲水容器',
'Water storage containers sufficient per HH': '每戶有足夠的儲水容器',
'Water supply': '水供應',
'Water': '水',
'Way Bill(s)': '提單',
'We have tried': '我們已經嘗試',
'Web Map Service Browser Name': '網路地圖服務瀏覽器名稱',
'Web Map Service Browser URL': '網路地圖服務瀏覽器URL',
'Website': '網站',
'Wednesday': '星期三',
'Weekly': '每週',
'Weight (kg)': '重量 (公斤)',
'Weight': '重量',
'Welcome to the Sahana Eden Disaster Management System': '歡迎使用 Sahana Eden 救災管理系統',
'Welcome to the Sahana Portal at': '歡迎使用Sahana入口網站',
'Well-Known Text': 'Well-Known Text (WKT)',
'Were basic medical supplies available for health services prior to the disaster?': '災難之前,健康服務是否有基本醫療用品可用?',
'Were breast milk substitutes used prior to the disaster?': '災難之前是否使用母乳替代品?',
'Were there cases of malnutrition in this area prior to the disaster?': '在這個災難之前,這個地區有營養不良的個案嗎?',
'Were there health services functioning for the community prior to the disaster?': '在這個災難之前,這個地區有正常運作的健康醫療服務嗎?',
'Were there reports or evidence of outbreaks of any micronutrient malnutrition disorders before the emergency?': '緊急事件之前,是否有任何微量營養素缺乏症爆發的報告或證據?',
'What are the factors affecting school attendance?': '有哪些因素會影響到學校上課的出席率?',
'What are your main sources of cash to restart your business?': '什麼是你重新創業的主要現金資本來源?',
'What are your main sources of income now?': '什麼是你現階段主要收入來源?',
'What do you spend most of your income on now?': '現階段你大部份收入花用在什麼地方?',
'What food stocks exist? (main dishes)': '現有哪些糧食儲備?(主食)',
'What food stocks exist? (side dishes)': '現有哪些糧食儲備?(副食)',
'What is the estimated total number of people in all of these institutions?': '把這些機構的人全部加起來, 大約總共是多少人?',
'What is your major source of clean water for daily use (ex: washing, cooking, bathing)?': '什麼是你每天主要的清潔用水來源 (例如: 清洗, 煮飯, 洗澡)?',
'What is your major source of drinking water?': '你的飲用水主要是來自於那裡?',
'What type of latrines are available in the village/IDP centre/Camp?': '什麼類型的廁所在村/ IDP中心/營可用?',
'What type of salvage material can be used from destroyed houses?': '被摧毀的房屋有哪些可回收再利用的建材?',
'What type of salvage material can be used from destroyed schools?': '被摧毀的學校有哪些可回收再利用的建材?',
'What types of health problems do children currently have?': '孩子目前有什麼類型的衛生問題?',
'What types of health problems do people currently have?': '人目前有什麼類型的衛生問題?',
'What types of health services are still functioning in the affected area?': '在受影響的地區什麼類型的衛生服務仍然運作?',
'What types of household water storage containers are available?': '有什麼類型的家庭儲水容器可用?',
'What were your main sources of income before the disaster?': '災難之前什麼是你的主要收入來源?',
'Wheat': '小麥',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value adds a small mount of distance outside the points. Without this, the outermost points would be on the bounding box, and might not be visible.': '當顯示的地圖聚焦於一組點時,地圖會縮放至僅顯示包圍這些點的區域。這個值會在點的外圍額外加上一小段距離。否則,最外側的點會剛好落在邊界框上,可能無法看見。',
'When a map is displayed that focuses on a collection of points, the map is zoomed to show just the region bounding the points. This value gives a minimum width and height in degrees for the region shown. Without this, a map showing a single point would not show any extent around that point. After the map is displayed, it can be zoomed as desired.': '當顯示的地圖聚焦於一組點時,地圖會縮放至僅顯示包圍這些點的區域。這個值為所顯示區域提供以度為單位的最小寬度和高度。否則,只顯示單一點的地圖不會顯示該點周圍的任何範圍。地圖顯示後,可依需要縮放。',
'When reports were entered': '當報告已輸入',
'Where are the alternative places for studying?': '另類的學習地方在哪裡?',
'Where are the separated children originally from?': '失散兒童原本來自何處?',
'Where do the majority of people defecate?': '大多數人在何處如廁?',
'Where have the children been sent?': '兒童被送往何處?',
'Where is solid waste disposed in the village/camp?': '村落/營地的固體廢棄物棄置於何處?',
'Whereabouts': '下落',
'Whether this is a Sahana Eden, Sahana Agasti, Ushahidi or Other instance.': '這是否為Sahana Eden, Sahana Agasti, Ushahidi或其他實例。',
'Which API function was called, it can only have two values: getdata refers to data export operation and putdata refers to data import operation.': '該API函數呼叫,它只能有二個值: getData參照資料匯出作業, putdata是指資料匯入作業。',
'Who is doing what and where': '誰在何處做什麼',
'Who usually collects water for the family?': '家中通常由誰取水?',
'Width (m)': '寬度(M)',
'Width': '寬度',
'Wild Fire': '野火',
'Wind Chill': '風寒',
'Window frame': '窗框',
'Winter Storm': '冬季風暴',
'Without mentioning any names or indicating anyone, do you know of any incidents of violence against women or girls occuring since the disaster?': '在不提出任何人名或暗示任何人的情況下,你是否知道自從災難發生後, 有沒有任何對婦女或女孩暴力的事件?',
'Women of Child Bearing Age': '育齡婦女',
'Women participating in coping activities': '參與應對活動的婦女',
'Women who are Pregnant or in Labour': '懷孕或分娩中的婦女',
'Womens Focus Groups': '婦女焦點團體',
'Wooden plank': '木板',
'Wooden poles': '木桿',
'Working hours end': '工作時間結束',
'Working hours start': '工作時間開始',
'Working or other to provide money/food': '工作或以其他方式提供金錢/食物',
'Would you like to display the photos on the map?': '您要在地圖上顯示照片嗎?',
'X-Ray': 'X光',
'YES': '是',
'Year built': '建置年份',
'Year of Manufacture': '製造年份',
'Yellow': '黃色',
'Yes': '是',
'You are a recovery team?': '您是回復團隊嗎?',
'You are attempting to delete your own account - are you sure you want to proceed?': '您正在嘗試刪除您自己的帳戶-您確定要繼續進行嗎?',
'You are currently reported missing!': '您目前被報告為失蹤!',
'You can add information about your organization here. It is the information which other servers can read about you.': '您可以在這裡新增組織的相關資訊。這是其他伺服器可以讀取到的關於您的資訊。',
'You can change the configuration of synchronization module in the Settings section. This configuration includes your UUID (unique identification number), sync schedules, beacon service and so on. Click the following link to go to the Sync Settings page.': '您可以在「設定」區段變更同步化模組的配置。此配置包括您的UUID(唯一識別號碼)、同步排程、Beacon服務等。按一下下列鏈結前往「同步設定」頁面。',
'You can click on the map below to select the Lat/Lon fields': '您可以按一下下面的地圖以選取緯度/經度欄位',
'You can click on the map below to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '您可以按一下下面的地圖以選取緯度/經度欄位。經度是西-東(橫向)。緯度是北-南(縱向)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。需以十進位度數輸入。',
'You can click on the map below to select the Lat/Lon fields:': '您可以按一下下面的地圖以選取緯度/經度欄位:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '您可以按一下地圖以選取緯度/經度欄位。經度是西-東(橫向)。緯度是北-南(縱向)。緯度在赤道為零,北半球為正,南半球為負。經度在本初子午線(格林威治)為零,向東(橫跨歐洲和亞洲)為正,向西(橫跨大西洋和美洲)為負。需以十進位度數輸入。',
'You can select the Draw tool (': '您可以選取繪制工具',
'You can select the Draw tool': '您可以選取繪制工具',
'You can set the modem settings for SMS here.': '您可以在這裡設定SMS的數據機設定。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '您可以使用轉換工具,從GPS座標或度/分/秒格式進行轉換。',
'You do no have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。',
'You do no have permission to cancel this sent shipment.': '您沒有許可權來取消此傳送出貨。',
'You do no have permission to make this commitment.': '您沒有許可權來進行此承諾。',
'You do no have permission to receive this shipment.': '您沒有許可權來接收此出貨。',
'You do no have permission to send this shipment.': '您沒有許可權來傳送這份出貨。',
'You do not have permission for any facility to make a commitment.': '您沒有許可權的任何機能來使承諾。',
'You do not have permission for any facility to make a request.': '您沒有許可權的任何機能來提出請求。',
'You do not have permission for any site to add an inventory item.': '您沒有許可權的任何網站,以新增一個庫存項目。',
'You do not have permission for any site to receive a shipment.': '您沒有許可權的任何網站接收出貨。',
'You do not have permission for any site to send a shipment.': '您沒有許可權的任何網站傳送出貨。',
'You do not have permission to cancel this received shipment.': '您沒有許可權來取消此接收出貨。',
'You do not have permission to cancel this sent shipment.': '您沒有許可權來取消此傳送出貨。',
'You do not have permission to make this commitment.': '您沒有權限可讓此承諾。',
'You do not have permission to receive this shipment.': '您沒有許可權來接收這個出貨。',
'You do not have permission to send a shipment from this site.': '您沒有許可權來傳送運送產品這個網站。',
'You do not have permission to send this shipment.': '您沒有許可權來傳送這份出貨。',
'You have a personal map configuration. To change your personal configuration, click': '您有一個個人對映配置。 若要變更您的個人配置,請按一下',
'You have found a dead body?': '您找到一個停用身體?',
'You must be logged in to register volunteers.': '您必須登入,才能登錄參與者。',
'You must be logged in to report persons missing or found.': '您必須登入,才能報告人員遺漏或找到。',
'You must provide a series id to proceed.': '您必須提供一個系列ID來繼續。',
'You should edit Twitter settings in models/000_config.py': '您應該編輯models/000_config.py中的Twitter設定',
'Your action is required. Please approve user %s asap:': '您的動作是必要的。 請核准使用者%s ASAP:',
'Your action is required. Please approve user': '您的動作是必要的。 請核准使用者',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '您的現行排序清單的解決方案項目如下所示。 您可以變更它的表決。',
'Your post was added successfully.': '已順利新增您的文章。',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': '您的系統已被指派一個唯一的識別碼(UUID),它在其他電腦可讓您用來識別您。 若要檢視您的UUID,您可以跳至同步化->同步設定。 您也可以查看其他設定這個頁面上。',
'Your unique identification key. It is a 16 character word (aka string). Other servers in your organization will recognize you from this.': '您的唯一識別金鑰。 它是一個16字元字組(亦稱為字串)。 其他伺服器的組織會識別您。',
'ZIP/Postcode': 'ZIP/郵遞區號',
'Zero Hour': '〇小時',
'Zeroconf Description': 'ZeroConf說明',
'Zeroconf Port': 'ZeroConf埠',
'Zinc roof': '鋅安設',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': '放大:按一下在對映中,或使用滑鼠左鍵並拖動滑鼠來建立一個矩形',
'Zoom Levels': '縮放級別',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': '縮小:按一下在對映中,或使用滑鼠左鍵並拖動滑鼠來建立一個矩形',
'Zoom to maximum map extent': '縮放至對映上限範圍',
'Zoom': '縮放',
'accepted': '接受',
'act': '行動',
'active': '作用中',
'added': '已新增',
'all records': '所有記錄',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': '允許的預算來開發基於員工和設備成本,包括任何管理成本。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '可讓您建立和管理的調查來評估損壞之後,自然災難。',
'an individual/team to do in 1-2 days': '個別團隊/一中執行的二天',
'approved': '已核准',
'are mandatory and must be filled': '的欄位為必填',
'assigned': '已指派',
'average': '平均值',
'black': '黑色',
'blond': '金色',
'blue': '藍色',
'brown': '棕色',
'by': '根據',
'c/o Name': 'C I/O名稱',
'can be used to extract data from spreadsheets and put them into database tables.': '可用來擷取資料的試算表和放置到資料庫表格。',
'check all': '勾選「全部」',
'click for more details': '按一下以取得更多詳細資料',
'collateral event': '抵押品事件',
'completed': '已完成',
'confirmed': '已確認',
'consider': '考量',
'criminal intent': '犯罪目的',
'critical': '重要',
'crud': 'CRUD',
'curly': '大括弧',
'currently registered': '目前登錄',
'daily': '每日',
'data uploaded': '上傳資料',
'database %s select': '資料庫%s選取',
'database': '資料庫',
'db': 'DB',
'deceased': '死亡',
'deferred': '延遲',
'delete all checked': '所有已刪除',
'delete': '刪除',
'deleted': '已刪除',
'denied': '已拒絕',
'description': '說明',
'design': '設計',
'diseased': '死者',
'displaced': '移離',
'divorced': '離婚',
'done!': '完成!',
'duplicate': '重複',
'edit': '編輯',
'editor': '編輯者',
'eg. gas, electricity, water': '例如: 瓦斯,電力,水',
'embedded': '內嵌的',
'enclosed area': '括住區域',
'export as csv file': '匯出為CSV檔案',
'fat': 'FAT',
'feedback': '讀者意見',
'female': '女性',
'final report': '最終報告',
'flush latrine with septic tank': 'latrine清除與septic油槽',
'follow-up assessment': '後續評量',
'food_sources': '食物來源',
'form data': '表單資料',
'found': '找到',
'from Twitter': '從Twitter',
'full': '滿載',
'getting': '取得',
'green': '綠色',
'grey': '灰色',
'here': '這裡',
'high': '高',
'highly critical': '高度重要',
'hourly': '每小時',
'households': '家庭',
'how to deal with duplicate data found between your machine and that particular sahana instance.': '如何處理重複資料之間找到在您的機器與該特定sahana實例。',
'http://openid.net/get-an-openid/start-using-your-openid/': 'https://myid.tw/profile/help',
'human error': '人為錯誤',
'identified': '識別',
'ignore': '忽略',
'immediately': '立即',
'improvement': '改進',
'in Deg Min Sec format': '在度最小秒格式',
'in GPS format': '在GPS格式',
'inactive': '非作用中',
'initial assessment': '起始評量',
'injured': '受傷',
'insert new %s': '插入新的%s',
'insert new': '插入新建項目',
'invalid request': '無效要求',
'invalid': '無效',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': '是一個中央線上資料庫位置資訊的所有意外受害者和系列,特別是識別意外, evacuees和移動人員可以儲存。 資訊,如姓名,年齡,聯絡人編號,身分證號碼,取代位置,以及其他詳細資料擷取。 圖片和指紋詳細資料的人可以上傳至系統中。 人員也可以擷取群組的效率和方便。',
'is an online bulletin board of missing and found people. It captures information about the people missing and found, as well as information of the person seeking them, increasing the chances of people finding each other. For example if two members of a family unit is looking for the head of the family, we can use this data at least to connect those two family members.': '是一個線上公佈欄的遺失及找到的人員。 它會擷取資訊的人遺失及找到的,以及資訊的人員辨認,增加機會的人的其他人。 例如,如果二個成員的一系列單元正在尋找的標頭,系列,我們可以使用這個資料至少連接二個系列的成員。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': '可想而知是要由數個子模組合力提供複雜的功能,用於管理釋放和專案項目來組織。 這包括進氣系統,倉儲管理系統,商品追蹤,供應鏈管理,車隊管理,採購,財務追蹤和其他資產和資源管理功能',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '跟蹤記錄所有傳入門票,讓他們進行分類和路由到適當的地方行動。',
'keeps track of all the relief organizations working in the disaster region. It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': '追蹤所有的組織工作在災難區域。 它不只會擷取工作區所作用中,但也會擷取的相關資訊範圍的專案會提供每一個區域。',
'leave empty to detach account': '保留空白以分離帳戶',
'legend URL': '圖註URL',
'light': '光亮',
'locations': '位置',
'login': '登入',
'long': 'Long',
'long>12cm': '超過12cm',
'low': '低',
'male': '男性',
'manual': '手動',
'married': '已婚',
'medium': '中',
'menu item': '功能表項目',
'meters': '米',
'missing': '遺漏',
'module allows the site administrator to configure various options.': '模組可讓網站管理者配置的各種選項。',
'module helps monitoring the status of hospitals.': '模組有助於監視狀態的醫院。',
'module provides a mechanism to collaboratively provide an overview of the developing disaster, using online mapping (GIS).': '模組提供一種機制來提供合作開發的概述,意外,使用連線對映(GIS)。',
'more': '更多模組',
'n/a': '不適用',
'natural hazard': '自然危害',
'never': '絕不',
'new record inserted': '插入新記錄',
'new': '新建',
'next 100 rows': '下100個橫列',
'no': '無影響',
'non-critical': '非重要',
'none': '無',
'normal': '正常',
'not accessible - no cached version available!': '無法存取-無快取可用版本!',
'not accessible - using cached version from': '無法存取-使用快取的版本。',
'not specified': '未指定',
'not writable - unable to cache GeoRSS layers!': '無法寫入-無法快取GeoRSS層!',
'not writable - unable to cache KML layers!': '無法寫入-無法快取KML層!',
'num Zoom Levels': 'num個縮放級別',
'obsolete': '已作廢',
'on': '開啟',
'once': '一次',
'open defecation': '開啟defecation',
'operational intent': '目的作業',
'optional': '選用',
'or import from csv file': '或從CSV檔案',
'other': '其他',
'over one hour': '上一個小時',
'people': '個人',
'piece': '片段',
'pit latrine': 'PIT latrine',
'pit': 'PIT',
'postponed': '延遲',
'preliminary template or draft, not actionable in its current form': '初步範本或初稿,不可在其現行表單',
'previous 100 rows': '前100個橫列',
'primary incident': '主要事件',
'provides a catalogue of digital media.': '提供一個型錄的數位媒體。',
'record does not exist': '記錄不存在',
'record id': '記錄 ID',
'records deleted': '已刪除的記錄',
'red': '紅色',
'refresh': '重新整理',
'reported': '已報告',
'reports successfully imported.': '報告已順利匯入。',
'representation of the Polygon/Line.': '標記法的多邊形/行。',
'retired': '已撤回',
'retry': '重試',
'review': '檢閱',
'river': '金水河',
'secondary effect': '次要效果',
'see comment': '請參閱註解',
'selected': '已選取',
'separated from family': '分開系列',
'separated': '分居',
'short': 'Short',
'sides': '側邊',
'sign-up now': '現在註冊',
'simple': '簡單',
'single': '單身',
'skills': '技術',
'slim': 'Slim',
'specify': '指定',
'staff members': '人員成員',
'staff': '人員',
'state location': '位置狀態',
'state': '狀況',
'status': '狀態',
'straight': '直線',
'suffered financial losses': '艱辛的財務損失',
'supports nurses in the field to assess the situation, report on their activities and keep oversight.': '支援nurses,以將欄位中評估狀況,報告其活動和保留監督。',
'table': '表格',
'tall': '頁高',
'technical failure': '技術失敗',
'times and it is still not working. We give in. Sorry.': '時間,它仍無法運作。 我們提供。 抱歉。',
'to access the system': '來使用系統功能',
'total': '總計',
'tracks all shelters and stores basic details regarding them. It collaborates with other modules to track people associated with a shelter, the services available etc.': '追蹤所有shelters和儲存基本相關資訊。 它與其他模組追蹤人員相關聯的shelter,可用的服務等等。',
'tweepy module not available within the running Python - this needs installing for non-Tropo Twitter support!': 'tweepy模組內無法使用執行中的Python-這需要安裝非Tropo Twitter支援中心!',
'unable to parse csv file': '無法剖析CSV檔',
'unapproved': '核准',
'uncheck all': '取消全選',
'unidentified': '識別',
'uninhabitable = foundation and structure destroyed': 'uninhabitable = 地基及結構損毀',
'unknown': '不明',
'unspecified': '未指定的',
'unverified': '未驗證',
'updated': '已更新',
'updates only': '僅更新',
'urgent': '緊急',
'verified': '已驗證',
'view': '可視圖',
'volunteer': '志工',
'volunteers': '志工',
'wavy': '波浪形',
'weekly': '每週',
'white': '白色',
'wider area, longer term, usually contain multiple Activities': '寬區域,長期的,通常包含多個活動',
'widowed': '鰥居',
'window': '視窗',
'windows broken, cracks in walls, roof slightly damaged': 'Windows中斷,是否在牆面,屋脊略有損壞',
'wish': '希望',
'within human habitat': '在人類居住的範圍',
'xlwt module not available within the running Python - this needs installing for XLS output!': 'xlwt模組內無法使用執行中的Python-這需要安裝XLS輸出!',
'yes': '是',
}
|
flavour/eden
|
languages/zh-tw.py
|
Python
|
mit
| 338,534
|
[
"VisIt"
] |
4ffed5cc5216a1b9d187b8579e4393fbf12a331da78fb2e7e3a20e1b2f3131e9
|
#!/bin/env python
""" script to obtain release notes from DIRAC PRs
"""
from __future__ import print_function
from collections import defaultdict
from datetime import datetime, timedelta
import argparse
from pprint import pformat
import logging
import textwrap
import requests
try:
from GitTokens import GITHUBTOKEN
except ImportError:
raise ImportError(textwrap.dedent("""
***********************
Failed to import GITHUBTOKEN!
Please point your PYTHONPATH to the GitTokens.py file which contains
your "Personal Access Token" for Github
I.e.:
Filename: GitTokens.py
Content:
```
GITHUBTOKEN = "e0b83063396fc632646603f113437de9"
```
(without the triple quotes)
***********************
"""),
)
SESSION = requests.Session()
SESSION.headers.update({'Authorization': "token %s " % GITHUBTOKEN})
logging.basicConfig(level=logging.WARNING, format='%(levelname)-5s - %(name)-8s: %(message)s')
LOGGER = logging.getLogger('GetReleaseNotes')
def req2Json(url, parameterDict=None, requestType='GET'):
"""Call to github API using requests package."""
log = LOGGER.getChild("Requests")
log.debug("Running %s with %s ", requestType, parameterDict)
req = getattr(SESSION, requestType.lower())(url, json=parameterDict)
if req.status_code not in (200, 201):
log.error("Unable to access API: %s", req.text)
raise RuntimeError("Failed to access API")
log.debug("Result obtained:\n %s", pformat(req.json()))
return req.json()
def getCommands(*args):
"""Create a flat list.
:param *args: list of strings or tuples/lists
:returns: flattened list of strings
"""
comList = []
for arg in args:
if isinstance(arg, (tuple, list)):
comList.extend(getCommands(*arg))
else:
comList.append(arg)
return comList
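# Illustrative only (not in the original script): getCommands flattens
# arbitrarily nested lists/tuples of strings, e.g.
#   getCommands('fix', ('a', ['b', 'c'])) -> ['fix', 'a', 'b', 'c']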
def checkRate():
"""Return the result for check_rate call."""
rate = req2Json(url="https://api.github.com/rate_limit")
LOGGER.getChild("Rate").info("Remaining calls to github API are %s of %s",
rate['rate']['remaining'], rate['rate']['limit'])
def _parsePrintLevel(level):
"""Translate debug count to logging level."""
level = level if level <= 2 else 2
return [logging.WARNING,
logging.INFO,
logging.DEBUG,
][level]
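# For illustration: a "-d" count of 0 keeps logging.WARNING, 1 selects
# logging.INFO, and 2 or more is clamped to logging.DEBUG.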
def getFullSystemName(name):
"""Translate abbreviations to full system names."""
name = {'API': 'Interfaces',
'AS': 'AccountingSystem',
'CS': 'ConfigurationSystem',
'Config': 'ConfigurationSystem',
'Configuration': 'ConfigurationSystem',
'DMS': 'DataManagementSystem',
'DataManagement': 'DataManagementSystem',
'FS': 'FrameworkSystem',
'Framework': 'FrameworkSystem',
'MS': 'MonitoringSystem',
'Monitoring': 'MonitoringSystem',
'RMS': 'RequestManagementSystem',
'RequestManagement': 'RequestManagementSystem',
'RSS': 'ResourceStatusSystem',
'ResourceStatus': 'ResourceStatusSystem',
'SMS': 'StorageManagamentSystem',
'StorageManagement': 'StorageManagamentSystem',
'TS': 'TransformationSystem',
'TMS': 'TransformationSystem',
'Transformation': 'TransformationSystem',
'WMS': 'WorkloadManagementSystem',
'Workload': 'WorkloadManagementSystem',
}.get(name, name)
return name
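# Example (illustrative): getFullSystemName('WMS') returns
# 'WorkloadManagementSystem', while an unknown name is passed through
# unchanged, e.g. getFullSystemName('Docs') -> 'Docs'.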
def parseForReleaseNotes(commentBody):
"""Look for "BEGINRELEASENOTES / ENDRELEASENOTES" and extend releaseNoteList if there are entries."""
if not all(tag in commentBody for tag in ("BEGINRELEASENOTES", "ENDRELEASENOTES")):
return ''
return commentBody.split("BEGINRELEASENOTES")[1].split("ENDRELEASENOTES")[0]
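# Hedged example of the PR body convention assumed here:
#   parseForReleaseNotes("text\nBEGINRELEASENOTES\n*WMS\nFIX: typo\nENDRELEASENOTES")
# returns "\n*WMS\nFIX: typo\n"; a body missing either tag yields ''.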
def collateReleaseNotes(prs):
"""Put the release notes in the proper order.
FIXME: Tag numbers could be obtained by getting the last tag with a name similar to
the branch, will print out just the base branch for now.
"""
releaseNotes = ""
for baseBranch, pr in prs.iteritems():
releaseNotes += "[%s]\n\n" % baseBranch
systemChangesDict = defaultdict(list)
for prid, content in pr.iteritems():
notes = content['comment']
system = ''
for line in notes.splitlines():
line = line.strip()
if line.startswith("*"):
system = getFullSystemName(line.strip("*:").strip())
elif line:
splitline = line.split(":", 1)
if splitline[0] == splitline[0].upper() and len(splitline) > 1:
line = "%s: (#%s) %s" % (splitline[0], prid, splitline[1].strip())
systemChangesDict[system].append(line)
for system, changes in systemChangesDict.iteritems():
if system:
releaseNotes += "*%s\n\n" % system
releaseNotes += "\n".join(changes)
releaseNotes += "\n\n"
releaseNotes += "\n"
return releaseNotes
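# Sketch of the collated output shape (branch, system and PR number are
# illustrative values only):
#   [rel-v6r20]
#
#   *WorkloadManagementSystem
#
#   FIX: (#1234) corrected job status handling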
class GithubInterface(object):
"""Object to make calls to github API."""
def __init__(self, owner='DiracGrid', repo='Dirac'):
"""Set default values to parse release notes for DIRAC."""
self.owner = owner
self.repo = repo
self.branches = ['Integration', 'rel-v6r19', 'rel-v6r20']
self.openPRs = False
self.startDate = str(datetime.now() - timedelta(days=14))[:10]
self.printLevel = logging.WARNING
LOGGER.setLevel(self.printLevel)
@property
def _options(self):
"""Return options dictionary."""
return dict(owner=self.owner, repo=self.repo)
def parseOptions(self):
"""Parse the command line options."""
log = LOGGER.getChild('Options')
parser = argparse.ArgumentParser("Dirac Release Notes",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--branches", action="store", default=self.branches,
dest="branches", nargs='+',
help="branches to get release notes for")
parser.add_argument("--date", action="store", default=self.startDate, dest="startDate",
help="date after which PRs are checked, default (two weeks ago): %s" % self.startDate)
parser.add_argument("--openPRs", action="store_true", dest="openPRs", default=self.openPRs,
help="get release notes for open (unmerged) PRs, for testing purposes")
parser.add_argument("-d", "--debug", action="count", dest="debug", help="d, dd, ddd", default=0)
parser.add_argument("-r", "--repo", action="store", dest="repo", help="Repository to check: [Group/]Repo",
default='DiracGrid/Dirac')
parsed = parser.parse_args()
self.printLevel = _parsePrintLevel(parsed.debug)
LOGGER.setLevel(self.printLevel)
self.branches = parsed.branches
log.info('Getting PRs for: %s', self.branches)
self.startDate = parsed.startDate
log.info('Starting from: %s', self.startDate)
self.openPRs = parsed.openPRs
log.info('Also including openPRs?: %s', self.openPRs)
repo = parsed.repo
repos = repo.split('/')
if len(repos) == 1:
self.repo = repo
elif len(repos) == 2:
self.owner = repos[0]
self.repo = repos[1]
else:
raise RuntimeError("Cannot parse repo option: %s" % repo)
def _github(self, action):
"""Return the url to perform actions on github.
:param str action: command to use in the github API, see documentation there
:returns: url to be used
"""
log = LOGGER.getChild('GitHub')
options = dict(self._options)
options["action"] = action
ghURL = "https://api.github.com/repos/%(owner)s/%(repo)s/%(action)s" % options
log.debug('Calling: %s', ghURL)
return ghURL
def getGithubPRs(self, state="open", mergedOnly=False, perPage=100):
"""Get all PullRequests from github.
:param str state: state of the PRs, open/closed/all, default open
:param bool mergedOnly: if True, return only merged PRs; only sensible for state=closed
:returns: list of githubPRs
"""
url = self._github("pulls?state=%s&per_page=%s" % (state, perPage))
prs = req2Json(url=url)
if not mergedOnly:
return prs
# only merged PRs
prsToReturn = []
for pr in prs:
if pr.get('merged_at', None) is not None:
prsToReturn.append(pr)
return prsToReturn
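# Illustrative request built from the defaults above:
#   GET https://api.github.com/repos/DiracGrid/Dirac/pulls?state=closed&per_page=100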
def getNotesFromPRs(self, prs):
"""Loop over prs, get base branch, get PR comment and collate into dictionary.
:returns: dict of branch:dict(#PRID, dict(comment, mergeDate))
"""
rawReleaseNotes = defaultdict(dict)
for pr in prs:
baseBranch = pr['base']['label'][len("DiracGrid:"):]
if baseBranch not in self.branches:
continue
comment = parseForReleaseNotes(pr['body'])
prID = pr['number']
mergeDate = pr.get('merged_at', None)
mergeDate = mergeDate if mergeDate is not None else '9999-99-99'
if mergeDate[:10] < self.startDate:
continue
rawReleaseNotes[baseBranch].update({prID: dict(comment=comment, mergeDate=mergeDate)})
return rawReleaseNotes
def getReleaseNotes(self):
"""Create the release notes."""
if self.openPRs:
prs = self.getGithubPRs(state='open', mergedOnly=False)
else:
prs = self.getGithubPRs(state='closed', mergedOnly=True)
prs = self.getNotesFromPRs(prs)
releaseNotes = collateReleaseNotes(prs)
print(releaseNotes)
checkRate()
if __name__ == "__main__":
RUNNER = GithubInterface()
try:
RUNNER.parseOptions()
except RuntimeError as e:
LOGGER.error("Error during argument parsing: %s", e)
exit(1)
try:
RUNNER.getReleaseNotes()
except RuntimeError as e:
LOGGER.error("Error during runtime: %s", e)
exit(1)
|
petricm/DIRAC
|
docs/Tools/GetReleaseNotes.py
|
Python
|
gpl-3.0
| 9,873
|
[
"DIRAC"
] |
7ae3c4b2f111cfe2a5e16a09dd1fbea0c3d5e75b8bd826a7254ad56f124c317e
|
# -*- coding: utf-8 -*-
# YAFF is yet another force-field code.
# Copyright (C) 2011 Toon Verstraelen <Toon.Verstraelen@UGent.be>,
# Louis Vanduyfhuys <Louis.Vanduyfhuys@UGent.be>, Center for Molecular Modeling
# (CMM), Ghent University, Ghent, Belgium; all rights reserved unless otherwise
# stated.
#
# This file is part of YAFF.
#
# YAFF is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# YAFF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
'''GCMC simulation of rigid CO2 molecules inside the rigid MIL-53 framework'''
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from glob import glob
import os
from molmod.units import bar
from yaff.conversion.raspa import read_raspa_loading
def make_plot():
results = np.load('results.npy')
plt.clf()
plt.plot(results[:,0]/bar, results[:,1], marker='o', label='Yaff')
# Read the RASPA results
fns = sorted(glob(os.path.join('raspa','Output','System_0','*.data')))
results_raspa = []
for fn in fns:
T, P, fugacity, N, Nerr = read_raspa_loading(fn)
results_raspa.append([P,N])
results_raspa = np.asarray(results_raspa)
indexes = np.argsort(results_raspa[:,0])
results_raspa = results_raspa[indexes]
plt.plot(results_raspa[:,0]/bar, results_raspa[:,1], marker='.',
label='RASPA', linestyle='--')
plt.legend()
plt.xlabel("P [bar]")
plt.ylabel("Uptake [molecules/uc]")
plt.savefig('results.png')
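# Note, inferred from the unpacking above: read_raspa_loading is expected to
# return (T, P, fugacity, N, Nerr); only pressure P and loading N are plotted.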
if __name__=='__main__':
make_plot()
|
molmod/yaff
|
yaff/examples/007_monte_carlo/co2_in_mil53/process.py
|
Python
|
gpl-3.0
| 2,053
|
[
"RASPA"
] |
947d26fc2c4a17bdf4ff6c4b56a561c045547783cafa19d228a89475dbc580fd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Tests for Vowpal Wabbit LDA wrapper.
Will not be run unless the environment variable 'VOWPAL_WABBIT_PATH' is set
and points to the `vw` executable.
"""
import logging
import unittest
import os
import os.path
import tempfile
from collections import defaultdict
import six
from gensim.corpora import Dictionary
import gensim.models.wrappers.ldavowpalwabbit as ldavowpalwabbit
from gensim.models.wrappers.ldavowpalwabbit import LdaVowpalWabbit
module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder
datapath = lambda fname: os.path.join(module_path, 'test_data', fname)
# set up vars used in testing ("Deerwester" from the web tutorial)
TOPIC_WORDS = [
'cat lion leopard mouse jaguar lynx cheetah tiger kitten puppy'.split(),
'engine car wheel brakes tyre motor suspension cylinder exhaust clutch'.split(),
'alice bob robert tim sue rachel dave harry alex jim'.split(),
'c cplusplus go python haskell scala java ruby csharp erlang'.split(),
'eggs ham mushrooms cereal coffee beans tea juice sausages bacon'.split()
]
def get_corpus():
text_path = datapath('ldavowpalwabbit.txt')
dict_path = datapath('ldavowpalwabbit.dict.txt')
dictionary = Dictionary.load_from_text(dict_path)
with open(text_path) as fhandle:
corpus = [dictionary.doc2bow(l.strip().split()) for l in fhandle]
return corpus, dictionary
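# Hedged sketch of what doc2bow produces for one line: with both tokens in
# the dictionary, "cat cat lion" maps to [(id_cat, 2), (id_lion, 1)];
# tokens missing from the dictionary are silently dropped.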
class TestLdaVowpalWabbit(unittest.TestCase):
def setUp(self):
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
msg = "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping tests"
try:
raise unittest.SkipTest(msg)
except AttributeError:
# couldn't find a way of skipping tests in python 2.6
self.vw_path = None
corpus, dictionary = get_corpus()
self.vw_path = vw_path
self.corpus = corpus
self.dictionary = dictionary
def test_save_load(self):
"""Test loading/saving LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(self.vw_path,
corpus=self.corpus,
passes=10,
chunksize=256,
id2word=self.dictionary,
cleanup_files=True,
alpha=0.1,
eta=0.1,
num_topics=len(TOPIC_WORDS),
random_seed=1)
with tempfile.NamedTemporaryFile() as fhandle:
lda.save(fhandle.name)
lda2 = LdaVowpalWabbit.load(fhandle.name)
# ensure public fields are saved/loaded correctly
saved_fields = [lda.alpha, lda.chunksize, lda.cleanup_files,
lda.decay, lda.eta, lda.gamma_threshold,
lda.id2word, lda.num_terms, lda.num_topics,
lda.passes, lda.random_seed, lda.vw_path]
loaded_fields = [lda2.alpha, lda2.chunksize, lda2.cleanup_files,
lda2.decay, lda2.eta, lda2.gamma_threshold,
lda2.id2word, lda2.num_terms, lda2.num_topics,
lda2.passes, lda2.random_seed, lda2.vw_path]
self.assertEqual(saved_fields, loaded_fields)
# ensure topic matrices are saved/loaded correctly
saved_topics = lda.show_topics(num_topics=5, num_words=10)
loaded_topics = lda2.show_topics(num_topics=5, num_words=10)
self.assertEqual(loaded_topics, saved_topics)
def test_model_update(self):
"""Test updating existing LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(self.vw_path,
corpus=[self.corpus[0]],
passes=10,
chunksize=256,
id2word=self.dictionary,
cleanup_files=True,
alpha=0.1,
eta=0.1,
num_topics=len(TOPIC_WORDS),
random_seed=1)
lda.update(self.corpus[1:])
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_perplexity(self):
"""Test LdaVowpalWabbit perplexity is within expected range."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(self.vw_path,
corpus=self.corpus,
passes=10,
chunksize=256,
id2word=self.dictionary,
cleanup_files=True,
alpha=0.1,
eta=0.1,
num_topics=len(TOPIC_WORDS),
random_seed=1)
# varies, but should be between -1 and -5
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_topic_coherence(self):
"""Test LdaVowpalWabbit topic coherence."""
if not self.vw_path: # for python 2.6
return
corpus, dictionary = get_corpus()
lda = LdaVowpalWabbit(self.vw_path,
corpus=corpus,
passes=10,
chunksize=256,
id2word=dictionary,
cleanup_files=True,
alpha=0.1,
eta=0.1,
num_topics=len(TOPIC_WORDS),
random_seed=1)
lda.print_topics(5, 10)
# map words in known topic to an ID
topic_map = {}
for i, words in enumerate(TOPIC_WORDS):
topic_map[frozenset(words)] = i
n_coherent = 0
for topic_id in range(lda.num_topics):
topic = lda.show_topic(topic_id, topn=20)
# get all words from LDA topic
topic_words = [w[1] for w in topic]
# get list of original topics that each word actually belongs to
ids = []
for word in topic_words:
for src_topic_words, src_topic_id in six.iteritems(topic_map):
if word in src_topic_words:
ids.append(src_topic_id)
# count the number of times each original topic appears
counts = defaultdict(int)
for found_topic_id in ids:
counts[found_topic_id] += 1
# if at least 6/10 words assigned to same topic, consider it coherent
max_count = 0
for count in six.itervalues(counts):
max_count = max(max_count, count)
if max_count >= 6:
n_coherent += 1
# not 100% deterministic, but should always get 3+ coherent topics
self.assertTrue(n_coherent >= 3)
def test_corpus_to_vw(self):
"""Test corpus to Vowpal Wabbit format conversion."""
if not self.vw_path: # for python 2.6
return
corpus = [[(0, 5), (7, 1), (5, 3), (0, 2)],
[(7, 2), (2, 1), (3, 11)],
[(1, 1)],
[],
[(5, 2), (0, 1)]]
expected = """
| 0:5 7:1 5:3 0:2
| 7:2 2:1 3:11
| 1:1
|
| 5:2 0:1
""".strip()
result = '\n'.join(ldavowpalwabbit.corpus_to_vw(corpus))
self.assertEqual(result, expected)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
ziky90/gensim
|
gensim/test/test_ldavowpalwabbit_wrapper.py
|
Python
|
lgpl-2.1
| 8,151
|
[
"Jaguar"
] |
abcac4d6519b267a903e107bbecbe37d5b45051d7d30babe29e3511ba82cfe84
|
import sys
tests = [("testExecs/testDescriptors.exe", "", {}), ]
longTests = []
if __name__ == '__main__':
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
jandom/rdkit
|
Code/GraphMol/Descriptors/test_list.py
|
Python
|
bsd-3-clause
| 239
|
[
"RDKit"
] |
4fc6ebbca0a5abd2b40c1f178de4cb20d650c029fa52ee8b659ac5a5292d9abf
|
#!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test Force classes in forces.py.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import pickle
import nose.tools
from openmmtools import testsystems, states
from openmmtools.forces import *
from openmmtools.forces import _compute_sphere_volume, _compute_harmonic_radius
# =============================================================================
# CONSTANTS
# =============================================================================
# =============================================================================
# TESTING UTILITIES
# =============================================================================
def assert_pickles_equal(object1, object2):
assert pickle.dumps(object1) == pickle.dumps(object2)
def assert_quantity_almost_equal(object1, object2):
assert utils.is_quantity_close(object1, object2), '{} != {}'.format(object1, object2)
def assert_equal(*args, **kwargs):
"""Python 2 work-around to be able to yield nose.tools.assert_equal"""
# TODO: Just yield nose.tools.assert_equal after we have dropped Python2 support.
nose.tools.assert_equal(*args, **kwargs)
# =============================================================================
# UTILITY FUNCTIONS TESTS
# =============================================================================
def test_find_forces():
"""Generator of tests for the find_forces() utility function."""
system = testsystems.TolueneVacuum().system
# Add two CustomBondForces, one is restorable.
restraint_force = HarmonicRestraintBondForce(spring_constant=1.0*unit.kilojoule_per_mole/unit.angstroms**2,
restrained_atom_index1=2, restrained_atom_index2=5)
system.addForce(restraint_force)
system.addForce(openmm.CustomBondForce('0.0'))
def assert_forces_equal(found_forces, expected_force_classes):
# Forces should be ordered by their index.
assert list(found_forces.keys()) == sorted(found_forces.keys())
found_forces = {(i, force.__class__) for i, force in found_forces.items()}
nose.tools.assert_equal(found_forces, set(expected_force_classes))
# Test find force without including subclasses.
found_forces = find_forces(system, openmm.CustomBondForce)
yield assert_forces_equal, found_forces, [(6, openmm.CustomBondForce)]
# Test find force and include subclasses.
found_forces = find_forces(system, openmm.CustomBondForce, include_subclasses=True)
yield assert_forces_equal, found_forces, [(5, HarmonicRestraintBondForce),
(6, openmm.CustomBondForce)]
found_forces = find_forces(system, RadiallySymmetricRestraintForce, include_subclasses=True)
yield assert_forces_equal, found_forces, [(5, HarmonicRestraintBondForce)]
# Test exact name matching.
found_forces = find_forces(system, 'HarmonicBondForce')
yield assert_forces_equal, found_forces, [(0, openmm.HarmonicBondForce)]
# Find all forces containing the word "Harmonic".
found_forces = find_forces(system, '.*Harmonic.*')
yield assert_forces_equal, found_forces, [(0, openmm.HarmonicBondForce),
(1, openmm.HarmonicAngleForce),
(5, HarmonicRestraintBondForce)]
# Find all forces from the name including the subclasses.
# Test find force and include subclasses.
found_forces = find_forces(system, 'CustomBond.*', include_subclasses=True)
yield assert_forces_equal, found_forces, [(5, HarmonicRestraintBondForce),
(6, openmm.CustomBondForce)]
# With check_multiple=True only one force is returned.
force_idx, force = find_forces(system, openmm.NonbondedForce, only_one=True)
yield assert_forces_equal, {force_idx: force}, [(3, openmm.NonbondedForce)]
# An exception is raised with "only_one" if multiple forces are found.
yield nose.tools.assert_raises, MultipleForcesError, find_forces, system, 'CustomBondForce', True, True
# An exception is raised with "only_one" if the force wasn't found.
yield nose.tools.assert_raises, NoForceFoundError, find_forces, system, 'NonExistentForce', True
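# Hedged usage sketch mirroring the assertions above: find_forces returns a
# dict ordered by force index, e.g.
#   found = find_forces(system, openmm.CustomBondForce, include_subclasses=True)
#   # -> {5: HarmonicRestraintBondForce(...), 6: CustomBondForce(...)}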
# =============================================================================
# RESTRAINTS TESTS
# =============================================================================
class TestRadiallySymmetricRestraints(object):
"""Test radially symmetric receptor-ligand restraint classes."""
@classmethod
def setup_class(cls):
cls.well_radius = 12.0 * unit.angstroms
cls.spring_constant = 15000.0 * unit.joule/unit.mole/unit.nanometers**2
cls.restrained_atom_indices1 = [2, 3, 4]
cls.restrained_atom_indices2 = [10, 11]
cls.restrained_atom_index1 = 12
cls.restrained_atom_index2 = 2
cls.custom_parameter_name = 'restraints_parameter'
cls.restraints = [
HarmonicRestraintForce(spring_constant=cls.spring_constant,
restrained_atom_indices1=cls.restrained_atom_indices1,
restrained_atom_indices2=cls.restrained_atom_indices2),
HarmonicRestraintBondForce(spring_constant=cls.spring_constant,
restrained_atom_index1=cls.restrained_atom_index1,
restrained_atom_index2=cls.restrained_atom_index2),
FlatBottomRestraintForce(spring_constant=cls.spring_constant, well_radius=cls.well_radius,
restrained_atom_indices1=cls.restrained_atom_indices1,
restrained_atom_indices2=cls.restrained_atom_indices2),
FlatBottomRestraintBondForce(spring_constant=cls.spring_constant, well_radius=cls.well_radius,
restrained_atom_index1=cls.restrained_atom_index1,
restrained_atom_index2=cls.restrained_atom_index2),
HarmonicRestraintForce(spring_constant=cls.spring_constant,
restrained_atom_indices1=cls.restrained_atom_indices1,
restrained_atom_indices2=cls.restrained_atom_indices2,
controlling_parameter_name=cls.custom_parameter_name),
FlatBottomRestraintBondForce(spring_constant=cls.spring_constant, well_radius=cls.well_radius,
restrained_atom_index1=cls.restrained_atom_index1,
restrained_atom_index2=cls.restrained_atom_index2,
controlling_parameter_name=cls.custom_parameter_name),
]
def test_restorable_forces(self):
"""Test that the restraint interface can be restored after serialization."""
for restorable_force in self.restraints:
force_serialization = openmm.XmlSerializer.serialize(restorable_force)
deserialized_force = utils.RestorableOpenMMObject.deserialize_xml(force_serialization)
yield assert_pickles_equal, restorable_force, deserialized_force
def test_restraint_properties(self):
"""Test that properties work as expected."""
for restraint in self.restraints:
yield assert_quantity_almost_equal, restraint.spring_constant, self.spring_constant
if isinstance(restraint, FlatBottomRestraintForceMixIn):
yield assert_quantity_almost_equal, restraint.well_radius, self.well_radius
if isinstance(restraint, RadiallySymmetricCentroidRestraintForce):
yield assert_equal, restraint.restrained_atom_indices1, self.restrained_atom_indices1
yield assert_equal, restraint.restrained_atom_indices2, self.restrained_atom_indices2
else:
assert isinstance(restraint, RadiallySymmetricBondRestraintForce)
yield assert_equal, restraint.restrained_atom_indices1, [self.restrained_atom_index1]
yield assert_equal, restraint.restrained_atom_indices2, [self.restrained_atom_index2]
def test_controlling_parameter_name(self):
"""Test that the controlling parameter name enters the energy function correctly."""
default_name_restraint = self.restraints[0]
custom_name_restraints = self.restraints[-2:]
assert default_name_restraint.controlling_parameter_name == 'lambda_restraints'
energy_function = default_name_restraint.getEnergyFunction()
assert 'lambda_restraints' in energy_function
assert self.custom_parameter_name not in energy_function
for custom_name_restraint in custom_name_restraints:
assert custom_name_restraint.controlling_parameter_name == self.custom_parameter_name
energy_function = custom_name_restraint.getEnergyFunction()
assert 'lambda_restraints' not in energy_function
assert self.custom_parameter_name in energy_function
def test_compute_restraint_volume(self):
"""Test the calculation of the restraint volume."""
testsystem = testsystems.TolueneVacuum()
thermodynamic_state = states.ThermodynamicState(testsystem.system, 300*unit.kelvin)
energy_cutoffs = np.linspace(0.0, 10.0, num=3)
radius_cutoffs = np.linspace(0.0, 5.0, num=3) * unit.nanometers
def assert_integrated_analytical_equal(restraint, square_well, radius_cutoff, energy_cutoff):
args = [thermodynamic_state, square_well, radius_cutoff, energy_cutoff]
# For flat-bottom, the calculation is only partially analytical.
analytical_volume = restraint._compute_restraint_volume(*args)
# Make sure there's no analytical component (from _determine_integral_limits)
# in the numerical integration calculation.
copied_restraint = copy.deepcopy(restraint)
for parent_cls in [RadiallySymmetricCentroidRestraintForce, RadiallySymmetricBondRestraintForce]:
if isinstance(copied_restraint, parent_cls):
copied_restraint.__class__ = parent_cls
integrated_volume = copied_restraint._integrate_restraint_volume(*args)
err_msg = '{}: square_well={}, radius_cutoff={}, energy_cutoff={}\n'.format(
restraint.__class__.__name__, square_well, radius_cutoff, energy_cutoff)
err_msg += 'integrated_volume={}, analytical_volume={}'.format(integrated_volume,
analytical_volume)
assert utils.is_quantity_close(integrated_volume, analytical_volume, rtol=1e-2), err_msg
for restraint in self.restraints:
# Test integrated and analytical agree with no cutoffs.
yield assert_integrated_analytical_equal, restraint, False, None, None
for square_well in [True, False]:
# Try energies and distances singly and together.
for energy_cutoff in energy_cutoffs:
yield assert_integrated_analytical_equal, restraint, square_well, None, energy_cutoff
for radius_cutoff in radius_cutoffs:
yield assert_integrated_analytical_equal, restraint, square_well, radius_cutoff, None
for energy_cutoff, radius_cutoff in zip(energy_cutoffs, radius_cutoffs):
yield assert_integrated_analytical_equal, restraint, square_well, radius_cutoff, energy_cutoff
for energy_cutoff, radius_cutoff in zip(energy_cutoffs, reversed(radius_cutoffs)):
yield assert_integrated_analytical_equal, restraint, square_well, radius_cutoff, energy_cutoff
def test_compute_standard_state_correction(self):
"""Test standard state correction works correctly in all ensembles."""
toluene = testsystems.TolueneVacuum()
alanine = testsystems.AlanineDipeptideExplicit()
big_radius = 200.0 * unit.nanometers
temperature = 300.0 * unit.kelvin
# Limit the maximum volume to 1nm^3.
distance_unit = unit.nanometers
state_volume = 1.0 * distance_unit**3
box_vectors = np.identity(3) * np.cbrt(state_volume / distance_unit**3) * distance_unit
alanine.system.setDefaultPeriodicBoxVectors(*box_vectors)
toluene.system.setDefaultPeriodicBoxVectors(*box_vectors)
# Create systems in various ensembles (NVT, NPT and non-periodic).
nvt_state = states.ThermodynamicState(alanine.system, temperature)
npt_state = states.ThermodynamicState(alanine.system, temperature, 1.0*unit.atmosphere)
nonperiodic_state = states.ThermodynamicState(toluene.system, temperature)
def assert_equal_ssc(expected_restraint_volume, restraint, thermodynamic_state, square_well=False,
radius_cutoff=None, energy_cutoff=None, max_volume=None):
expected_ssc = -math.log(STANDARD_STATE_VOLUME/expected_restraint_volume)
ssc = restraint.compute_standard_state_correction(thermodynamic_state, square_well,
radius_cutoff, energy_cutoff, max_volume)
err_msg = '{} computed SSC != expected SSC'.format(restraint.__class__.__name__)
nose.tools.assert_equal(ssc, expected_ssc, msg=err_msg)
for restraint in self.restraints:
# In NPT ensemble, an exception is thrown if max_volume is not provided.
with nose.tools.assert_raises_regexp(TypeError, "max_volume must be provided"):
restraint.compute_standard_state_correction(npt_state)
# With non-periodic systems and reweighting to square-well
# potential, a cutoff must be given.
with nose.tools.assert_raises_regexp(TypeError, "One between radius_cutoff"):
restraint.compute_standard_state_correction(nonperiodic_state, square_well=True)
# While there are no problems if we don't reweight to a square-well potential.
restraint.compute_standard_state_correction(nonperiodic_state, square_well=False)
# SSC is limited by max_volume (in NVT and NPT).
assert_equal_ssc(state_volume, restraint, nvt_state, radius_cutoff=big_radius)
assert_equal_ssc(state_volume, restraint, npt_state, radius_cutoff=big_radius,
max_volume='system')
# SSC is not limited by max_volume with non periodic systems.
expected_ssc = -math.log(STANDARD_STATE_VOLUME/state_volume)
ssc = restraint.compute_standard_state_correction(nonperiodic_state, radius_cutoff=big_radius)
assert expected_ssc < ssc, (restraint, expected_ssc, ssc)
# Check reweighting to square-well potential.
expected_volume = _compute_sphere_volume(big_radius)
assert_equal_ssc(expected_volume, restraint, nonperiodic_state,
square_well=True, radius_cutoff=big_radius)
energy_cutoff = 10 * nonperiodic_state.kT
radius_cutoff = _compute_harmonic_radius(self.spring_constant, energy_cutoff)
if isinstance(restraint, FlatBottomRestraintForceMixIn):
radius_cutoff += self.well_radius
expected_volume = _compute_sphere_volume(radius_cutoff)
assert_equal_ssc(expected_volume, restraint, nonperiodic_state,
square_well=True, radius_cutoff=radius_cutoff)
max_volume = 3.0 * unit.nanometers**3
assert_equal_ssc(max_volume, restraint, nonperiodic_state,
square_well=True, max_volume=max_volume)
|
choderalab/openmmtools
|
openmmtools/tests/test_forces.py
|
Python
|
mit
| 16,194
|
[
"OpenMM"
] |
8be20f55249b6babc19931067425c3816c008881eaf49f4cf8bcd5ab1fdf7af8
|
#######################################################################################
# Python-code: SAINT pre-processing from Scaffold "Samples Report" output
# Author: Brent Kuenzi
#######################################################################################
# This program reads in a raw Scaffold "Samples Report" output and a user generated
# bait file and autoformats it into prey and interaction files for SAINTexpress
# analysis
#######################################################################################
# Copyright (C) Brent Kuenzi.
# Permission is granted to copy, distribute and/or modify this document
# under the terms of the GNU Free Documentation License, Version 1.3
# or any later version published by the Free Software Foundation;
# with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts.
# A copy of the license is included in the section entitled "GNU
# Free Documentation License".
#######################################################################################
## REQUIRED INPUT ##
# 1) infile: Scaffold "Samples Report" output
# 2) baitfile: SAINT formatted bait file generated in Galaxy
# 3) fasta_db: fasta database for use (defaults to SwissProt_HUMAN_2014_08.fasta)
# 4) prey: Y or N for generating a prey file
# 5) make_bait: String of bait names, assignment, and test or control boolean
#######################################################################################
import sys
import os.path
import re
infile = sys.argv[1]
#Scaffold "Samples Report" output.
prey = sys.argv[2]
# Y or N boolean from Galaxy.
fasta_db = sys.argv[3]
tool_path = sys.argv[8]
if fasta_db == "None":
fasta_db = str(tool_path) + "/SwissProt_HUMAN_2014_08.fasta"
make_bait = sys.argv[6]
bait_bool = sys.argv[9]
def bait_create(baits, infile):
# Writes the SAINT-formatted bait.txt from the space-separated bait string.
baits = baits.split()
i = 0
bait_file_tmp = open("bait.txt", "w")
order = []
bait_cache = []
while i < len(baits):
if baits[i+2] == "true":
T_C = "C"
else:
T_C = "T"
bait_line = baits[i] + "\t" + baits[i+1] + "\t" + T_C + "\n"
bait_cache.append(str(bait_line))
i = i + 3
for cache_line in bait_cache:
bait_file_tmp.write(cache_line)
bait_file_tmp.close()
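# Illustrative bait.txt line written above (names are hypothetical):
# a triple "IP_WT BAIT1 false" becomes "IP_WT\tBAIT1\tT", while a third
# token of "true" marks the bait as a control ("C").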
if bait_bool == 'false':
bait_create(make_bait, infile)
baitfile = "bait.txt"
else:
bait_temp_file = open(sys.argv[10], 'r')
bait_cache = bait_temp_file.readlines()
bait_file_tmp = open("bait.txt", "wr")
for cache_line in bait_cache:
bait_file_tmp.write(cache_line)
bait_file_tmp.close()
baitfile = "bait.txt"
class ReturnValue1(object):
def __init__(self, sequence, gene):
self.seqlength = sequence
self.genename = gene
class ReturnValue2(object):
def __init__(self, getdata, getproteins, getheader):
self.data = getdata
self.proteins = getproteins
self.header = getheader
def main(Scaffold_input, baits):
bait_check(baitfile, Scaffold_input)
make_inter(Scaffold_input)
if prey == 'true':
make_prey(Scaffold_input)
no_error_inter(Scaffold_input)
os.rename('prey.txt', sys.argv[5])
elif prey == 'false':
if os.path.isfile('error proteins.txt') == True:
no_error_inter(Scaffold_input)
pass
elif prey not in ('true', 'false'):
sys.exit("Invalid Prey Argument: true or false")
def get_info(uniprot_accession_in):
# Get aminoacid lengths and gene name.
error = open('error proteins.txt', 'a+')
data = open(fasta_db, 'r')
data_lines = data.readlines()
db_len = len(data_lines)
seqlength = 0
count = 0
last_line = data_lines[-1]
for data_line in data_lines:
if ">sp" in data_line:
namer = data_line.split("|")[2]
if uniprot_accession_in == data_line.split("|")[1]:
match = count + 1
if 'GN=' in data_line:
lst = data_line.split('GN=')
lst2 = lst[1].split(' ')
genename = lst2[0]
if 'GN=' not in data_line:
genename = 'NA'
while ">sp" not in data_lines[match]:
if match <= db_len:
seqlength = seqlength + len(data_lines[match].strip())
if data_lines[match] == last_line:
break
match = match + 1
else:
break
return ReturnValue1(seqlength, genename)
if uniprot_accession_in == namer.split(" ")[0]:
match = count + 1
# Ensures consistent spacing throughout.
if 'GN=' in data_line:
lst = data_line.split('GN=')
lst2 = lst[1].split(' ')
genename = lst2[0]
if 'GN=' not in data_line:
genename = 'NA'
while ">sp" not in data_lines[match]:
if match <= db_len:
seqlength = seqlength + len(data_lines[match].strip())
if data_lines[match] == last_line:
break
match = match + 1
else:
break
return ReturnValue1(seqlength, genename)
count = count + 1
if seqlength == 0:
error.write(uniprot_accession_in + '\t' + "Uniprot not in Fasta" + '\n')
error.close()
seqlength = 'NA'
genename = 'NA'
return ReturnValue1(seqlength, genename)
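# Example (assumed SwissProt-style header) of what get_info parses:
#   >sp|P04637|P53_HUMAN Cellular tumor antigen p53 OS=Homo sapiens GN=TP53 PE=1 SV=4
# yields genename 'TP53' and seqlength as the summed length of the
# following sequence lines.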
def readtab(infile):
with open(infile, 'r') as input_file:
# read in tab-delim text
output = []
for input_line in input_file:
input_line = input_line.strip()
temp = input_line.split('\t')
output.append(temp)
return output
def read_Scaffold(Scaffold_input):
# Get data, proteins and header from Scaffold output
dupes = readtab(Scaffold_input)
cnt = 0
for Scaffold_line in dupes:
cnt += 1
if Scaffold_line[0] == '#':
# Finds the start of second header.
header_start = cnt-1
header = dupes[header_start]
prot_start = header.index("Accession Number")
data = dupes[header_start+1:len(dupes)-2]
# Cut off blank line and END OF FILE.
proteins = []
for Scaffold_line in data:
Scaffold_line[4] = Scaffold_line[4].split()[0]
# Removes the (+##) that sometimes is attached.
uniprot_re = re.compile("[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}")
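# Hedged illustration: this UniProt accession pattern matches IDs such as
# "P04637" or "Q9Y6K9"; entries that do not match fall through to the
# pipe-separated "_HUMAN"/"_YEAST"/"_MOUSE" handling below.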
for protein in data:
prot_id = uniprot_re.match(protein[prot_start])
if prot_id:
proteins.append(prot_id.group())
else:
prot_ids = protein[prot_start].split("|")
for prot_id in prot_ids:
if "_HUMAN" in prot_id:
proteins.append(prot_id)
elif "_YEAST" in prot_id:
proteins.append(prot_id)
elif "_MOUSE" in prot_id:
proteins.append(prot_id)
else:
print "Accession must be uniprot ID or gene"
return ReturnValue2(data, proteins, header)
def make_inter(Scaffold_input):
bait = readtab(baitfile)
data = read_Scaffold(Scaffold_input).data
header = read_Scaffold(Scaffold_input).header
proteins = read_Scaffold(Scaffold_input).proteins
bait_index = []
for bait_line in bait:
bait_index.append(header.index(bait_line[0]))
# Find just the baits defined in bait file.
with open('inter.txt', 'w') as inter_file:
a = 0; l = 0
for bb in bait:
for lst in data:
inter_file.write(header[bait_index[l]] + '\t' + bb[1] + '\t' + proteins[a] + '\t'
+ lst[bait_index[l]] + '\n')
a += 1
if a == len(proteins):
a = 0; l += 1
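# Illustrative inter.txt row written above (tab-separated, hypothetical values):
#   "IP_WT\tBAIT1\tP04637\t12"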
def make_prey(Scaffold_input):
proteins = read_Scaffold(Scaffold_input).proteins
output_file = open("prey.txt", 'w')
for protein in proteins:
protein = protein.replace("\n", "")
# Remove \n for input into function.
protein = protein.replace("\r", "")
# Ditto for \r.
seq = get_info(protein).seqlength
GN = get_info(protein).genename
if seq != 'NA':
if GN != 'NA':
output_file.write(protein + "\t" + str(seq) + "\t" + str(GN) + "\n")
output_file.close()
def no_error_inter(Scaffold_input):
# Remake inter file without protein errors from Uniprot.
err = readtab("error proteins.txt")
bait = readtab(baitfile)
data = read_Scaffold(Scaffold_input).data
header = read_Scaffold(Scaffold_input).header
bait_index = []
for bait_line in bait:
bait_index.append(header.index(bait_line[0]))
proteins = read_Scaffold(Scaffold_input).proteins
errors = []
for e in err:
errors.append(e[0])
with open('inter.txt', 'w') as y:
l = 0; a = 0
for bb in bait:
for lst in data:
if proteins[a] not in errors:
y.write(header[bait_index[l]] + '\t' + bb[1] + '\t' + proteins[a] + '\t'
+ lst[bait_index[l]] + '\n')
a += 1
if a == len(proteins):
l += 1; a = 0
def bait_check(bait, Scaffold_input):
# Check that bait names share Scaffold header titles.
bait_in = readtab(bait)
header = read_Scaffold(Scaffold_input).header
for i in bait_in:
if i[0] not in header:
sys.exit("Bait must share header titles with Scaffold output")
if __name__ == '__main__':
main(infile, baitfile)
os.rename("inter.txt", sys.argv[4])
os.rename("bait.txt", sys.argv[7])
|
bornea/APOSTL
|
SAINT_preprocessing/toolshed_version/SAINT_preprocessing.py
|
Python
|
gpl-2.0
| 10,226
|
[
"Galaxy"
] |
306fc4d72f4998e057fd023bda6c80bd129cdb6aa72d5ff8b51ad6ae3f156387
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at https://mozilla.org/MPL/2.0/.
from ..sile import add_sile
from .sile import SileSiesta, SileCDFSiesta
from sisl._internal import set_module
from sisl._help import xml_parse
from sisl.atom import Atom
from sisl.orbital import SphericalOrbital
from sisl._array import arrayd, aranged
from sisl.unit.siesta import unit_convert
from sisl.utils.cmd import default_ArgumentParser, default_namespace
from sisl.utils import PropertyDict, strmap
__all__ = ['ionxmlSileSiesta', 'ionncSileSiesta']
@set_module("sisl.io.siesta")
class ionxmlSileSiesta(SileSiesta):
""" Basis set information in xml format
Note that the ``ion`` files are equivalent to the ``ion.xml`` files.
"""
def read_basis(self):
""" Returns data associated with the ion.xml file """
# Get the element-tree
root = xml_parse(self.file).getroot()
# Get number of orbitals
label = root.find('label').text.strip()
Z = int(root.find('z').text) # atomic number, negative for floating
mass = float(root.find('mass').text)
# Read in the PAO's
paos = root.find('paos')
# Now loop over all orbitals
orbital = []
# All orbital data
Bohr2Ang = unit_convert('Bohr', 'Ang')
for orb in paos:
n = int(orb.get('n'))
l = int(orb.get('l'))
z = int(orb.get('z')) # zeta
q0 = float(orb.get('population'))
P = not int(orb.get('ispol')) == 0
# Radial components
rad = orb.find('radfunc')
npts = int(rad.find('npts').text)
# Grid spacing in Bohr (conversion is done later
# because the normalization is easier)
delta = float(rad.find('delta').text)
# Read in data to a list
dat = arrayd(rad.find('data').text.split())
# Since the read data has fewer significant digits we
# might as well re-create the table of the radial component.
r = aranged(npts) * delta
# To get it per Ang**3
# TODO, check that this is correct.
# The fact that we have to have it normalized means that we need
# to convert psi /sqrt(Bohr**3) -> /sqrt(Ang**3)
# \int psi^\dagger psi == 1
psi = dat[1::2] * r ** l / Bohr2Ang ** (3./2.)
# Create the sphericalorbital and then the atomicorbital
sorb = SphericalOrbital(l, (r * Bohr2Ang, psi), q0)
# This will be -l:l (this is the way siesta does it)
orbital.extend(sorb.toAtomicOrbital(n=n, zeta=z, P=P))
# Now create the atom and return
return Atom(Z, orbital, mass=mass, tag=label)
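# Minimal usage sketch (the file name is illustrative):
#   atom = ionxmlSileSiesta("C.ion.xml").read_basis()
#   print(atom.tag, len(atom.orbitals))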
@set_module("sisl.io.siesta")
class ionncSileSiesta(SileCDFSiesta):
""" Basis set information in NetCDF files
Note that the ``ion.nc`` files are equivalent to the ``ion.xml`` files.
"""
def read_basis(self):
""" Returns data associated with the ion.xml file """
no = len(self._dimension('norbs'))
# Get number of orbitals
label = self.Label.strip()
Z = int(self.Atomic_number)
mass = float(self.Mass)
# Retrieve values
orb_l = self._variable('orbnl_l')[:] # angular quantum number
orb_n = self._variable('orbnl_n')[:] # principal quantum number
orb_z = self._variable('orbnl_z')[:] # zeta
orb_P = self._variable('orbnl_ispol')[:] > 0 # polarization shell, or not
orb_q0 = self._variable('orbnl_pop')[:] # q0 for the orbitals
orb_delta = self._variable('delta')[:] # delta for the functions
orb_psi = self._variable('orb')[:, :]
# Now loop over all orbitals
orbital = []
# All orbital data
Bohr2Ang = unit_convert('Bohr', 'Ang')
for io in range(no):
n = orb_n[io]
l = orb_l[io]
z = orb_z[io]
P = orb_P[io]
# Grid spacing in Bohr (conversion is done later
# because the normalization is easier)
delta = orb_delta[io]
# Since the read data has fewer significant digits we
# might as well re-create the table of the radial component.
r = aranged(orb_psi.shape[1]) * delta
# To get it per Ang**3
# TODO, check that this is correct.
# The fact that we have to have it normalized means that we need
# to convert psi /sqrt(Bohr**3) -> /sqrt(Ang**3)
# \int psi^\dagger psi == 1
psi = orb_psi[io, :] * r ** l / Bohr2Ang ** (3./2.)
# Create the sphericalorbital and then the atomicorbital
sorb = SphericalOrbital(l, (r * Bohr2Ang, psi), orb_q0[io])
# This will be -l:l (this is the way siesta does it)
orbital.extend(sorb.toAtomicOrbital(n=n, zeta=z, P=P))
# Now create the atom and return
return Atom(Z, orbital, mass=mass, tag=label)
@default_ArgumentParser(description="Extracting basis-set information.")
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
#limit_args = kwargs.get('limit_arguments', True)
short = kwargs.get('short', False)
def opts(*args):
if short:
return args
return [args[0]]
# We limit the import to occur here
import argparse
Bohr2Ang = unit_convert('Bohr', 'Ang')
Ry2eV = unit_convert('Ry', 'eV')
# The first thing we do is adding the geometry to the NameSpace of the
# parser.
# This will enable custom actions to interact with the geometry in a
# straight forward manner.
# convert netcdf file to a dictionary
ion_nc = PropertyDict()
ion_nc.n = self._variable('orbnl_n')[:]
ion_nc.l = self._variable('orbnl_l')[:]
ion_nc.zeta = self._variable('orbnl_z')[:]
ion_nc.pol = self._variable('orbnl_ispol')[:]
ion_nc.orbital = self._variable('orb')[:]
# this gets converted later
delta = self._variable('delta')[:]
r = aranged(ion_nc.orbital.shape[1]).reshape(1, -1) * delta.reshape(-1, 1)
ion_nc.orbital *= r ** ion_nc.l.reshape(-1, 1) / Bohr2Ang ** (3./2.)
ion_nc.r = r * Bohr2Ang
ion_nc.kb = PropertyDict()
ion_nc.kb.n = self._variable('pjnl_n')[:]
ion_nc.kb.l = self._variable('pjnl_l')[:]
ion_nc.kb.e = self._variable('pjnl_ekb')[:] * Ry2eV
ion_nc.kb.proj = self._variable('proj')[:]
delta = self._variable('kbdelta')[:]
r = aranged(ion_nc.kb.proj.shape[1]).reshape(1, -1) * delta.reshape(-1, 1)
ion_nc.kb.proj *= r ** ion_nc.kb.l.reshape(-1, 1) / Bohr2Ang ** (3./2.)
ion_nc.kb.r = r * Bohr2Ang
vna = self._variable('vna')
r = aranged(vna[:].size) * vna.Vna_delta
ion_nc.vna = PropertyDict()
ion_nc.vna.v = vna[:] * Ry2eV * r / Bohr2Ang ** 3
ion_nc.vna.r = r * Bohr2Ang
# this is charge (not 1/sqrt(charge))
chlocal = self._variable('chlocal')
r = aranged(chlocal[:].size) * chlocal.Chlocal_delta
ion_nc.chlocal = PropertyDict()
ion_nc.chlocal.v = chlocal[:] * r / Bohr2Ang ** 3
ion_nc.chlocal.r = r * Bohr2Ang
vlocal = self._variable('reduced_vlocal')
r = aranged(vlocal[:].size) * vlocal.Reduced_vlocal_delta
ion_nc.vlocal = PropertyDict()
ion_nc.vlocal.v = vlocal[:] * r / Bohr2Ang ** 3
ion_nc.vlocal.r = r * Bohr2Ang
if "core" in self.variables:
# this is charge (not 1/sqrt(charge))
core = self._variable('core')
r = aranged(core[:].size) * core.Core_delta
ion_nc.core = PropertyDict()
ion_nc.core.v = core[:] * r / Bohr2Ang ** 3
ion_nc.core.r = r * Bohr2Ang
d = {
"_data": ion_nc,
"_kb_proj": False,
"_l": True,
"_n": True,
}
namespace = default_namespace(**d)
# l-quantum number
class lRange(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
value = (value
.replace("s", "0")
.replace("p", "1")
.replace("d", "2")
.replace("f", "3")
.replace("g", "4")
)
ns._l = strmap(int, value)[0]
p.add_argument('-l',
action=lRange,
help='Denote the sub-section of l-shells that are plotted: "s,f"')
# n quantum number
class nRange(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
ns._n = strmap(int, value)[0]
p.add_argument('-n',
action=nRange,
help='Denote the sub-section of n quantum numbers that are plotted: "2-4,6"')
class Plot(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
import matplotlib.pyplot as plt
# Retrieve values
data = ns._data
# We have these plots:
# - orbitals
# - projectors
# - chlocal
# - vna
# - vlocal
# - core (optional)
# We'll plot them like this:
# orbitals | projectors
# vna + vlocal | chlocal + core
#
# Determine different n, l
fig, axs = plt.subplots(2, 2)
# Now plot different orbitals
for n, l, zeta, pol, r, orb in zip(data.n, data.l, data.zeta,
data.pol, data.r, data.orbital):
if pol == 1:
pol = 'P'
else:
pol = ''
axs[0][0].plot(r, orb, label=f"n{n}l{l}Z{zeta}{pol}")
axs[0][0].set_title("Orbitals")
axs[0][0].set_xlabel("Distance [Ang]")
axs[0][0].set_ylabel("Value [a.u.]")
axs[0][0].legend()
# plot projectors
for n, l, e, r, proj in zip(
data.kb.n, data.kb.l, data.kb.e, data.kb.r, data.kb.proj):
axs[0][1].plot(r, proj, label=f"n{n}l{l} e={e:.5f}")
axs[0][1].set_title("KB projectors")
axs[0][1].set_xlabel("Distance [Ang]")
axs[0][1].set_ylabel("Value [a.u.]")
axs[0][1].legend()
axs[1][0].plot(data.vna.r, data.vna.v, label='Vna')
axs[1][0].plot(data.vlocal.r, data.vlocal.v, label='Vlocal')
axs[1][0].set_title("Potentials")
axs[1][0].set_xlabel("Distance [Ang]")
axs[1][0].set_ylabel("Potential [eV]")
axs[1][0].legend()
axs[1][1].plot(data.chlocal.r, data.chlocal.v, label='Chlocal')
if "core" in data:
axs[1][1].plot(data.core.r, data.core.v, label='core')
axs[1][1].set_title("Charge")
axs[1][1].set_xlabel("Distance [Ang]")
axs[1][1].set_ylabel("Charge [Ang^3]")
axs[1][1].legend()
if value is None:
plt.show()
else:
plt.savefig(value)
p.add_argument(*opts('--plot', '-p'), action=Plot, nargs='?', metavar='FILE',
help='Plot the content of the basis set file, possibly saving the plot to a file.')
return p, namespace
add_sile('ion.xml', ionxmlSileSiesta, gzip=True)
add_sile('ion.nc', ionncSileSiesta)
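# Minimal usage sketch (hedged: only get_sile/add_sile are established above;
# the file name and read method are assumptions for illustration):
#   import sisl
#   atom = sisl.get_sile('H.ion.nc').read_basis()  # hypothetical basis file
#   print(atom.orbitals)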
|
zerothi/sisl
|
sisl/io/siesta/basis.py
|
Python
|
mpl-2.0
| 12,101
|
[
"NetCDF",
"SIESTA"
] |
bdb7ecc31ef99787c9594b3d8e6b0cc0181ee9199e01bc199d0c3086530bdbcc
|
#!/usr/bin/env python
import json
from colors import bcolors, print_ok, print_ok_pending, print_done
from subprocess import call
from pkg_resources import resource_filename
def drop_git(path):
call(["rm", "-rf", path + "/.git"])
def init(arg_vars, project_root):
app_name = arg_vars['app-name']
ansible_dir = app_name + "/ansible"
roles_dir = ansible_dir + "/roles"
if arg_vars.get('example_app') == 'beginner':
print_ok_pending("Cloning beginner playbook via Git. Streaming Git output")
call(["git", "clone", "https://github.com/onyx-platform/engraver-beginner-example.git", app_name])
drop_git(app_name)
print_done("Finished cloning example.")
else:
print_ok_pending("Invoking Leiningen and streaming its output")
call(["lein", "new", "onyx-app", app_name, "+docker"])
print_done("Finished executing Leiningen.")
print("")
print_ok_pending("Initializing .engraver folders")
call(["mkdir", "-p", (app_name + "/.engraver")])
call(["touch", (app_name + "/.engraver/config.json")])
print_done("Finished .engraver folder initialization.")
print("")
print_ok_pending("Creating new Ansible playbook. Streaming Ansible output")
call(["ansible-galaxy", "init", ansible_dir])
call(["cp", resource_filename(__name__, "ansible_template/ansible.cfg"), ansible_dir])
call(["cp", resource_filename(__name__, "ansible_template/refresh_cache.yml"), ansible_dir])
print_done("Finished executing Ansible.")
print("")
print_ok_pending("Updating .gitignore for Engraver files")
call("echo '.engraver/clusters/*' >> " + app_name + "/.gitignore", shell=True)
call("echo 'ansible/machines_remove.yml' >> " + app_name + "/.gitignore", shell=True)
call("echo 'ansible/cluster_remove.yml' >> " + app_name + "/.gitignore", shell=True)
call("echo 'ansible/job_submit.yml' >> " + app_name + "/.gitignore", shell=True)
call("echo 'ansible/job_kill.yml' >> " + app_name + "/.gitignore", shell=True)
print_done("Finished updating .gitignore")
print("")
print_ok_pending("Cloning Ansible AWS playbook via Git. Streaming Git output")
path = roles_dir + "/aws"
call(["git", "clone", "https://github.com/onyx-platform/engraver-aws.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Cloning Ansible Docker playbook via Git. Streaming Git output")
path = roles_dir + "/docker"
call(["git", "clone", "https://github.com/onyx-platform/engraver-docker.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Cloning Ansible ZooKeeper playbook via Git. Streaming Git output")
path = roles_dir + "/zookeeper"
call(["git", "clone", "https://github.com/onyx-platform/engraver-zookeeper.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Cloning Ansible BookKeeper playbook via Git. Streaming Git output")
path = roles_dir + "/bookkeeper"
call(["git", "clone", "https://github.com/onyx-platform/engraver-bookkeeper.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Cloning Ansible Kafka playbook via Git. Streaming Git output")
path = roles_dir + "/kafka"
call(["git", "clone", "https://github.com/onyx-platform/engraver-kafka.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Cloning Ansible Onyx playbook via Git. Streaming Git output")
path = roles_dir + "/onyx"
call(["git", "clone", "https://github.com/onyx-platform/engraver-onyx.git", path])
drop_git(path)
print_done("Finished cloning playbook.")
print("")
print_ok_pending("Initializing Ansible vars directories")
call(["mkdir", "-p", (ansible_dir + "/group_vars")])
call(["mkdir", "-p", (ansible_dir + "/vars/cluster_vars")])
print_done("Finished Ansible vars directory creation.")
print("")
|
onyx-platform/engraver
|
src/init_command.py
|
Python
|
epl-1.0
| 3,913
|
[
"Galaxy"
] |
7b8460e294b183ccfd00ffc7121c83152a73af31a0eba00f6f1071384f176aee
|
# Copyright 2016 Brian Innes
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import traceback
from vPiP import *
Vpip = vPiP.Vpip
with Vpip() as p:
p.setShowDrawing(True)
p.setPlotting(False)
try:
p.moveTo(0, 0)
p.drawTo(p.config.pixels, 0)
p.drawTo(p.config.pixels, p.config.heightPixels)
p.drawTo(0, p.config.heightPixels)
p.drawTo(0, 0)
gridX = (p.config.pixels - 20) / 10
gridY = (p.config.heightPixels - 20) / 10
x = 10
while x + gridX < p.config.pixels:
y = 10
while y + gridY < p.config.heightPixels:
p.moveTo(x, y)
p.drawTo(x + gridX, y)
p.drawTo(x + gridX, y + gridY)
p.drawTo(x, y + gridY)
p.drawTo(x, y)
y += gridY
x += gridX
p.goHome()
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print("test1 main thread exception : %s" % exc_type)
traceback.print_tb(exc_traceback, limit=2, file=sys.stdout)
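# Illustrative arithmetic for the grid above (the config values are assumed,
# not read from vPiP): with p.config.pixels == 1010 and
# p.config.heightPixels == 810, gridX == 99 and gridY == 79, so the loops
# tile a 10 x 10 grid of cells inside the border rectangle drawn first.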
|
brianinnes/vPiP
|
python/test1.py
|
Python
|
apache-2.0
| 1,586
|
[
"Brian"
] |
4029a14de01c7aab201fd55cb57b9861f318277ce03e7876c271c6748abdadd8
|
#!/usr/bin/env python3
"""fasta file access with buffer"""
import pysam
class cachedFasta:
"""fasta seq fetcher, use buffer to improve performance
example:
fa = cachedFasta("hg19.fa")
seq = fa['chr1'][2000:3000]
"""
def __init__(self, fa, trans=None, buffer_size = ('-2k', '8k') ):
""" fa: fasta file name, or pysam.FastaFile object
fa file could be either text file or bgzip-ed (gzipped is *NOT OK*)
if file name is provide, filename.fai chould be provoide or
it will automately build by samtools faidx
trans: transform function before sequence return, no transform by default
eg. trans = str.upper if change seq to uppercase
buffer_size: tuple, left(negative) and right (positive) buffer size,
relative to buffer allocate query position
support both integer or number with suffix k,M
"""
if not isinstance(fa, pysam.FastaFile):
fa = pysam.FastaFile(fa)
self.fa = fa
self.ref_len = dict(zip(fa.references, fa.lengths))
self.buffer_size = buffer_size
self.__set_trans(trans)
# self.buffer_size = tuple(map(readable2num, buffer_size))
# self.buffer = _seqBuffer(self.fa, self.buffer_size)
@property
def buffer_size(self):
""" seq buffer size, relative to buffer allocate query position
get the value will return a tuple (left buffer size, right buffer size),
where left size is a negative number
when set the value, use the tuple with the same format, while each size
support both integer or number with suffix k,M
"""
return self.__buffer_size
@buffer_size.setter
def buffer_size(self, size_tuple):
self.__buffer_size = tuple(map(readable2num, size_tuple))
self.buffer = _seqBuffer(self.fa, self.buffer_size)
@property
def trans(self):
"transform function" # , set to None to disable transform"
return self.__trans
# @trans.setter
def __set_trans(self, func):
if func is None:
func = lambda x : x
self.__trans = func
def __getitem__(self, chr):
""" select chr, by chr name or chr index number
return chrFetcher object, from which seq could be accessed by slicing
"""
if isinstance(chr, int):
chr = self.fa.references[chr]
if chr not in self.ref_len:
raise KeyError("reference %s not found." % chr)
else:
return _chrFetcher(self, chr)
def fetch(self, chr, start, end):
return self[chr][start:end]
def fetch_entry(self, chr):
return self[chr][:]
def close(self):
self.fa.close()
class _chrFetcher:
"fetch seq on one reference"
def __init__(self, genome, chr):
self.database = genome
self.chr = chr
self.chr_len = genome.ref_len[chr]
self.size = self.chr_len
self.buffer = genome.buffer
self.trans = genome.trans
def __getitem__(self, key):
"get seq, start and stop are 0-based, stop exclusive"
# print(key)
if isinstance(key, int):
if key >= self.chr_len or key < - self.chr_len:
raise IndexError("index out of range: %s:%s"
% (chr, key))
key = slice(key, key+1)
if not isinstance(key, slice):
raise TypeError("Unkonwn index type")
key = slice(*key.indices(self.chr_len))
if not (self.chr == self.buffer.chr and
key.start >= self.buffer.start and
key.stop <= self.buffer.end):
self.buffer.fetch(self.chr, key.start, key.stop)
return self.trans(self.buffer[key])
def fetch(self, start, end):
return self[start:end]
def __len__(self):
return self.chr_len
class _seqBuffer:
def __init__(self, fa, buffer_size):
self.fa = fa
self.left, self.right = buffer_size
self.chr = None
def __getitem__(self, key):
start0 = key.start - self.start
stop0 = key.stop - self.start
return self.seq[slice(start0,stop0, key.step)]
def fetch(self, chr, start, stop):
chr_len = self.fa.get_reference_length(chr)
self.chr = chr
self.start = max(0, start + self.left)
self.end = min(stop + self.right, chr_len)
self.seq = self.fa.fetch(self.chr, self.start, self.end)
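# Example of the buffer window above (using the default buffer_size
# ('-2k', '8k')): a query for chr1:10000-10100 loads chr1:8000-18100 once,
# so later queries inside that window are served from the cached string
# without touching pysam again.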
def readable2num(length):
"convert readable number (with suffix) to integer"
if isinstance(length, int):
return length
elif isinstance(length, str):
suffix = length[-1]
if suffix.isdigit():
return int(length)
else:
if suffix in ('K', 'k'):
base = 1000
elif suffix in ('M', 'm'):
base = 1e6
else:
raise ValueError(length)
pre = float(length[:-1])
return round(pre * base)
else:
raise ValueError(length)
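# Illustrative values, derived directly from the rules above:
#   readable2num(100)    -> 100
#   readable2num('8k')   -> 8000
#   readable2num('1.5M') -> 1500000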
|
sein-tao/pyBioUtil
|
BioUtil/cached_fasta.py
|
Python
|
gpl-2.0
| 5,118
|
[
"pysam"
] |
c7a9d37702ad8fd7fe04f197eb47eee30c1067af92db9c21e49ed47483219fea
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.modeling.misc.info Contains the InfoShower class.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
from ..component.component import ModelingComponent
from ...core.tools import formatting as fmt
from ...core.tools import filesystem as fs
from ...magic.core.frame import Frame
from ..component.galaxy import load_preparation_statistics
from ..component.component import load_fitting_configuration
from ..fitting.component import get_generations_table
# -----------------------------------------------------------------
class InfoShower(ModelingComponent):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param kwargs:
"""
# Call the constructor of the base class
super(InfoShower, self).__init__(*args, **kwargs)
# -----------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Show general info
self.show_general()
# Show depending on which type of object
if self.modeling_type == "galaxy": self.show_galaxy()
elif self.modeling_type == "other": self.show_other()
# Show fitting info
if self.fitting_configuration is not None: self.show_fitting()
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(InfoShower, self).setup(**kwargs)
# -----------------------------------------------------------------
def show_general(self):
"""
This function ...
:return:
"""
print("")
print("Object name: " + self.modeling_configuration.name)
print("Modeling type: " + self.modeling_configuration.modeling_type)
# -----------------------------------------------------------------
def show_galaxy(self):
"""
This function ...
:return:
"""
# Get preparation statistics
statistics = load_preparation_statistics(self.config.path)
# Maps paths
maps_path = fs.join(self.config.path, "maps")
old_stars_path = fs.join(maps_path, "old_stars.fits")
young_stars_path = fs.join(maps_path, "young_stars.fits")
ionizing_stars_path = fs.join(maps_path, "ionizing_stars.fits")
dust_path = fs.join(maps_path, "dust.fits")
# Print info
print("Galaxy NGC name: " + self.modeling_configuration.ngc_name)
print("Modeling method: " + self.modeling_configuration.method)
# If preparation statistics are present
if statistics is not None:
# Open the old stars map
old_stars = Frame.from_file(old_stars_path)
# Get convolution and rebinning filter
convolution_filter = statistics.convolution_filter
rebinning_filter = statistics.rebinning_filter
print("Model pixelscale: " + str(old_stars.average_pixelscale) + " (" + str(rebinning_filter) + ")")
print("Model resolution (FWHM): " + str(old_stars.fwhm) + " (" + str(convolution_filter) + ")")
# -----------------------------------------------------------------
def show_other(self):
"""
This function ...
:return:
"""
print("")
# -----------------------------------------------------------------
def show_fitting(self):
"""
This function ...
:return:
"""
print("Reference fluxes (for the SED fitting):")
print("")
for filter_name in self.fitting_configuration.filters: print(" - " + filter_name)
print("")
fitting_configuration = load_fitting_configuration(self.config.path)
print("Free model parameters:")
print("")
for name in fitting_configuration.free_parameters: print(" - " + name + ": " + fitting_configuration.descriptions[name])
print("")
print("Parameter ranges:")
print("")
for name in fitting_configuration.free_parameters: print(" - " + name + ": " + str(fitting_configuration[name + "_range"]))
print("")
generations_table = get_generations_table(self.config.path)
print("Generations:")
print("")
for name in generations_table.generation_names: print(" - " + name)
print("")
# -----------------------------------------------------------------
|
SKIRT/PTS
|
modeling/misc/info.py
|
Python
|
agpl-3.0
| 5,110
|
[
"Galaxy"
] |
a07d6bbc640036f2c48dab0631a9b138aac1e57c63a12dcccd5cdf7030e9b6c1
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 1: Fundamental Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2013 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
============================================================================================================
This example shows how three different error metrics calculate different data sets.
Type SSE MSE RMS
Small 2505 0.01 0.10
Medium 62628 0.25 0.50
Large 250515 1.00 1.00
Huge 25051524 100.21 10.01
"""
__author__ = 'jheaton'
import os
import sys
import numpy as np
# Find the AIFH core files
aifh_dir = os.path.dirname(os.path.abspath(__file__))
aifh_dir = os.path.abspath(aifh_dir + os.sep + ".." + os.sep + "lib" + os.sep + "aifh")
sys.path.append(aifh_dir)
from error import ErrorCalculation
# Operating parameters.
SEED = 1420
ROWS = 10000
COLS = 25
LOW = -1
HIGH = 1
# Generate ideal and actual values.
def generate(seed, rows, cols, low, high, distort):
"""
Generate random data; this includes a random ideal output and a distorted actual output. This simulates
an actual model failing to exactly predict the ideal data by the specified distortion factor.
@param seed: The seed to use.
@param rows: The number of rows to generate.
@param cols: The number of columns to generate.
@param low: The low value of the random numbers.
@param high: The high value of the random numbers.
@param distort: The amount to distort by.
@return: A dictionary that contains the actual and ideal outputs.
"""
result = {}
np.random.seed(seed)
ideal = np.zeros((rows, cols), dtype=float)
actual = np.zeros((rows, cols), dtype=float)
result['ideal'] = ideal
result['actual'] = actual
for row in xrange(0, rows):
for col in xrange(0, cols):
d = float(np.random.randint(low, high))
ideal[row][col] = d
actual[row][col] = d + (np.random.normal() * distort)
return result
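# For reference (assuming ErrorCalculation follows the conventional
# definitions):
#   SSE = sum((actual - ideal) ** 2)
#   MSE = SSE / (rows * cols)
#   RMS = sqrt(MSE)
# e.g. SSE == 2505 over 10000 * 25 values gives MSE == 0.01 and RMS == 0.10,
# matching the "Small" row of the table in the module docstring.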
# Generate data sets.
smallErrors = generate(SEED, ROWS, COLS, LOW, HIGH, 0.1)
mediumErrors = generate(SEED, ROWS, COLS, LOW, HIGH, 0.5)
largeErrors = generate(SEED, ROWS, COLS, LOW, HIGH, 1.0)
hugeErrors = generate(SEED, ROWS, COLS, LOW, HIGH, 10.0)
small_sse = ErrorCalculation.sse(smallErrors['actual'], smallErrors['ideal'])
small_mse = ErrorCalculation.mse(smallErrors['actual'], smallErrors['ideal'])
small_rms = ErrorCalculation.rms(smallErrors['actual'], smallErrors['ideal'])
medium_sse = ErrorCalculation.sse(mediumErrors['actual'], mediumErrors['ideal'])
medium_mse = ErrorCalculation.mse(mediumErrors['actual'], mediumErrors['ideal'])
medium_rms = ErrorCalculation.rms(mediumErrors['actual'], mediumErrors['ideal'])
large_sse = ErrorCalculation.sse(largeErrors['actual'], largeErrors['ideal'])
large_mse = ErrorCalculation.mse(largeErrors['actual'], largeErrors['ideal'])
large_rms = ErrorCalculation.rms(largeErrors['actual'], largeErrors['ideal'])
huge_sse = ErrorCalculation.sse(hugeErrors['actual'], hugeErrors['ideal'])
huge_mse = ErrorCalculation.mse(hugeErrors['actual'], hugeErrors['ideal'])
huge_rms = ErrorCalculation.rms(hugeErrors['actual'], hugeErrors['ideal'])
print("Type\tSSE\t\t\tMSE\t\tRMS")
print("Small\t" + str(int(small_sse)) + "\t\t" + "{0:.2f}".format(small_mse) + "\t" + "{0:.2f}".format(small_rms))
print("Medium\t" + str(int(medium_sse)) + "\t\t" + "{0:.2f}".format(medium_mse) + "\t" + "{0:.2f}".format(medium_rms))
print("Large\t" + str(int(large_sse)) + "\t\t" + "{0:.2f}".format(large_mse) + "\t" + "{0:.2f}".format(large_rms))
print("Huge\t" + str(int(huge_sse)) + "\t" + "{0:.2f}".format(huge_mse) + "\t" + "{0:.2f}".format(huge_rms))
|
PeterLauris/aifh
|
vol1/python-examples/examples/example_error.py
|
Python
|
apache-2.0
| 4,455
|
[
"VisIt"
] |
228f15829f140196de423d2c68f0bb7bcbe587c648b5b63ca9fde2e33d3c3164
|
__author__ = 'brian'
from django import forms
from django.contrib.auth.models import User
from rango.models import Page, Category, UserProfile
class CategoryForm(forms.ModelForm):
name = forms.CharField(max_length=128, help_text="please enter the "
"category name.")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
slug = forms.CharField(widget=forms.HiddenInput(), required=False)
class Meta:
model = Category
fields = ('name',)
class PageForm(forms.ModelForm):
title = forms.CharField(max_length=128, help_text="Please enter the title")
url = forms.URLField(max_length=200, help_text="Please enter the URL of "
"the page")
views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
class Meta:
model = Page
exclude = ('category',)
def clean(self):
"""
A way to amend data that's passed through via the form
"""
cleaned_data = self.cleaned_data
url = cleaned_data.get('url')
if url and not url.startswith('http://'):
url = 'http://' + url
cleaned_data['url'] = url
return cleaned_data
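# e.g. a submitted url of 'www.example.com' is stored as
# 'http://www.example.com', while values already starting with 'http://'
# pass through unchanged.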
class UserForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput())
class Meta:
model = User
fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ('website', 'picture')
|
bjing/tango_with_jango_project
|
rango/forms.py
|
Python
|
gpl-3.0
| 1,658
|
[
"Brian"
] |
89f7d5d328685cfdaf7ed2404397244284930007259cc683d79187277b9a9500
|
import codecs
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
about = dict(
__version__=0.4,
__title__="pymerra2",
__description__="A tool for downloading and repackaging NASA MERRA-2 Data",
__url__="https://github.com/Ouranosinc/pymerra2",
__author__="Trevor James Smith",
__author_email__="smith.trevorj@ouranos.ca",
__license__="Apache Software License 2.0",
__copyright__="Copyright 2018 Ouranos Inc.",
)
with codecs.open("README.md", "r") as fh:
long_description = fh.read()
INSTALL_REQUIRES = [line.strip() for line in open("requirements.txt")]
KEYWORDS = "nasa merra2 netcdf climate forecast reanalysis"
setup(
# -- meta information --------------------------------------------------
name=about["__title__"],
version=str(about["__version__"]),
author=about["__author__"],
author_email=about["__author_email__"],
description=about["__description__"],
long_description=long_description,
url=about["__url__"],
license=about["__license__"],
platforms="all",
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Scientific/Engineering :: Atmospheric Science",
"Topic ::Utilities",
],
keywords=KEYWORDS,
packages=find_packages(exclude=["tests", "templates"]),
include_package_data=None,
python_requires=">=3.5, <4",
install_requires=INSTALL_REQUIRES,
)
|
bstdenis/pymerra2
|
setup.py
|
Python
|
apache-2.0
| 1,794
|
[
"NetCDF"
] |
a4c6ba25baa0115bf419ff6b031a55e570c275fffa0198d9e0afc5c515afa8b7
|
import unittest
import pam
import bpy
import numpy
import random
import pickle
import sys
SEED = 42
class TestAddon(unittest.TestCase):
def testAddonEnabled(self):
self.assertIsNotNone(pam.bl_info)
class TestPamModelCreate(unittest.TestCase):
def setUp(self):
# Seed the rng
random.seed(SEED)
def loadModel(self, path):
pam.model.loadZip(bpy.path.abspath(path))
self.model = pam.model.MODEL
self.CONNECTION_RESULTS = pam.model.CONNECTION_RESULTS
pam.model.reset()
def testModels(self):
"""Test if the pam model generated using euclidean mapping method is the same as a predefined model.
Checks the connections and the CONNECTION_RESULTS struct"""
# Import should-be pam model
self.loadModel("//model.test.zip")
# Compute mapping
bpy.ops.pam.mapping_compute()
for i in range(len(self.model.connections)):
with self.subTest(i = i):
self.assertEqual(self.model.connections[i], pam.model.MODEL.connections[i], "Connections between neuron groups differ")
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['c'], pam.model.CONNECTION_RESULTS[i]['c'], "Connections are not equal in connection ID " + str(i))
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['d'], pam.model.CONNECTION_RESULTS[i]['d'], "Distances between connections are incorrect in connection ID " + str(i))
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['s'], pam.model.CONNECTION_RESULTS[i]['s'], "Synapse vectors are incorrect for connection " + str(i))
self.assertEqual(self.model.connection_indices[i], pam.model.MODEL.connection_indices[i], "Connection indices are incorrect")
self.assertEqual(self.model.ng_list[i], pam.model.MODEL.ng_list[i], "Neuron group list is incorrect")
self.assertEqual(self.model.ng_dict, pam.model.MODEL.ng_dict, "Neuron group dictionary is incorrect")
class TestPamModelThreaded(unittest.TestCase):
def setUp(self):
bpy.context.user_preferences.addons['pam'].preferences.use_threading = True
bpy.context.user_preferences.addons['pam'].preferences.threads = 4
def loadModel(self, path):
pam.model.loadZip(bpy.path.abspath(path))
self.model = pam.model.MODEL
self.CONNECTION_RESULTS = pam.model.CONNECTION_RESULTS
pam.model.reset()
def testModels(self):
"""Test if the pam model generated using euclidean mapping method is the same as a predefined model when using multiple threads.
Checks CONNECTIONS, CONNECTION_RESULTS, CONNECTION_INDICES, NG_LIST and NG_DICT"""
# Import should-be pam model
self.loadModel("//model.test.zip")
# Compute mapping
bpy.ops.pam.mapping_compute()
for i in range(len(self.model.connections)):
with self.subTest(i = i):
self.assertEqual(self.model.connections[i], pam.model.MODEL.connections[i], "Connections between neuron groups differ")
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['c'], pam.model.CONNECTION_RESULTS[i]['c'], "Connections are not equal in connection ID " + str(i))
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['d'], pam.model.CONNECTION_RESULTS[i]['d'], "Distances between connections are incorrect in connection ID " + str(i))
numpy.testing.assert_array_equal(self.CONNECTION_RESULTS[i]['s'], pam.model.CONNECTION_RESULTS[i]['s'], "Synapse vectors are incorrect for connection " + str(i))
self.assertEqual(self.model.connection_indices[i], pam.model.MODEL.connection_indices[i], "Connection indices are incorrect")
self.assertEqual(self.model.ng_list[i], pam.model.MODEL.ng_list[i], "Neuron group list is incorrect")
self.assertEqual(self.model.ng_dict, pam.model.MODEL.ng_dict, "Neuron group dictionary is incorrect")
def run():
"""Run unittest"""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestPamModelCreate))
suite.addTest(unittest.makeSuite(TestPamModelThreaded))
runner = unittest.TextTestRunner(verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
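# Note: wasSuccessful() is inverted so the process exits with status 0 on
# success and 1 on any failure, letting callers treat this Blender-driven
# suite like an ordinary test command.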
bpy.ops.wm.addon_enable(module='pam')
run()
|
MartinPyka/Parametric-Anatomical-Modeling
|
tests/model/model.test.py
|
Python
|
gpl-2.0
| 4,374
|
[
"NEURON"
] |
1d8b7e4e3ae2afe9f093269f9bbbbe1fdb74dcc26f1a3280e14e056978929b9f
|
# This file is part of beets.
# Copyright 2014, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Discogs album search support to the autotagger. Requires the
discogs-client library.
"""
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
import beets
import logging
import re
import time
import json
log = logging.getLogger('beets')
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = 'beets/{0} +http://beets.radbox.org/'.format(beets.__version__)
class DiscogsPlugin(BeetsPlugin):
def __init__(self):
super(DiscogsPlugin, self).__init__()
self.config.add({
'apikey': 'rAzVUQYRaoFjeBjyWuWZ',
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
})
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].get(unicode)
c_secret = self.config['apisecret'].get(unicode)
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except IOError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.discogs_client = Client(USER_AGENT, c_key, c_secret,
token, secret)
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
_, _, url = auth_client.get_authorize_url()
beets.ui.print_("To authenticate with Discogs, visit:")
beets.ui.print_(url)
# Ask for the code and validate it.
code = beets.ui.input_("Enter the code:")
try:
token, secret = auth_client.get_access_token(code)
except DiscogsAPIError:
raise beets.ui.UserError('Discogs authorization failed')
# Save the token for later use.
log.debug('Discogs token {0}, secret {1}'.format(token, secret))
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def album_distance(self, items, album_info, mapping):
"""Returns the album distance.
"""
dist = Distance()
if album_info.data_source == 'Discogs':
dist.add('source', self.config['source_weight'].as_number())
return dist
def candidates(self, items, artist, album, va_likely):
"""Returns a list of AlbumInfo objects for discogs search results
matching an album and artist (if not various).
"""
if not self.discogs_client:
return
if va_likely:
query = album
else:
query = '%s %s' % (artist, album)
try:
return self.get_albums(query)
except DiscogsAPIError as e:
log.debug(u'Discogs API Error: {0} (query: {1})'.format(e, query))
return []
except ConnectionError as e:
log.debug(u'HTTP Connection Error: {0}'.format(e))
return []
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
if not self.discogs_client:
return
log.debug(u'Searching Discogs for release {0}'.format(str(album_id)))
# Discogs-IDs are simple integers. We only look for those at the end
# of an input string so as to avoid confusion with other metadata plugins.
# An optional bracket can follow the integer, as this is how discogs
# displays the release ID on its webpage.
match = re.search(r'(^|\[*r|discogs\.com/.+/release/)(\d+)($|\])',
album_id)
if not match:
return None
result = Release(self.discogs_client, {'id': int(match.group(2))})
# Try to obtain title to verify that we indeed have a valid Release
try:
getattr(result, 'title')
except DiscogsAPIError as e:
if e.message != '404 Not Found':
log.debug(u'Discogs API Error: {0} (query: {1})'
.format(e, result._uri))
return None
except ConnectionError as e:
log.debug(u'HTTP Connection Error: {0}'.format(e))
return None
return self.get_album_info(result)
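# Illustrative inputs accepted by the regex above (derived from the pattern):
# '12345', '[r12345]' and 'http://www.discogs.com/Artist/release/12345'
# all resolve to release id 12345.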
def get_albums(self, query):
"""Returns a list of AlbumInfo objects for a discogs search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
# TEMPORARY: Encode as ASCII to work around a bug:
# https://github.com/sampsyo/beets/issues/1051
# When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from the query. Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
releases = self.discogs_client.search(query, type='release').page(1)
return [self.get_album_info(release) for release in releases[:5]]
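# e.g. a query like u'AC/DC - Back in Black (CD1)' is reduced to roughly
# 'AC DC Back in Black' before being sent to the search endpoint.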
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various'
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
if catalogno == 'none':
catalogno = None
country = result.data.get('country')
media = result.data['formats'][0]['name']
data_url = result.data['uri']
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums,
artist_sort=None, releasegroup_id=None,
catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source='Discogs',
data_url=data_url)
def get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of discogs album or track artists.
"""
artist_id = None
bits = []
for i, artist in enumerate(artists):
if not artist_id:
artist_id = artist['id']
name = artist['name']
# Strip disambiguation number.
name = re.sub(r' \(\d+\)$', '', name)
# Move articles to the front.
name = re.sub(r'(?i)^(.*?), (a|an|the)$', r'\2 \1', name)
bits.append(name)
if artist['join'] and i < len(artists) - 1:
bits.append(artist['join'])
artist = ' '.join(bits).replace(' ,', ',') or None
return artist, artist_id
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
tracks = []
index_tracks = {}
index = 0
for track in tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
tracks.append(self.get_track_info(track, index))
else:
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count = 0, 0
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1
)
if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium
# changes.
medium = track.medium
medium_count += 1
index_count = 0
index_count += 1
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return tracks
def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
track_id = None
medium, medium_index = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index,
medium, medium_index, artist_sort=None,
disctitle=None, artist_credit=None)
def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position.
"""
# medium_index is a number at the end of position. medium is everything
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc.
match = re.match(r'^(.*?)(\d*)$', position.upper())
if match:
medium, index = match.groups()
else:
log.debug(u'Invalid Discogs position: {0}'.format(position))
medium = index = None
return medium or None, index or None
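# e.g. get_track_index('A1') -> ('A', '1'), get_track_index('12') ->
# (None, '12'), and an empty position yields (None, None).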
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.
"""
try:
length = time.strptime(duration, '%M:%S')
except ValueError:
return None
return length.tm_min * 60 + length.tm_sec
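# e.g. get_track_length('3:45') -> 225, while free-form durations that do
# not parse as '%M:%S' return None.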
|
Wen777/beets
|
beetsplug/discogs.py
|
Python
|
mit
| 12,882
|
[
"VisIt"
] |
6f36ad9957a2a79ea61280e2459b849fdf15a366a5671ba4d0d237f07333fc16
|
from test import test_support
import unittest
import codecs
import locale
import sys, StringIO
try:
import _testcapi
except ImportError:
_testcapi = None
class Queue(object):
"""
queue: write bytes at one end, read bytes from the other end
"""
def __init__(self):
self._buffer = ""
def write(self, chars):
self._buffer += chars
def read(self, size=-1):
if size<0:
s = self._buffer
self._buffer = ""
return s
else:
s = self._buffer[:size]
self._buffer = self._buffer[size:]
return s
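# Behaviour sketch for Queue, following directly from the code above:
#   q = Queue(); q.write("spam")
#   q.read(2) == "sp"; q.read() == "am"; q.read() == ""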
class ReadTest(unittest.TestCase):
def check_partial(self, input, partialresults):
# get a StreamReader for the encoding and feed the bytestring version
# of input to the reader byte by byte. Read everything available from
# the StreamReader and check that the results equal the appropriate
# entries from partialresults.
q = Queue()
r = codecs.getreader(self.encoding)(q)
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
q.write(c)
result += r.read()
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(r.read(), u"")
self.assertEqual(r.bytebuffer, "")
self.assertEqual(r.charbuffer, u"")
# do the check again, this time using an incremental decoder
d = codecs.getincrementaldecoder(self.encoding)()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# Check whether the reset method works properly
d.reset()
result = u""
for (c, partialresult) in zip(input.encode(self.encoding), partialresults):
result += d.decode(c)
self.assertEqual(result, partialresult)
# check that there's nothing left in the buffers
self.assertEqual(d.decode("", True), u"")
self.assertEqual(d.buffer, "")
# check iterdecode()
encoded = input.encode(self.encoding)
self.assertEqual(
input,
u"".join(codecs.iterdecode(encoded, self.encoding))
)
def test_readline(self):
def getreader(input):
stream = StringIO.StringIO(input.encode(self.encoding))
return codecs.getreader(self.encoding)(stream)
def readalllines(input, keepends=True, size=None):
reader = getreader(input)
lines = []
while True:
line = reader.readline(size=size, keepends=keepends)
if not line:
break
lines.append(line)
return "|".join(lines)
s = u"foo\nbar\r\nbaz\rspam\u2028eggs"
sexpected = u"foo\n|bar\r\n|baz\r|spam\u2028|eggs"
sexpectednoends = u"foo|bar|baz|spam|eggs"
self.assertEqual(readalllines(s, True), sexpected)
self.assertEqual(readalllines(s, False), sexpectednoends)
self.assertEqual(readalllines(s, True, 10), sexpected)
self.assertEqual(readalllines(s, False, 10), sexpectednoends)
# Test long lines (multiple calls to read() in readline())
vw = []
vwo = []
for (i, lineend) in enumerate(u"\n \r\n \r \u2028".split()):
vw.append((i*200)*u"\3042" + lineend)
vwo.append((i*200)*u"\3042")
self.assertEqual(readalllines("".join(vw), True), "".join(vw))
self.assertEqual(readalllines("".join(vw), False),"".join(vwo))
# Test lines where the first read might end with \r, so the
# reader has to look ahead whether this is a lone \r or a \r\n
for size in xrange(80):
for lineend in u"\n \r\n \r \u2028".split():
s = 10*(size*u"a" + lineend + u"xxx\n")
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=True),
size*u"a" + lineend,
)
reader = getreader(s)
for i in xrange(10):
self.assertEqual(
reader.readline(keepends=False),
size*u"a",
)
def test_bug1175396(self):
s = [
'<%!--===================================================\r\n',
' BLOG index page: show recent articles,\r\n',
' today\'s articles, or articles of a specific date.\r\n',
'========================================================--%>\r\n',
'<%@inputencoding="ISO-8859-1"%>\r\n',
'<%@pagetemplate=TEMPLATE.y%>\r\n',
'<%@import=import frog.util, frog%>\r\n',
'<%@import=import frog.objects%>\r\n',
'<%@import=from frog.storageerrors import StorageError%>\r\n',
'<%\r\n',
'\r\n',
'import logging\r\n',
'log=logging.getLogger("Snakelets.logger")\r\n',
'\r\n',
'\r\n',
'user=self.SessionCtx.user\r\n',
'storageEngine=self.SessionCtx.storageEngine\r\n',
'\r\n',
'\r\n',
'def readArticlesFromDate(date, count=None):\r\n',
' entryids=storageEngine.listBlogEntries(date)\r\n',
' entryids.reverse() # descending\r\n',
' if count:\r\n',
' entryids=entryids[:count]\r\n',
' try:\r\n',
' return [ frog.objects.BlogEntry.load(storageEngine, date, Id) for Id in entryids ]\r\n',
' except StorageError,x:\r\n',
' log.error("Error loading articles: "+str(x))\r\n',
' self.abort("cannot load articles")\r\n',
'\r\n',
'showdate=None\r\n',
'\r\n',
'arg=self.Request.getArg()\r\n',
'if arg=="today":\r\n',
' #-------------------- TODAY\'S ARTICLES\r\n',
' self.write("<h2>Today\'s articles</h2>")\r\n',
' showdate = frog.util.isodatestr() \r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'elif arg=="active":\r\n',
' #-------------------- ACTIVE ARTICLES redirect\r\n',
' self.Yredirect("active.y")\r\n',
'elif arg=="login":\r\n',
' #-------------------- LOGIN PAGE redirect\r\n',
' self.Yredirect("login.y")\r\n',
'elif arg=="date":\r\n',
' #-------------------- ARTICLES OF A SPECIFIC DATE\r\n',
' showdate = self.Request.getParameter("date")\r\n',
' self.write("<h2>Articles written on %s</h2>"% frog.util.mediumdatestr(showdate))\r\n',
' entries = readArticlesFromDate(showdate)\r\n',
'else:\r\n',
' #-------------------- RECENT ARTICLES\r\n',
' self.write("<h2>Recent articles</h2>")\r\n',
' dates=storageEngine.listBlogEntryDates()\r\n',
' if dates:\r\n',
' entries=[]\r\n',
' SHOWAMOUNT=10\r\n',
' for showdate in dates:\r\n',
' entries.extend( readArticlesFromDate(showdate, SHOWAMOUNT-len(entries)) )\r\n',
' if len(entries)>=SHOWAMOUNT:\r\n',
' break\r\n',
' \r\n',
]
stream = StringIO.StringIO("".join(s).encode(self.encoding))
reader = codecs.getreader(self.encoding)(stream)
for (i, line) in enumerate(reader):
self.assertEqual(line, s[i])
def test_readlinequeue(self):
q = Queue()
writer = codecs.getwriter(self.encoding)(q)
reader = codecs.getreader(self.encoding)(q)
# No lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=False), u"foo")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=False), u"")
self.assertEqual(reader.readline(keepends=False), u"bar")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=False), u"baz")
self.assertEqual(reader.readline(keepends=False), u"")
# Lineends
writer.write(u"foo\r")
self.assertEqual(reader.readline(keepends=True), u"foo\r")
writer.write(u"\nbar\r")
self.assertEqual(reader.readline(keepends=True), u"\n")
self.assertEqual(reader.readline(keepends=True), u"bar\r")
writer.write(u"baz")
self.assertEqual(reader.readline(keepends=True), u"baz")
self.assertEqual(reader.readline(keepends=True), u"")
writer.write(u"foo\r\n")
self.assertEqual(reader.readline(keepends=True), u"foo\r\n")
def test_bug1098990_a(self):
s1 = u"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy\r\n"
s2 = u"offending line: ladfj askldfj klasdj fskla dfzaskdj fasklfj laskd fjasklfzzzzaa%whereisthis!!!\r\n"
s3 = u"next line.\r\n"
s = (s1+s2+s3).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), u"")
def test_bug1098990_b(self):
s1 = u"aaaaaaaaaaaaaaaaaaaaaaaa\r\n"
s2 = u"bbbbbbbbbbbbbbbbbbbbbbbb\r\n"
s3 = u"stillokay:bbbbxx\r\n"
s4 = u"broken!!!!badbad\r\n"
s5 = u"againokay.\r\n"
s = (s1+s2+s3+s4+s5).encode(self.encoding)
stream = StringIO.StringIO(s)
reader = codecs.getreader(self.encoding)(stream)
self.assertEqual(reader.readline(), s1)
self.assertEqual(reader.readline(), s2)
self.assertEqual(reader.readline(), s3)
self.assertEqual(reader.readline(), s4)
self.assertEqual(reader.readline(), s5)
self.assertEqual(reader.readline(), u"")
class UTF32Test(ReadTest):
encoding = "utf-32"
spamle = ('\xff\xfe\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00'
's\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m\x00\x00\x00')
spambe = ('\x00\x00\xfe\xff'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m'
'\x00\x00\x00s\x00\x00\x00p\x00\x00\x00a\x00\x00\x00m')
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO(4*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO(8*"\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"", # first byte of BOM read
u"", # second byte of BOM read
u"", # third byte of BOM read
u"", # fourth byte of BOM read => byteorder known
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_32_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_32_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded_le = '\xff\xfe\x00\x00' + '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_le)[0])
encoded_be = '\x00\x00\xfe\xff' + '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_decode(encoded_be)[0])
class UTF32LETest(ReadTest):
encoding = "utf-32-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x03\x02\x01\x00")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_le_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x00\x01\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_le_decode(encoded)[0])
class UTF32BETest(ReadTest):
encoding = "utf-32-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"",
u"",
u"",
u"\x00",
u"\x00",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_simple(self):
self.assertEqual(u"\U00010203".encode(self.encoding), "\x00\x01\x02\x03")
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_32_be_decode,
"\xff", "strict", True)
def test_issue8941(self):
# Issue #8941: insufficient result allocation when decoding into
# surrogate pairs on UCS-2 builds.
encoded = '\x00\x01\x00\x00' * 1024
self.assertEqual(u'\U00010000' * 1024,
codecs.utf_32_be_decode(encoded)[0])
class UTF16Test(ReadTest):
encoding = "utf-16"
spamle = '\xff\xfes\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
spambe = '\xfe\xff\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
def test_only_one_bom(self):
_,_,reader,writer = codecs.lookup(self.encoding)
# encode some stream
s = StringIO.StringIO()
f = writer(s)
f.write(u"spam")
f.write(u"spam")
d = s.getvalue()
# check whether there is exactly one BOM in it
self.assertTrue(d == self.spamle or d == self.spambe)
# try to read it back
s = StringIO.StringIO(d)
f = reader(s)
self.assertEqual(f.read(), u"spamspam")
def test_badbom(self):
s = StringIO.StringIO("\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
s = StringIO.StringIO("\xff\xff\xff\xff")
f = codecs.getreader(self.encoding)(s)
self.assertRaises(UnicodeError, f.read)
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"", # first byte of BOM read
u"", # second byte of BOM read => byteorder known
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_handlers(self):
self.assertEqual((u'\ufffd', 1),
codecs.utf_16_decode('\x01', 'replace', True))
self.assertEqual((u'', 1),
codecs.utf_16_decode('\x01', 'ignore', True))
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_decode, "\xff", "strict", True)
def test_bug691291(self):
# Files are always opened in binary mode, even if no binary mode was
# specified. This means that no automatic conversion of '\n' is done
# on reading and writing.
s1 = u'Hello\r\nworld\r\n'
s = s1.encode(self.encoding)
self.addCleanup(test_support.unlink, test_support.TESTFN)
with open(test_support.TESTFN, 'wb') as fp:
fp.write(s)
with codecs.open(test_support.TESTFN, 'U', encoding=self.encoding) as reader:
self.assertEqual(reader.read(), s1)
class UTF16LETest(ReadTest):
encoding = "utf-16-le"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_le_decode, "\xff", "strict", True)
class UTF16BETest(ReadTest):
encoding = "utf-16-be"
def test_partial(self):
self.check_partial(
u"\x00\xff\u0100\uffff",
[
u"",
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u0100",
u"\x00\xff\u0100",
u"\x00\xff\u0100\uffff",
]
)
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_be_decode, "\xff", "strict", True)
class UTF8Test(ReadTest):
encoding = "utf-8"
def test_partial(self):
self.check_partial(
u"\x00\xff\u07ff\u0800\uffff",
[
u"\x00",
u"\x00",
u"\x00\xff",
u"\x00\xff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800",
u"\x00\xff\u07ff\u0800\uffff",
]
)
class UTF7Test(ReadTest):
encoding = "utf-7"
def test_partial(self):
self.check_partial(
u"a+-b",
[
u"a",
u"a",
u"a+",
u"a+-",
u"a+-b",
]
)
class UTF16ExTest(unittest.TestCase):
def test_errors(self):
self.assertRaises(UnicodeDecodeError, codecs.utf_16_ex_decode, "\xff", "strict", 0, True)
def test_bad_args(self):
self.assertRaises(TypeError, codecs.utf_16_ex_decode)
class ReadBufferTest(unittest.TestCase):
def test_array(self):
import array
self.assertEqual(
codecs.readbuffer_encode(array.array("c", "spam")),
("spam", 4)
)
def test_empty(self):
self.assertEqual(codecs.readbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.readbuffer_encode)
self.assertRaises(TypeError, codecs.readbuffer_encode, 42)
class CharBufferTest(unittest.TestCase):
def test_string(self):
self.assertEqual(codecs.charbuffer_encode("spam"), ("spam", 4))
def test_empty(self):
self.assertEqual(codecs.charbuffer_encode(""), ("", 0))
def test_bad_args(self):
self.assertRaises(TypeError, codecs.charbuffer_encode)
self.assertRaises(TypeError, codecs.charbuffer_encode, 42)
class UTF8SigTest(ReadTest):
encoding = "utf-8-sig"
def test_partial(self):
self.check_partial(
u"\ufeff\x00\xff\u07ff\u0800\uffff",
[
u"",
u"",
u"", # First BOM has been read and skipped
u"",
u"",
u"\ufeff", # Second BOM has been read and emitted
u"\ufeff\x00", # "\x00" read and emitted
u"\ufeff\x00", # First byte of encoded u"\xff" read
u"\ufeff\x00\xff", # Second byte of encoded u"\xff" read
u"\ufeff\x00\xff", # First byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff", # Second byte of encoded u"\u07ff" read
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800",
u"\ufeff\x00\xff\u07ff\u0800\uffff",
]
)
def test_bug1601501(self):
# SF bug #1601501: check that the codec works with a buffer
unicode("\xef\xbb\xbf", "utf-8-sig")
def test_bom(self):
d = codecs.getincrementaldecoder("utf-8-sig")()
s = u"spam"
self.assertEqual(d.decode(s.encode("utf-8-sig")), s)
def test_stream_bom(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = codecs.BOM_UTF8 + "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
def test_stream_bare(self):
unistring = u"ABC\u00A1\u2200XYZ"
bytestring = "ABC\xC2\xA1\xE2\x88\x80XYZ"
reader = codecs.getreader("utf-8-sig")
for sizehint in [None] + range(1, 11) + \
[64, 128, 256, 512, 1024]:
istream = reader(StringIO.StringIO(bytestring))
ostream = StringIO.StringIO()
while 1:
if sizehint is not None:
data = istream.read(sizehint)
else:
data = istream.read()
if not data:
break
ostream.write(data)
got = ostream.getvalue()
self.assertEqual(got, unistring)
class EscapeDecodeTest(unittest.TestCase):
def test_empty(self):
self.assertEqual(codecs.escape_decode(""), ("", 0))
class RecodingTest(unittest.TestCase):
def test_recoding(self):
f = StringIO.StringIO()
f2 = codecs.EncodedFile(f, "unicode_internal", "utf-8")
f2.write(u"a")
f2.close()
# Python used to crash on this at exit because of a refcount
# bug in _codecsmodule.c
# From RFC 3492
punycode_testcases = [
# A Arabic (Egyptian):
(u"\u0644\u064A\u0647\u0645\u0627\u0628\u062A\u0643\u0644"
u"\u0645\u0648\u0634\u0639\u0631\u0628\u064A\u061F",
"egbpdaj6bu4bxfgehfvwxn"),
    # (B) Chinese (simplified):
(u"\u4ED6\u4EEC\u4E3A\u4EC0\u4E48\u4E0D\u8BF4\u4E2D\u6587",
"ihqwcrb4cv8a8dqg056pqjye"),
    # (C) Chinese (traditional):
(u"\u4ED6\u5011\u7232\u4EC0\u9EBD\u4E0D\u8AAA\u4E2D\u6587",
"ihqwctvzc91f659drss3x8bo0yb"),
    # (D) Czech: Pro<ccaron>prost<ecaron>nemluv<iacute><ccaron>esky
(u"\u0050\u0072\u006F\u010D\u0070\u0072\u006F\u0073\u0074"
u"\u011B\u006E\u0065\u006D\u006C\u0075\u0076\u00ED\u010D"
u"\u0065\u0073\u006B\u0079",
"Proprostnemluvesky-uyb24dma41a"),
    # (E) Hebrew:
(u"\u05DC\u05DE\u05D4\u05D4\u05DD\u05E4\u05E9\u05D5\u05D8"
u"\u05DC\u05D0\u05DE\u05D3\u05D1\u05E8\u05D9\u05DD\u05E2"
u"\u05D1\u05E8\u05D9\u05EA",
"4dbcagdahymbxekheh6e0a7fei0b"),
    # (F) Hindi (Devanagari):
(u"\u092F\u0939\u0932\u094B\u0917\u0939\u093F\u0928\u094D"
u"\u0926\u0940\u0915\u094D\u092F\u094B\u0902\u0928\u0939"
u"\u0940\u0902\u092C\u094B\u0932\u0938\u0915\u0924\u0947"
u"\u0939\u0948\u0902",
"i1baa7eci9glrd9b2ae1bj0hfcgg6iyaf8o0a1dig0cd"),
    # (G) Japanese (kanji and hiragana):
(u"\u306A\u305C\u307F\u3093\u306A\u65E5\u672C\u8A9E\u3092"
u"\u8A71\u3057\u3066\u304F\u308C\u306A\u3044\u306E\u304B",
"n8jok5ay5dzabd5bym9f0cm5685rrjetr6pdxa"),
# (H) Korean (Hangul syllables):
(u"\uC138\uACC4\uC758\uBAA8\uB4E0\uC0AC\uB78C\uB4E4\uC774"
u"\uD55C\uAD6D\uC5B4\uB97C\uC774\uD574\uD55C\uB2E4\uBA74"
u"\uC5BC\uB9C8\uB098\uC88B\uC744\uAE4C",
"989aomsvi5e83db1d2a355cv1e0vak1dwrv93d5xbh15a0dt30a5j"
"psd879ccm6fea98c"),
# (I) Russian (Cyrillic):
(u"\u043F\u043E\u0447\u0435\u043C\u0443\u0436\u0435\u043E"
u"\u043D\u0438\u043D\u0435\u0433\u043E\u0432\u043E\u0440"
u"\u044F\u0442\u043F\u043E\u0440\u0443\u0441\u0441\u043A"
u"\u0438",
"b1abfaaepdrnnbgefbaDotcwatmq2g4l"),
# (J) Spanish: Porqu<eacute>nopuedensimplementehablarenEspa<ntilde>ol
(u"\u0050\u006F\u0072\u0071\u0075\u00E9\u006E\u006F\u0070"
u"\u0075\u0065\u0064\u0065\u006E\u0073\u0069\u006D\u0070"
u"\u006C\u0065\u006D\u0065\u006E\u0074\u0065\u0068\u0061"
u"\u0062\u006C\u0061\u0072\u0065\u006E\u0045\u0073\u0070"
u"\u0061\u00F1\u006F\u006C",
"PorqunopuedensimplementehablarenEspaol-fmd56a"),
# (K) Vietnamese:
# T<adotbelow>isaoh<odotbelow>kh<ocirc>ngth<ecirchookabove>ch\
# <ihookabove>n<oacute>iti<ecircacute>ngVi<ecircdotbelow>t
(u"\u0054\u1EA1\u0069\u0073\u0061\u006F\u0068\u1ECD\u006B"
u"\u0068\u00F4\u006E\u0067\u0074\u0068\u1EC3\u0063\u0068"
u"\u1EC9\u006E\u00F3\u0069\u0074\u0069\u1EBF\u006E\u0067"
u"\u0056\u0069\u1EC7\u0074",
"TisaohkhngthchnitingVit-kjcr8268qyxafd2f1b9g"),
    # (L) 3<nen>B<gumi><kinpachi><sensei>
(u"\u0033\u5E74\u0042\u7D44\u91D1\u516B\u5148\u751F",
"3B-ww4c5e180e575a65lsy2b"),
# (M) <amuro><namie>-with-SUPER-MONKEYS
(u"\u5B89\u5BA4\u5948\u7F8E\u6075\u002D\u0077\u0069\u0074"
u"\u0068\u002D\u0053\u0055\u0050\u0045\u0052\u002D\u004D"
u"\u004F\u004E\u004B\u0045\u0059\u0053",
"-with-SUPER-MONKEYS-pc58ag80a8qai00g7n9n"),
# (N) Hello-Another-Way-<sorezore><no><basho>
(u"\u0048\u0065\u006C\u006C\u006F\u002D\u0041\u006E\u006F"
u"\u0074\u0068\u0065\u0072\u002D\u0057\u0061\u0079\u002D"
u"\u305D\u308C\u305E\u308C\u306E\u5834\u6240",
"Hello-Another-Way--fc4qua05auwb3674vfr0b"),
# (O) <hitotsu><yane><no><shita>2
(u"\u3072\u3068\u3064\u5C4B\u6839\u306E\u4E0B\u0032",
"2-u9tlzr9756bt3uc0v"),
# (P) Maji<de>Koi<suru>5<byou><mae>
(u"\u004D\u0061\u006A\u0069\u3067\u004B\u006F\u0069\u3059"
u"\u308B\u0035\u79D2\u524D",
"MajiKoi5-783gue6qz075azm5e"),
# (Q) <pafii>de<runba>
(u"\u30D1\u30D5\u30A3\u30FC\u0064\u0065\u30EB\u30F3\u30D0",
"de-jg4avhby1noc0d"),
# (R) <sono><supiido><de>
(u"\u305D\u306E\u30B9\u30D4\u30FC\u30C9\u3067",
"d9juau41awczczp"),
# (S) -> $1.00 <-
(u"\u002D\u003E\u0020\u0024\u0031\u002E\u0030\u0030\u0020"
u"\u003C\u002D",
"-> $1.00 <--")
]
for i in punycode_testcases:
if len(i)!=2:
print repr(i)
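def _demo_punycode():
    # Hedged sketch, not from the original file: punycode keeps the ASCII
    # code points verbatim before a "-" delimiter and encodes the rest
    # after it, so u"b\xfccher" round-trips through "bcher-kva".
    assert u"b\xfccher".encode("punycode") == "bcher-kva"
    assert "bcher-kva".decode("punycode") == u"b\xfccher"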
class PunycodeTest(unittest.TestCase):
def test_encode(self):
for uni, puny in punycode_testcases:
# Need to convert both strings to lower case, since
# some of the extended encodings use upper case, but our
# code produces only lower case. Converting just puny to
# lower is also insufficient, since some of the input characters
# are upper case.
self.assertEqual(uni.encode("punycode").lower(), puny.lower())
def test_decode(self):
for uni, puny in punycode_testcases:
self.assertEqual(uni, puny.decode("punycode"))
class UnicodeInternalTest(unittest.TestCase):
def test_bug1251300(self):
# Decoding with unicode_internal used to not correctly handle "code
# points" above 0x10ffff on UCS-4 builds.
if sys.maxunicode > 0xffff:
ok = [
("\x00\x10\xff\xff", u"\U0010ffff"),
("\x00\x00\x01\x01", u"\U00000101"),
("", u""),
]
not_ok = [
"\x7f\xff\xff\xff",
"\x80\x00\x00\x00",
"\x81\x00\x00\x00",
"\x00",
"\x00\x00\x00\x00\x00",
]
for internal, uni in ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertEqual(uni, internal.decode("unicode_internal"))
for internal in not_ok:
if sys.byteorder == "little":
internal = "".join(reversed(internal))
self.assertRaises(UnicodeDecodeError, internal.decode,
"unicode_internal")
def test_decode_error_attributes(self):
if sys.maxunicode > 0xffff:
try:
"\x00\x00\x00\x00\x00\x11\x11\x00".decode("unicode_internal")
except UnicodeDecodeError, ex:
self.assertEqual("unicode_internal", ex.encoding)
self.assertEqual("\x00\x00\x00\x00\x00\x11\x11\x00", ex.object)
self.assertEqual(4, ex.start)
self.assertEqual(8, ex.end)
else:
self.fail()
def test_decode_callback(self):
if sys.maxunicode > 0xffff:
codecs.register_error("UnicodeInternalTest", codecs.ignore_errors)
decoder = codecs.getdecoder("unicode_internal")
ab = u"ab".encode("unicode_internal")
ignored = decoder("%s\x22\x22\x22\x22%s" % (ab[:4], ab[4:]),
"UnicodeInternalTest")
self.assertEqual((u"ab", 12), ignored)
def test_encode_length(self):
# Issue 3739
encoder = codecs.getencoder("unicode_internal")
self.assertEqual(encoder(u"a")[1], 1)
self.assertEqual(encoder(u"\xe9\u0142")[1], 2)
encoder = codecs.getencoder("string-escape")
self.assertEqual(encoder(r'\x00')[1], 4)
# From http://www.gnu.org/software/libidn/draft-josefsson-idn-test-vectors.html
nameprep_tests = [
# 3.1 Map to nothing.
('foo\xc2\xad\xcd\x8f\xe1\xa0\x86\xe1\xa0\x8bbar'
'\xe2\x80\x8b\xe2\x81\xa0baz\xef\xb8\x80\xef\xb8\x88\xef'
'\xb8\x8f\xef\xbb\xbf',
'foobarbaz'),
# 3.2 Case folding ASCII U+0043 U+0041 U+0046 U+0045.
('CAFE',
'cafe'),
    # 3.3 Case folding 8bit U+00DF (German sharp s).
# The original test case is bogus; it says \xc3\xdf
('\xc3\x9f',
'ss'),
    # 3.4 Case folding U+0130 (Turkish capital I with dot).
('\xc4\xb0',
'i\xcc\x87'),
# 3.5 Case folding multibyte U+0143 U+037A.
('\xc5\x83\xcd\xba',
'\xc5\x84 \xce\xb9'),
# 3.6 Case folding U+2121 U+33C6 U+1D7BB.
# XXX: skip this as it fails in UCS-2 mode
#('\xe2\x84\xa1\xe3\x8f\x86\xf0\x9d\x9e\xbb',
# 'telc\xe2\x88\x95kg\xcf\x83'),
(None, None),
# 3.7 Normalization of U+006a U+030c U+00A0 U+00AA.
('j\xcc\x8c\xc2\xa0\xc2\xaa',
'\xc7\xb0 a'),
# 3.8 Case folding U+1FB7 and normalization.
('\xe1\xbe\xb7',
'\xe1\xbe\xb6\xce\xb9'),
# 3.9 Self-reverting case folding U+01F0 and normalization.
# The original test case is bogus, it says `\xc7\xf0'
('\xc7\xb0',
'\xc7\xb0'),
# 3.10 Self-reverting case folding U+0390 and normalization.
('\xce\x90',
'\xce\x90'),
# 3.11 Self-reverting case folding U+03B0 and normalization.
('\xce\xb0',
'\xce\xb0'),
# 3.12 Self-reverting case folding U+1E96 and normalization.
('\xe1\xba\x96',
'\xe1\xba\x96'),
# 3.13 Self-reverting case folding U+1F56 and normalization.
('\xe1\xbd\x96',
'\xe1\xbd\x96'),
# 3.14 ASCII space character U+0020.
(' ',
' '),
# 3.15 Non-ASCII 8bit space character U+00A0.
('\xc2\xa0',
' '),
# 3.16 Non-ASCII multibyte space character U+1680.
('\xe1\x9a\x80',
None),
# 3.17 Non-ASCII multibyte space character U+2000.
('\xe2\x80\x80',
' '),
# 3.18 Zero Width Space U+200b.
('\xe2\x80\x8b',
''),
# 3.19 Non-ASCII multibyte space character U+3000.
('\xe3\x80\x80',
' '),
# 3.20 ASCII control characters U+0010 U+007F.
('\x10\x7f',
'\x10\x7f'),
# 3.21 Non-ASCII 8bit control character U+0085.
('\xc2\x85',
None),
# 3.22 Non-ASCII multibyte control character U+180E.
('\xe1\xa0\x8e',
None),
# 3.23 Zero Width No-Break Space U+FEFF.
('\xef\xbb\xbf',
''),
# 3.24 Non-ASCII control character U+1D175.
('\xf0\x9d\x85\xb5',
None),
# 3.25 Plane 0 private use character U+F123.
('\xef\x84\xa3',
None),
# 3.26 Plane 15 private use character U+F1234.
('\xf3\xb1\x88\xb4',
None),
# 3.27 Plane 16 private use character U+10F234.
('\xf4\x8f\x88\xb4',
None),
# 3.28 Non-character code point U+8FFFE.
('\xf2\x8f\xbf\xbe',
None),
# 3.29 Non-character code point U+10FFFF.
('\xf4\x8f\xbf\xbf',
None),
# 3.30 Surrogate code U+DF42.
('\xed\xbd\x82',
None),
# 3.31 Non-plain text character U+FFFD.
('\xef\xbf\xbd',
None),
# 3.32 Ideographic description character U+2FF5.
('\xe2\xbf\xb5',
None),
# 3.33 Display property character U+0341.
('\xcd\x81',
'\xcc\x81'),
# 3.34 Left-to-right mark U+200E.
('\xe2\x80\x8e',
None),
# 3.35 Deprecated U+202A.
('\xe2\x80\xaa',
None),
# 3.36 Language tagging character U+E0001.
('\xf3\xa0\x80\x81',
None),
# 3.37 Language tagging character U+E0042.
('\xf3\xa0\x81\x82',
None),
# 3.38 Bidi: RandALCat character U+05BE and LCat characters.
('foo\xd6\xbebar',
None),
# 3.39 Bidi: RandALCat character U+FD50 and LCat characters.
('foo\xef\xb5\x90bar',
None),
# 3.40 Bidi: RandALCat character U+FB38 and LCat characters.
('foo\xef\xb9\xb6bar',
'foo \xd9\x8ebar'),
# 3.41 Bidi: RandALCat without trailing RandALCat U+0627 U+0031.
('\xd8\xa71',
None),
# 3.42 Bidi: RandALCat character U+0627 U+0031 U+0628.
('\xd8\xa71\xd8\xa8',
'\xd8\xa71\xd8\xa8'),
# 3.43 Unassigned code point U+E0002.
# Skip this test as we allow unassigned
#('\xf3\xa0\x80\x82',
# None),
(None, None),
# 3.44 Larger test (shrinking).
# Original test case reads \xc3\xdf
('X\xc2\xad\xc3\x9f\xc4\xb0\xe2\x84\xa1j\xcc\x8c\xc2\xa0\xc2'
'\xaa\xce\xb0\xe2\x80\x80',
'xssi\xcc\x87tel\xc7\xb0 a\xce\xb0 '),
# 3.45 Larger test (expanding).
# Original test case reads \xc3\x9f
('X\xc3\x9f\xe3\x8c\x96\xc4\xb0\xe2\x84\xa1\xe2\x92\x9f\xe3\x8c'
'\x80',
'xss\xe3\x82\xad\xe3\x83\xad\xe3\x83\xa1\xe3\x83\xbc\xe3'
'\x83\x88\xe3\x83\xabi\xcc\x87tel\x28d\x29\xe3\x82'
'\xa2\xe3\x83\x91\xe3\x83\xbc\xe3\x83\x88')
]
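def _demo_nameprep():
    # Hedged sketch, not from the original file: nameprep case-folds,
    # NFKC-normalizes, and rejects prohibited code points such as the
    # Ogham space mark U+1680 (test 3.16 above) with UnicodeError.
    from encodings.idna import nameprep
    assert nameprep(u"CAFE") == u"cafe"          # test 3.2
    assert nameprep(u"\u2000") == u" "           # test 3.17
    try:
        nameprep(u"foo\u1680bar")
    except UnicodeError:
        pass
    else:
        raise AssertionError("prohibited character was accepted")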
class NameprepTest(unittest.TestCase):
def test_nameprep(self):
from encodings.idna import nameprep
for pos, (orig, prepped) in enumerate(nameprep_tests):
if orig is None:
# Skipped
continue
# The Unicode strings are given in UTF-8
orig = unicode(orig, "utf-8")
if prepped is None:
# Input contains prohibited characters
self.assertRaises(UnicodeError, nameprep, orig)
else:
prepped = unicode(prepped, "utf-8")
try:
self.assertEqual(nameprep(orig), prepped)
except Exception,e:
raise test_support.TestFailed("Test 3.%d: %s" % (pos+1, str(e)))
class IDNACodecTest(unittest.TestCase):
def test_builtin_decode(self):
self.assertEqual(unicode("python.org", "idna"), u"python.org")
self.assertEqual(unicode("python.org.", "idna"), u"python.org.")
self.assertEqual(unicode("xn--pythn-mua.org", "idna"), u"pyth\xf6n.org")
self.assertEqual(unicode("xn--pythn-mua.org.", "idna"), u"pyth\xf6n.org.")
def test_builtin_encode(self):
self.assertEqual(u"python.org".encode("idna"), "python.org")
self.assertEqual("python.org.".encode("idna"), "python.org.")
self.assertEqual(u"pyth\xf6n.org".encode("idna"), "xn--pythn-mua.org")
self.assertEqual(u"pyth\xf6n.org.".encode("idna"), "xn--pythn-mua.org.")
def test_stream(self):
import StringIO
r = codecs.getreader("idna")(StringIO.StringIO("abc"))
r.read(3)
self.assertEqual(r.read(), u"")
def test_incremental_decode(self):
self.assertEqual(
"".join(codecs.iterdecode("python.org", "idna")),
u"python.org"
)
self.assertEqual(
"".join(codecs.iterdecode("python.org.", "idna")),
u"python.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
self.assertEqual(
"".join(codecs.iterdecode("xn--pythn-mua.org.", "idna")),
u"pyth\xf6n.org."
)
decoder = codecs.getincrementaldecoder("idna")()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode(u"rg"), u"")
self.assertEqual(decoder.decode(u"", True), u"org")
decoder.reset()
self.assertEqual(decoder.decode("xn--xam", ), u"")
self.assertEqual(decoder.decode("ple-9ta.o", ), u"\xe4xample.")
self.assertEqual(decoder.decode("rg."), u"org.")
self.assertEqual(decoder.decode("", True), u"")
def test_incremental_encode(self):
self.assertEqual(
"".join(codecs.iterencode(u"python.org", "idna")),
"python.org"
)
self.assertEqual(
"".join(codecs.iterencode(u"python.org.", "idna")),
"python.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
self.assertEqual(
"".join(codecs.iterencode(u"pyth\xf6n.org.", "idna")),
"xn--pythn-mua.org."
)
encoder = codecs.getincrementalencoder("idna")()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org"), "xn--xample-9ta.")
self.assertEqual(encoder.encode(u"", True), "org")
encoder.reset()
self.assertEqual(encoder.encode(u"\xe4x"), "")
self.assertEqual(encoder.encode(u"ample.org."), "xn--xample-9ta.org.")
self.assertEqual(encoder.encode(u"", True), "")
class CodecsModuleTest(unittest.TestCase):
def test_decode(self):
self.assertEqual(codecs.decode('\xe4\xf6\xfc', 'latin-1'),
u'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.decode)
self.assertEqual(codecs.decode('abc'), u'abc')
self.assertRaises(UnicodeDecodeError, codecs.decode, '\xff', 'ascii')
def test_encode(self):
self.assertEqual(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
'\xe4\xf6\xfc')
self.assertRaises(TypeError, codecs.encode)
self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
self.assertEqual(codecs.encode(u'abc'), 'abc')
self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
def test_register(self):
self.assertRaises(TypeError, codecs.register)
self.assertRaises(TypeError, codecs.register, 42)
def test_lookup(self):
self.assertRaises(TypeError, codecs.lookup)
self.assertRaises(LookupError, codecs.lookup, "__spam__")
self.assertRaises(LookupError, codecs.lookup, " ")
def test_getencoder(self):
self.assertRaises(TypeError, codecs.getencoder)
self.assertRaises(LookupError, codecs.getencoder, "__spam__")
def test_getdecoder(self):
self.assertRaises(TypeError, codecs.getdecoder)
self.assertRaises(LookupError, codecs.getdecoder, "__spam__")
def test_getreader(self):
self.assertRaises(TypeError, codecs.getreader)
self.assertRaises(LookupError, codecs.getreader, "__spam__")
def test_getwriter(self):
self.assertRaises(TypeError, codecs.getwriter)
self.assertRaises(LookupError, codecs.getwriter, "__spam__")
def test_lookup_issue1813(self):
# Issue #1813: under Turkish locales, lookup of some codecs failed
# because 'I' is lowercased as a dotless "i"
oldlocale = locale.getlocale(locale.LC_CTYPE)
self.addCleanup(locale.setlocale, locale.LC_CTYPE, oldlocale)
try:
locale.setlocale(locale.LC_CTYPE, 'tr_TR')
except locale.Error:
# Unsupported locale on this system
self.skipTest('test needs Turkish locale')
c = codecs.lookup('ASCII')
self.assertEqual(c.name, 'ascii')
class StreamReaderTest(unittest.TestCase):
def setUp(self):
self.reader = codecs.getreader('utf-8')
self.stream = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
def test_readlines(self):
f = self.reader(self.stream)
self.assertEqual(f.readlines(), [u'\ud55c\n', u'\uae00'])
class EncodedFileTest(unittest.TestCase):
def test_basic(self):
f = StringIO.StringIO('\xed\x95\x9c\n\xea\xb8\x80')
ef = codecs.EncodedFile(f, 'utf-16-le', 'utf-8')
self.assertEqual(ef.read(), '\\\xd5\n\x00\x00\xae')
f = StringIO.StringIO()
ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
ef.write('\xc3\xbc')
self.assertEqual(f.getvalue(), '\xfc')
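def _demo_encodedfile():
    # Hedged sketch, not from the original file: EncodedFile(f, data_enc,
    # file_enc) transcodes on the fly -- bytes written in the data encoding
    # reach the underlying stream in the file encoding.
    f = StringIO.StringIO()
    ef = codecs.EncodedFile(f, 'utf-8', 'latin1')
    ef.write('\xc3\xa9')            # u"\xe9" encoded as UTF-8 ...
    assert f.getvalue() == '\xe9'   # ... stored as Latin-1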
class Str2StrTest(unittest.TestCase):
def test_read(self):
sin = "\x80".encode("base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.read()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
def test_readline(self):
sin = "\x80".encode("base64_codec")
reader = codecs.getreader("base64_codec")(StringIO.StringIO(sin))
sout = reader.readline()
self.assertEqual(sout, "\x80")
self.assertIsInstance(sout, str)
all_unicode_encodings = [
"ascii",
"base64_codec",
"big5",
"big5hkscs",
"charmap",
"cp037",
"cp1006",
"cp1026",
"cp1140",
"cp1250",
"cp1251",
"cp1252",
"cp1253",
"cp1254",
"cp1255",
"cp1256",
"cp1257",
"cp1258",
"cp424",
"cp437",
"cp500",
"cp720",
"cp737",
"cp775",
"cp850",
"cp852",
"cp855",
"cp856",
"cp857",
"cp858",
"cp860",
"cp861",
"cp862",
"cp863",
"cp864",
"cp865",
"cp866",
"cp869",
"cp874",
"cp875",
"cp932",
"cp949",
"cp950",
"euc_jis_2004",
"euc_jisx0213",
"euc_jp",
"euc_kr",
"gb18030",
"gb2312",
"gbk",
"hex_codec",
"hp_roman8",
"hz",
"idna",
"iso2022_jp",
"iso2022_jp_1",
"iso2022_jp_2",
"iso2022_jp_2004",
"iso2022_jp_3",
"iso2022_jp_ext",
"iso2022_kr",
"iso8859_1",
"iso8859_10",
"iso8859_11",
"iso8859_13",
"iso8859_14",
"iso8859_15",
"iso8859_16",
"iso8859_2",
"iso8859_3",
"iso8859_4",
"iso8859_5",
"iso8859_6",
"iso8859_7",
"iso8859_8",
"iso8859_9",
"johab",
"koi8_r",
"koi8_u",
"latin_1",
"mac_cyrillic",
"mac_greek",
"mac_iceland",
"mac_latin2",
"mac_roman",
"mac_turkish",
"palmos",
"ptcp154",
"punycode",
"raw_unicode_escape",
"rot_13",
"shift_jis",
"shift_jis_2004",
"shift_jisx0213",
"tis_620",
"unicode_escape",
"unicode_internal",
"utf_16",
"utf_16_be",
"utf_16_le",
"utf_7",
"utf_8",
]
if hasattr(codecs, "mbcs_encode"):
all_unicode_encodings.append("mbcs")
# The following encodings work only with str, not unicode
all_string_encodings = [
"quopri_codec",
"string_escape",
"uu_codec",
]
# The following encoding is not tested, because it's not supposed
# to work:
# "undefined"
# The following encodings don't work in stateful mode
broken_unicode_with_streams = [
"base64_codec",
"hex_codec",
"punycode",
"unicode_internal"
]
broken_incremental_coders = broken_unicode_with_streams[:]
# The following encodings only support "strict" mode
only_strict_mode = [
"idna",
"zlib_codec",
"bz2_codec",
]
try:
import bz2
except ImportError:
pass
else:
all_unicode_encodings.append("bz2_codec")
broken_unicode_with_streams.append("bz2_codec")
try:
import zlib
except ImportError:
pass
else:
all_unicode_encodings.append("zlib_codec")
broken_unicode_with_streams.append("zlib_codec")
class BasicUnicodeTest(unittest.TestCase):
def test_basics(self):
s = u"abc123" # all codecs should be able to encode these
for encoding in all_unicode_encodings:
name = codecs.lookup(encoding).name
if encoding.endswith("_codec"):
name += "_codec"
elif encoding == "latin_1":
name = "latin_1"
self.assertEqual(encoding.replace("_", "-"), name.replace("_", "-"))
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s), "%r != %r (encoding=%r)" % (size, len(s), encoding))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
if encoding not in broken_unicode_with_streams:
# check stream reader/writer
q = Queue()
writer = codecs.getwriter(encoding)(q)
encodedresult = ""
for c in s:
writer.write(c)
encodedresult += q.read()
q = Queue()
reader = codecs.getreader(encoding)(q)
decodedresult = u""
for c in encodedresult:
q.write(c)
decodedresult += reader.read()
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
if encoding not in broken_incremental_coders and _testcapi:
# check incremental decoder/encoder (fetched via the Python
# and C API) and iterencode()/iterdecode()
try:
encoder = codecs.getincrementalencoder(encoding)()
cencoder = _testcapi.codec_incrementalencoder(encoding)
except LookupError: # no IncrementalEncoder
pass
else:
# check incremental decoder/encoder
encodedresult = ""
for c in s:
encodedresult += encoder.encode(c)
encodedresult += encoder.encode(u"", True)
decoder = codecs.getincrementaldecoder(encoding)()
decodedresult = u""
for c in encodedresult:
decodedresult += decoder.decode(c)
decodedresult += decoder.decode("", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check C API
encodedresult = ""
for c in s:
encodedresult += cencoder.encode(c)
encodedresult += cencoder.encode(u"", True)
cdecoder = _testcapi.codec_incrementaldecoder(encoding)
decodedresult = u""
for c in encodedresult:
decodedresult += cdecoder.decode(c)
decodedresult += cdecoder.decode("", True)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
# check iterencode()/iterdecode()
result = u"".join(codecs.iterdecode(codecs.iterencode(s, encoding), encoding))
self.assertEqual(result, s, "%r != %r (encoding=%r)" % (result, s, encoding))
# check iterencode()/iterdecode() with empty string
result = u"".join(codecs.iterdecode(codecs.iterencode(u"", encoding), encoding))
self.assertEqual(result, u"")
if encoding not in only_strict_mode:
# check incremental decoder/encoder with errors argument
try:
encoder = codecs.getincrementalencoder(encoding)("ignore")
cencoder = _testcapi.codec_incrementalencoder(encoding, "ignore")
except LookupError: # no IncrementalEncoder
pass
else:
encodedresult = "".join(encoder.encode(c) for c in s)
decoder = codecs.getincrementaldecoder(encoding)("ignore")
decodedresult = u"".join(decoder.decode(c) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
encodedresult = "".join(cencoder.encode(c) for c in s)
cdecoder = _testcapi.codec_incrementaldecoder(encoding, "ignore")
decodedresult = u"".join(cdecoder.decode(c) for c in encodedresult)
self.assertEqual(decodedresult, s, "%r != %r (encoding=%r)" % (decodedresult, s, encoding))
def test_seek(self):
# all codecs should be able to encode these
s = u"%s\n%s\n" % (100*u"abc123", 100*u"def456")
for encoding in all_unicode_encodings:
if encoding == "idna": # FIXME: See SF bug #1163178
continue
if encoding in broken_unicode_with_streams:
continue
reader = codecs.getreader(encoding)(StringIO.StringIO(s.encode(encoding)))
for t in xrange(5):
# Test that calling seek resets the internal codec state and buffers
reader.seek(0, 0)
line = reader.readline()
self.assertEqual(s[:len(line)], line)
def test_bad_decode_args(self):
for encoding in all_unicode_encodings:
decoder = codecs.getdecoder(encoding)
self.assertRaises(TypeError, decoder)
if encoding not in ("idna", "punycode"):
self.assertRaises(TypeError, decoder, 42)
def test_bad_encode_args(self):
for encoding in all_unicode_encodings:
encoder = codecs.getencoder(encoding)
self.assertRaises(TypeError, encoder)
def test_encoding_map_type_initialized(self):
from encodings import cp1140
        # This used to crash; we are only verifying that it no longer does.
table_type = type(cp1140.encoding_table)
self.assertEqual(table_type, table_type)
class BasicStrTest(unittest.TestCase):
def test_basics(self):
s = "abc123"
for encoding in all_string_encodings:
(bytes, size) = codecs.getencoder(encoding)(s)
self.assertEqual(size, len(s))
(chars, size) = codecs.getdecoder(encoding)(bytes)
self.assertEqual(chars, s, "%r != %r (encoding=%r)" % (chars, s, encoding))
class CharmapTest(unittest.TestCase):
def test_decode_with_string_map(self):
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "strict", u"abc"),
(u"abc", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "replace", u"ab\ufffe"),
(u"ab\ufffd", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab"),
(u"ab", 3)
)
self.assertEqual(
codecs.charmap_decode("\x00\x01\x02", "ignore", u"ab\ufffe"),
(u"ab", 3)
)
allbytes = "".join(chr(i) for i in xrange(256))
self.assertEqual(
codecs.charmap_decode(allbytes, "ignore", u""),
(u"", len(allbytes))
)
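def _demo_charmap():
    # Hedged sketch, not from the original file: each input byte is an
    # index into the mapping string, so reordering the bytes reorders the
    # decoded characters; bytes past the map (or mapped to u"\ufffe") are
    # handled per the errors argument, as the tests above show.
    assert codecs.charmap_decode("\x02\x01\x00", "strict", u"abc") == (u"cba", 3)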
class WithStmtTest(unittest.TestCase):
def test_encodedfile(self):
f = StringIO.StringIO("\xc3\xbc")
with codecs.EncodedFile(f, "latin-1", "utf-8") as ef:
self.assertEqual(ef.read(), "\xfc")
def test_streamreaderwriter(self):
f = StringIO.StringIO("\xc3\xbc")
info = codecs.lookup("utf-8")
with codecs.StreamReaderWriter(f, info.streamreader,
info.streamwriter, 'strict') as srw:
self.assertEqual(srw.read(), u"\xfc")
class BomTest(unittest.TestCase):
def test_seek0(self):
data = u"1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
self.addCleanup(test_support.unlink, test_support.TESTFN)
for encoding in tests:
# Check if the BOM is written only once
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data[0])
self.assertNotEqual(f.tell(), 0)
f.seek(0)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# (StreamWriter) Check that the BOM is written after a seek(0)
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data[0])
self.assertNotEqual(f.writer.tell(), 0)
f.writer.seek(0)
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data)
# Check that the BOM is not written after a seek() at a position
# different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.write(data)
f.seek(f.tell())
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
# (StreamWriter) Check that the BOM is not written after a seek()
# at a position different than the start
with codecs.open(test_support.TESTFN, 'w+', encoding=encoding) as f:
f.writer.write(data)
f.writer.seek(f.writer.tell())
f.writer.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
def test_main():
test_support.run_unittest(
UTF32Test,
UTF32LETest,
UTF32BETest,
UTF16Test,
UTF16LETest,
UTF16BETest,
UTF8Test,
UTF8SigTest,
UTF7Test,
UTF16ExTest,
ReadBufferTest,
CharBufferTest,
EscapeDecodeTest,
RecodingTest,
PunycodeTest,
UnicodeInternalTest,
NameprepTest,
IDNACodecTest,
CodecsModuleTest,
StreamReaderTest,
EncodedFileTest,
Str2StrTest,
BasicUnicodeTest,
BasicStrTest,
CharmapTest,
WithStmtTest,
BomTest,
)
if __name__ == "__main__":
test_main()
|
bussiere/pypyjs
|
website/demo/home/rfk/repos/pypy/lib-python/2.7/test/test_codecs.py
|
Python
|
mit
| 57,686
|
[
"FEFF"
] |
6e04c80e8c91b34f842767ab94f307ab1d0f1e3e22f24f8f4735df3c75aeacae
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
except ImportError:
    from distutils.core import setup
    # distutils ships no test command module; fall back to the generic
    # Command base so this file still imports -- actually running the test
    # command via the PyTest class below effectively requires setuptools
    from distutils.cmd import Command as TestCommand
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
# via https://pytest.org/latest/goodpractises.html
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
        # import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
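# Hedged usage note, not in the original file: with the cmdclass hook at the
# bottom of this script, `python setup.py test -a "-x tests/"` runs py.test,
# where the -a/--pytest-args option forwards its value to pytest.main().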
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
print('Being built on ReadTheDocs so we are avoiding pulling in scikit-bio since it imports numpy...')
requirements = []
else:
with open('requirements.txt') as f:
requirements = f.read().splitlines()
test_requirements = [
'pytest',
'funcy',
'gensim'
]
setup(
name='pyLDAvis',
version='2.0.0',
description="Interactive topic model visualization. Port of the R package.",
long_description=readme + '\n\n' + history,
author="Ben Mabey",
author_email='ben@benmabey.com',
url='https://github.com/bmabey/pyLDAvis',
download_url = 'https://github.com/bmabey/pyLDAvis/tarball/2.0.0',
packages=[
'pyLDAvis',
],
package_dir={'pyLDAvis':
'pyLDAvis'},
include_package_data=True,
install_requires=requirements,
license="MIT",
zip_safe=False,
keywords=['data science', 'visualization'],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements,
cmdclass = {'test': PyTest}
)
|
napsternxg/pyLDAvis
|
setup.py
|
Python
|
bsd-3-clause
| 2,649
|
[
"scikit-bio"
] |
dc11f8623eb3569f7646ecf2bae732fd01747c698e7875790cdcd7701cc70ef8
|
from fabric.api import local, cd, put, env, shell_env, run
from fabric.context_managers import lcd
from fabric.contrib.project import rsync_project
env.use_ssh_config = True
env.hosts = ['root@server.artquest.ninja']
def deploy(what='all'):
with shell_env(GOOS="linux", GOARCH="amd64"):
local('godep go build')
with lcd('frontend'):
local('gulp build')
with cd('/srv/artquest'):
run("stop artquest || true")
rsync_project("/srv/artquest/static", "frontend/dist/")
put('rollingballs-server', 'artquest-server', mode=0755)
run("start artquest")
|
RollingBalls/rollingballs-server
|
fabfile.py
|
Python
|
mit
| 608
|
[
"GULP"
] |
8c40dd63b2432cdb8f9a3c04bb4dd602d734ed6f3613f0623e665e6fb0abfb0e
|
"""This module contains the "Viz" objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
import hashlib
import logging
import traceback
import uuid
import zlib
from collections import OrderedDict, defaultdict
from itertools import product
from datetime import datetime, timedelta
import pandas as pd
import numpy as np
from flask import request
from flask_babel import lazy_gettext as _
from markdown import markdown
import simplejson as json
from six import string_types, PY3
from dateutil import relativedelta as rdelta
from superset import app, utils, cache, get_manifest_file
from superset.utils import DTTM_ALIAS
config = app.config
stats_logger = config.get('STATS_LOGGER')
class BaseViz(object):
"""All visualizations derive this base class"""
viz_type = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
analytics = False
def __init__(self, datasource, form_data):
if not datasource:
raise Exception(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = self.form_data.get(
'token', 'token_' + uuid.uuid4().hex[:8])
self.metrics = self.form_data.get('metrics') or []
self.groupby = self.form_data.get('groupby') or []
self.status = None
self.error_message = None
def get_df(self, query_obj=None):
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
self.error_msg = ""
self.results = None
timestamp_format = None
if self.datasource.type == 'table':
dttm_col = self.datasource.get_col(query_obj['granularity'])
if dttm_col:
timestamp_format = dttm_col.python_date_format
        # The datasource here can be a different backend, but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
        # Transform the timestamp we received from the database into a
        # pandas-supported datetime format. If no python_date_format is
        # specified, the pattern is assumed to be the default ISO date
        # format; if it is "epoch_s"/"epoch_ms", the corresponding
        # unix-epoch parsing logic is used.
if df is None or df.empty:
self.status = utils.QueryStatus.FAILED
if not self.error_message:
self.error_message = "No data."
return pd.DataFrame()
else:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
df[DTTM_ALIAS] = pd.to_datetime(df[DTTM_ALIAS], utc=False)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
            df = df.replace([np.inf, -np.inf], np.nan)  # replace() returns a copy; reassign or this is a no-op
df = df.fillna(0)
return df
def get_extra_filters(self):
extra_filters = self.form_data.get('extra_filters', [])
return {f['col']: f['val'] for f in extra_filters}
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
gb = form_data.get("groupby") or []
metrics = form_data.get("metrics") or []
columns = form_data.get("columns") or []
groupby = []
for o in gb + columns:
if o not in groupby:
groupby.append(o)
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
# extra_filters are temporary/contextual filters that are external
# to the slice definition. We use those for dynamic interactive
# filters like the ones emitted by the "Filter Box" visualization
extra_filters = self.get_extra_filters()
granularity = (
form_data.get("granularity") or form_data.get("granularity_sqla")
)
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(
form_data.get("row_limit") or config.get("ROW_LIMIT"))
        # __from and __to are special extra_filters that target time
# boundaries. The rest of extra_filters are simple
# [column_name in list_of_values]. `__` prefix is there to avoid
# potential conflicts with column that would be named `from` or `to`
since = (
extra_filters.get('__from') or
form_data.get("since") or
config.get("SUPERSET_DEFAULT_SINCE", "1 year ago")
)
from_dttm = utils.parse_human_datetime(since)
now = datetime.now()
if from_dttm > now:
from_dttm = now - (from_dttm - now)
until = extra_filters.get('__to') or form_data.get("until", "now")
to_dttm = utils.parse_human_datetime(until)
if from_dttm > to_dttm:
raise Exception(_("From date cannot be larger than to date"))
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
'where': form_data.get("where", ''),
'having': form_data.get("having", ''),
'having_druid': form_data.get('having_filters') \
if 'having_filters' in form_data else [],
'time_grain_sqla': form_data.get("time_grain_sqla", ''),
'druid_time_origin': form_data.get("druid_time_origin", ''),
}
filters = form_data['filters'] if 'filters' in form_data \
else []
for col, vals in self.get_extra_filters().items():
if not (col and vals) or col.startswith('__'):
continue
elif col in self.datasource.filterable_column_names:
# Quote values with comma to avoid conflict
filters += [{
'col': col,
'op': 'in',
'val': vals,
}]
d = {
'granularity': granularity,
'from_dttm': from_dttm,
'to_dttm': to_dttm,
'is_timeseries': is_timeseries,
'groupby': groupby,
'metrics': metrics,
'row_limit': row_limit,
'filter': filters,
'timeseries_limit': limit,
'extras': extras,
'timeseries_limit_metric': timeseries_limit_metric,
'form_data': form_data,
}
return d
@property
def cache_timeout(self):
if self.form_data.get('cache_timeout'):
return int(self.form_data.get('cache_timeout'))
if self.datasource.cache_timeout:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, 'database') and
self.datasource.database.cache_timeout):
return self.datasource.database.cache_timeout
return config.get("CACHE_DEFAULT_TIMEOUT")
def get_json(self, force=False):
return json.dumps(
self.get_payload(force),
default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def cache_key(self):
s = str([(k, self.form_data[k]) for k in sorted(self.form_data.keys())])
return hashlib.md5(s.encode('utf-8')).hexdigest()
def get_payload(self, force=False):
"""Handles caching around the json payload retrieval"""
cache_key = self.cache_key
payload = None
force = force if force else self.form_data.get('force') == 'true'
if not force and cache:
payload = cache.get(cache_key)
if payload:
            stats_logger.incr('loaded_from_cache')
is_cached = True
try:
cached_data = zlib.decompress(payload)
if PY3:
cached_data = cached_data.decode('utf-8')
payload = json.loads(cached_data)
except Exception as e:
logging.error("Error reading cache: " +
utils.error_msg_from_exception(e))
payload = None
logging.info("Serving from cache")
if not payload:
            stats_logger.incr('loaded_from_source')
data = None
is_cached = False
cache_timeout = self.cache_timeout
stacktrace = None
try:
df = self.get_df()
if not self.error_message:
data = self.get_data(df)
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = str(e)
self.status = utils.QueryStatus.FAILED
data = None
stacktrace = traceback.format_exc()
payload = {
'cache_key': cache_key,
'cache_timeout': cache_timeout,
'data': data,
'error': self.error_message,
'form_data': self.form_data,
'query': self.query,
'status': self.status,
'stacktrace': stacktrace,
}
payload['cached_dttm'] = datetime.utcnow().isoformat().split('.')[0]
logging.info("Caching for the next {} seconds".format(
cache_timeout))
data = self.json_dumps(payload)
if PY3:
data = bytes(data, 'utf-8')
if cache and self.status != utils.QueryStatus.FAILED:
try:
cache.set(
cache_key,
zlib.compress(data),
timeout=cache_timeout)
except Exception as e:
                    # the cache.set call can fail if the backend is down, if
                    # the key is too large, or for other such reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
payload['is_cached'] = is_cached
return payload
def json_dumps(self, obj):
return json.dumps(obj, default=utils.json_int_dttm_ser, ignore_nan=True)
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
'form_data': self.form_data,
'token': self.token,
'viz_name': self.viz_type,
'filter_select_enabled': self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, encoding="utf-8")
def get_data(self, df):
return []
@property
def json_data(self):
return json.dumps(self.data)
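# Hedged sketch, not part of the original module: a minimal BaseViz subclass
# only needs a viz_type and a get_data() override; ExampleRecordsViz and its
# viz_type are made-up names for illustration, not shipped visualizations.
class ExampleRecordsViz(BaseViz):

    """Illustrative only: emit one record per dataframe row"""

    viz_type = "example_records"
    verbose_name = "Example Records"
    is_timeseries = False

    def get_data(self, df):
        # columns follow query_obj(): the groupby columns, then the metrics
        return df.to_dict(orient="records")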
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def should_be_timeseries(self):
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (
(fd.get('granularity') and fd.get('granularity') != 'all') or
(fd.get('granularity_sqla') and fd.get('time_grain_sqla'))
)
if fd.get('include_time') and not conditions_met:
raise Exception(_(
"Pick a granularity in the Time section or "
"uncheck 'Include Time'"))
return fd.get('include_time')
def query_obj(self):
d = super(TableViz, self).query_obj()
fd = self.form_data
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(_(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both"))
if fd.get('all_columns'):
d['columns'] = fd.get('all_columns')
d['groupby'] = []
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
d['is_timeseries'] = self.should_be_timeseries()
return d
def get_data(self, df):
if not self.should_be_timeseries() and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
return dict(
records=df.to_dict(orient="records"),
columns=list(df.columns),
)
def json_dumps(self, obj):
if self.form_data.get('all_columns'):
return json.dumps(obj, default=utils.json_iso_dttm_ser)
else:
return super(TableViz, self).json_dumps(obj)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(PivotTableViz, self).query_obj()
groupby = self.form_data.get('groupby')
columns = self.form_data.get('columns')
metrics = self.form_data.get('metrics')
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise Exception(_("Please choose at least one \"Group by\" field "))
if not metrics:
raise Exception(_("Please choose at least one metric"))
if (
any(v in groupby for v in columns) or
any(v in columns for v in groupby)):
raise Exception(_("'Group By' and 'Columns' can't overlap"))
return d
def get_data(self, df):
if (
self.form_data.get("granularity") == "all" and
DTTM_ALIAS in df):
del df[DTTM_ALIAS]
df = df.pivot_table(
index=self.form_data.get('groupby'),
columns=self.form_data.get('columns'),
values=self.form_data.get('metrics'),
aggfunc=self.form_data.get('pandas_aggfunc'),
margins=self.form_data.get('pivot_margins'),
)
# Display metrics side by side with each column
if self.form_data.get('combine_metric'):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep='',
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover").split(" ")),
)
class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
viz_type = "markup"
verbose_name = _("Markup")
is_timeseries = False
def get_df(self):
return True
def get_data(self, df):
markup_type = self.form_data.get("markup_type")
code = self.form_data.get("code", '')
if markup_type == "markdown":
code = markdown(code)
return dict(html=code, theme_css=get_manifest_file('theme.css'))
class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
viz_type = "separator"
verbose_name = _("Separator")
def get_data(self, df):
code = markdown(self.form_data.get("code", ''))
return dict(html=code)
class WordCloudViz(BaseViz):
"""Build a colorful word cloud
Uses the nice library at:
https://github.com/jasondavies/d3-cloud
"""
viz_type = "word_cloud"
verbose_name = _("Word Cloud")
is_timeseries = False
def query_obj(self):
d = super(WordCloudViz, self).query_obj()
d['metrics'] = [self.form_data.get('metric')]
d['groupby'] = [self.form_data.get('series')]
return d
def get_data(self, df):
# Ordering the columns
df = df[[self.form_data.get('series'), self.form_data.get('metric')]]
# Labeling the columns for uniform json schema
df.columns = ['text', 'size']
return df.to_dict(orient="records")
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric, df):
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v}
for n, v in zip(df.index, df[metric])]
else:
result = [{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]]
return result
def get_data(self, df):
df = df.set_index(self.form_data.get("groupby"))
chart_data = [{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = (
'<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>')
is_timeseries = True
def get_data(self, df):
form_data = self.form_data
df.columns = ["timestamp", "metric"]
timestamps = {str(obj["timestamp"].value / 10**9):
obj.get("metric") for obj in df.to_dict("records")}
start = utils.parse_human_datetime(form_data.get("since"))
end = utils.parse_human_datetime(form_data.get("until"))
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24*60*60) + 1
else:
range_ = diff_secs // (60*60) + 1
return {
"timestamps": timestamps,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self):
qry = super(CalHeatmapViz, self).query_obj()
qry["metrics"] = [self.form_data["metric"]]
return qry
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
for (label, key), value in row.items():
if key == "median":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data.get("metrics")) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({
"label": chart_label,
"values": box,
})
return chart_data
def get_data(self, df):
form_data = self.form_data
df = df.fillna(0)
# conform to NVD3 names
def Q1(series): # need to be named functions - can't use lambdas
return np.percentile(series, 25)
def Q3(series):
return np.percentile(series, 75)
whisker_type = form_data.get('whisker_options')
if whisker_type == "Tukey":
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
series = series[series <= upper_outer_lim]
return series[np.abs(series - upper_outer_lim).argmin()]
def whisker_low(series):
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
# find the closest value above the lower outer limit
series = series[series >= lower_outer_lim]
return series[np.abs(series - lower_outer_lim).argmin()]
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series):
return series.max()
def whisker_low(series):
return series.min()
elif " percentiles" in whisker_type:
low, high = whisker_type.replace(" percentiles", "").split("/")
def whisker_high(series):
return np.percentile(series, int(high))
def whisker_low(series):
return np.percentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.median, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get('groupby')).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
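# Hedged numeric sketch, not part of the original module: under the "Tukey"
# whisker option above, the high whisker is the largest observation at or
# below Q3 + 1.5 * IQR. For the series [1, 2, 3, 4, 100]: Q1 = 2, Q3 = 4,
# so the upper limit is 7, the whisker lands on 4, and 100 is an outlier.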
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BubbleViz, self).query_obj()
d['groupby'] = [
form_data.get('entity')
]
if form_data.get('series'):
d['groupby'].append(form_data.get('series'))
self.x_metric = form_data.get('x')
self.y_metric = form_data.get('y')
self.z_metric = form_data.get('size')
self.entity = form_data.get('entity')
self.series = form_data.get('series') or self.entity
d['row_limit'] = form_data.get('limit')
d['metrics'] = [
self.z_metric,
self.x_metric,
self.y_metric,
]
if not all(d['metrics'] + [self.entity]):
raise Exception(_("Pick a metric for x, y and size"))
return d
def get_data(self, df):
df['x'] = df[[self.x_metric]]
df['y'] = df[[self.y_metric]]
df['size'] = df[[self.z_metric]]
df['shape'] = 'circle'
df['group'] = df[[self.series]]
series = defaultdict(list)
for row in df.to_dict(orient='records'):
series[row['group']].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({
'key': k,
'values': v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super(BulletViz, self).query_obj()
self.metric = form_data.get('metric')
def as_strings(field):
value = form_data.get(field)
return value.split(',') if value else []
def as_floats(field):
return [float(x) for x in as_strings(field)]
self.ranges = as_floats('ranges')
self.range_labels = as_strings('range_labels')
self.markers = as_floats('markers')
self.marker_labels = as_strings('marker_labels')
self.marker_lines = as_floats('marker_lines')
self.marker_line_labels = as_strings('marker_line_labels')
d['metrics'] = [
self.metric,
]
if not self.metric:
raise Exception(_("Pick a metric to display"))
return d
def get_data(self, df):
df = df.fillna(0)
df['metric'] = df[[self.metric]]
values = df['metric'].values
return {
'measures': values.tolist(),
'ranges': self.ranges or [0, values.max() * 1.1],
'rangeLabels': self.range_labels or None,
'markers': self.markers or None,
'markerLabels': self.marker_labels or None,
'markerLines': self.marker_lines or None,
'markerLineLabels': self.marker_line_labels or None,
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super(BigNumberViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception(_("Pick a metric!"))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
compare_lag = form_data.get("compare_lag")
return {
'data': df.values.tolist(),
'compare_lag': compare_lag,
'compare_suffix': form_data.get('compare_suffix', ''),
}
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super(BigNumberTotalViz, self).query_obj()
metric = self.form_data.get('metric')
if not metric:
raise Exception(_("Pick a metric!"))
d['metrics'] = [self.form_data.get('metric')]
self.form_data['metric'] = metric
return d
def get_data(self, df):
form_data = self.form_data
df.sort_values(by=df.columns[0], inplace=True)
return {
'data': df.values.tolist(),
'subheader': form_data.get('subheader', ''),
}
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
def to_series(self, df, classed='', title_suffix=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, string_types):
series_title = name
else:
name = ["{}".format(s) for s in name]
if len(self.form_data.get('metrics')) > 1:
series_title = ", ".join(name)
else:
series_title = ", ".join(name[1:])
if title_suffix:
series_title += title_suffix
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
if fd.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
fm = fd.get("resample_fillmethod")
if not fm:
fm = None
how = fd.get("resample_how")
rule = fd.get("resample_rule")
if how and rule:
df = df.resample(rule, how=how, fill_method=fm)
if not fm:
df = df.fillna(0)
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
rolling_periods = fd.get("rolling_periods")
rolling_type = fd.get("rolling_type")
if rolling_type in ('mean', 'std', 'sum') and rolling_periods:
if rolling_type == 'mean':
df = pd.rolling_mean(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'std':
df = pd.rolling_std(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'sum':
df = pd.rolling_sum(df, int(rolling_periods), min_periods=0)
elif rolling_type == 'cumsum':
df = df.cumsum()
num_period_compare = fd.get("num_period_compare")
if num_period_compare:
num_period_compare = int(num_period_compare)
prt = fd.get('period_ratio_type')
if prt and prt == 'growth':
df = (df / df.shift(num_period_compare)) - 1
elif prt and prt == 'value':
df = df - df.shift(num_period_compare)
else:
df = df / df.shift(num_period_compare)
df = df[num_period_compare:]
chart_data = self.to_series(df)
time_compare = fd.get('time_compare')
if time_compare:
query_object = self.query_obj()
delta = utils.parse_human_timedelta(time_compare)
query_object['inner_from_dttm'] = query_object['from_dttm']
query_object['inner_to_dttm'] = query_object['to_dttm']
query_object['from_dttm'] -= delta
query_object['to_dttm'] -= delta
df2 = self.get_df(query_object)
df2[DTTM_ALIAS] += delta
df2 = df2.pivot_table(
index=DTTM_ALIAS,
columns=fd.get('groupby'),
values=fd.get('metrics'))
chart_data += self.to_series(
df2, classed='superset', title_suffix="---")
chart_data = sorted(chart_data, key=lambda x: x['key'])
return chart_data
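# Hedged numeric sketch, not part of the original module: with
# num_period_compare=1 and period_ratio_type="growth" above, each point
# becomes value / previous - 1, so the series [100, 110, 99] yields
# [0.10, -0.10] once the first (undefined) period is sliced off.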
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self):
d = super(NVD3DualLineViz, self).query_obj()
m1 = self.form_data.get('metric')
m2 = self.form_data.get('metric_2')
d['metrics'] = [m1, m2]
if not m1:
raise Exception(_("Pick a metric for left axis!"))
if not m2:
raise Exception(_("Pick a metric for right axis!"))
if m1 == m2:
raise Exception(_("Please choose different metrics"
" on left and right axis"))
return d
def to_series(self, df, classed=''):
cols = []
for col in df.columns:
if col == '':
cols.append('N/A')
elif col is None:
cols.append('NULL')
else:
cols.append(col)
df.columns = cols
series = df.to_dict('series')
chart_data = []
metrics = [
self.form_data.get('metric'),
self.form_data.get('metric_2')
]
for i, m in enumerate(metrics):
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{'x': ds, 'y': ys[ds] if ds in ys else None}
for ds in df.index
],
"yAxis": i+1,
"type": "line"
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
df = df.fillna(0)
if self.form_data.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
metric = fd.get('metric')
metric_2 = fd.get('metric_2')
df = df.pivot_table(
index=DTTM_ALIAS,
values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = 'compare'
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
viz_type = "pie"
verbose_name = _("Distribution - NVD3 - Pie Chart")
is_timeseries = False
def get_data(self, df):
df = df.pivot_table(
index=self.groupby,
values=[self.metrics[0]])
df.sort_values(by=self.metrics[0], ascending=False, inplace=True)
df = df.reset_index()
df.columns = ['x', 'y']
return df.to_dict(orient="records")
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self):
"""Returns the query object for this visualization"""
d = super(HistogramViz, self).query_obj()
d['row_limit'] = self.form_data.get(
'row_limit', int(config.get('VIZ_ROW_LIMIT')))
numeric_column = self.form_data.get('all_columns_x')
if numeric_column is None:
raise Exception(_("Must have one numeric column specified"))
d['columns'] = [numeric_column]
return d
def get_data(self, df):
"""Returns the chart data"""
chart_data = df[df.columns[0]].values.tolist()
return chart_data
class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self):
d = super(DistributionBarViz, self).query_obj() # noqa
fd = self.form_data
if (
len(d['groupby']) <
len(fd.get('groupby') or []) + len(fd.get('columns') or [])
):
raise Exception(
_("Can't have overlap between Series and Breakdowns"))
if not fd.get('metrics'):
raise Exception(_("Pick at least one metric"))
if not fd.get('groupby'):
raise Exception(_("Pick at least one field for [Series]"))
return d
def get_data(self, df):
fd = self.form_data
row = df.groupby(self.groupby).sum()[self.metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
columns = fd.get('columns') or []
pt = df.pivot_table(
index=self.groupby,
columns=columns,
values=self.metrics)
if fd.get("contribution"):
pt = pt.fillna(0)
pt = pt.T
pt = (pt / pt.sum()).T
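# Each row is rescaled to fractions of its own total, e.g. a group row of
# [2, 8] becomes [0.2, 0.8], so stacked bars show relative contribution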
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.iteritems():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, string_types):
series_title = name
elif len(self.metrics) > 1:
series_title = ", ".join(name)
else:
labels = [str(s) for s in name[1:]]
series_title = ", ".join(labels)
values = []
for i, v in ys.iteritems():
x = i
if isinstance(x, (tuple, list)):
x = ', '.join([str(s) for s in x])
else:
x = str(x)
values.append({
'x': x,
'y': v,
})
d = {
"key": series_title,
"values": values,
}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
'Kerry Rodden '
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>')
def get_data(self, df):
# if m1 == m2 duplicate the metric column
cols = self.form_data.get('groupby')
metric = self.form_data.get('metric')
secondary_metric = self.form_data.get('secondary_metric')
if metric == secondary_metric:
ndf = df
ndf.columns = [cols + ['m1', 'm2']]
else:
cols += [
self.form_data['metric'], self.form_data['secondary_metric']]
ndf = df[cols]
return json.loads(ndf.to_json(orient="values")) # TODO fix this nonsense
def query_obj(self):
qry = super(SunburstViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self):
qry = super(SankeyViz, self).query_obj()
if len(qry['groupby']) != 2:
raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
recs = df.to_dict(orient='records')
hierarchy = defaultdict(set)
for row in recs:
hierarchy[row['source']].add(row['target'])
def find_cycle(g):
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
for v in g:
cycle = visit(v)
if cycle:
return cycle
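# Behaviour sketch (hypothetical data): for {'a': {'b'}, 'b': {'a'}} the
# call returns an edge on a walk that reaches the loop, e.g. ('a', 'b');
# for a tree such as {'a': {'b'}, 'b': {'c'}} it returns None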
cycle = find_cycle(hierarchy)
if cycle:
raise Exception(_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}").format(cycle))
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self):
qry = super(DirectedForceViz, self).query_obj()
if len(self.form_data['groupby']) != 2:
raise Exception(_("Pick exactly 2 columns to 'Group By'"))
qry['metrics'] = [self.form_data['metric']]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
return df.to_dict(orient='records')
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self):
qry = super(ChordViz, self).query_obj()
fd = self.form_data
qry['groupby'] = [fd.get('groupby'), fd.get('columns')]
qry['metrics'] = [fd.get('metric')]
return qry
def get_data(self, df):
df.columns = ['source', 'target', 'value']
# Preparing a symmetrical matrix like d3.chord calls for
nodes = list(set(df['source']) | set(df['target']))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {
'nodes': list(nodes),
'matrix': m,
}
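# Shape sketch (hypothetical values): for nodes ['a', 'b'] the payload is
# {'nodes': ['a', 'b'], 'matrix': [[v_aa, v_ba], [v_ab, v_bb]]}, i.e.
# matrix[i][j] holds the flow from nodes[j] into nodes[i]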
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = 'From bl.ocks.org by john-guerra'
def query_obj(self):
qry = super(CountryMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
from superset.data import countries
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ['country_id', 'metric']
d = df.to_dict(orient='records')
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self):
qry = super(WorldMapViz, self).query_obj()
qry['metrics'] = [
self.form_data['metric'], self.form_data['secondary_metric']]
qry['groupby'] = [self.form_data['entity']]
return qry
def get_data(self, df):
from superset.data import countries
fd = self.form_data
cols = [fd.get('entity')]
metric = fd.get('metric')
secondary_metric = fd.get('secondary_metric')
if metric == secondary_metric:
ndf = df[cols]
# df[metric] will be a DataFrame
# because there are duplicate column names
ndf['m1'] = df[metric].iloc[:, 0]
ndf['m2'] = ndf['m1']
else:
cols += [metric, secondary_metric]
ndf = df[cols]
df = ndf
df.columns = ['country', 'm1', 'm2']
d = df.to_dict(orient='records')
for row in d:
country = None
if isinstance(row['country'], string_types):
country = countries.get(
fd.get('country_fieldtype'), row['country'])
if country:
row['country'] = country['cca3']
row['latitude'] = country['lat']
row['longitude'] = country['lng']
row['name'] = country['name']
else:
row['country'] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
def query_obj(self):
qry = super(FilterBoxViz, self).query_obj()
groupby = self.form_data.get('groupby')
if len(groupby) < 1 and not self.form_data.get('date_filter'):
raise Exception(_("Pick at least one filter field"))
qry['metrics'] = [
self.form_data['metric']]
return qry
def get_data(self, df):
qry = self.query_obj()
filters = [g for g in self.form_data['groupby']]
d = {}
for flt in filters:
qry['groupby'] = [flt]
df = super(FilterBoxViz, self).get_df(qry)
d[flt] = [{
'id': row[0],
'text': row[0],
'filter': flt,
'metric': row[1]}
for row in df.itertuples(index=False)
]
return d
class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
viz_type = "iframe"
verbose_name = _("iFrame")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def get_df(self):
return None
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
'Syntagmatic\'s library</a>')
is_timeseries = False
def query_obj(self):
d = super(ParallelCoordinatesViz, self).query_obj()
fd = self.form_data
d['metrics'] = copy.copy(fd.get('all_columns'))
second = fd.get('secondary_metric')
if second not in d['metrics']:
d['metrics'] += [second]
'''
d['groupby'] = [fd.get('series')]
'''
return d
def get_data(self, df):
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
'bl.ocks.org</a>')
def query_obj(self):
d = super(HeatmapViz, self).query_obj()
fd = self.form_data
d['metrics'] = [fd.get('metric')]
d['groupby'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
return d
def get_data(self, df):
fd = self.form_data
x = fd.get('all_columns_x')
y = fd.get('all_columns_y')
v = fd.get('metric')
if x == y:
df.columns = ['x', 'y', 'v']
else:
df = df[[x, y, v]]
df.columns = ['x', 'y', 'v']
norm = fd.get('normalize_across')
overall = False
if norm == 'heatmap':
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df['perc'] = (
gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min()))
)
if overall:
v = df.v
min_ = v.min()
df['perc'] = (v - min_) / (v.max() - min_)
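# Min-max rescaling into [0, 1]: e.g. values [2, 4, 6] map to a perc
# column of [0.0, 0.5, 1.0]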
return df.to_dict(orient="records")
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
'd3-horizon-chart</a>')
class KmeansViz(BaseViz):
"""k-Means clustering
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "kmeans"
verbose_name = _("k-Means clustering")
analytics = True
credits = (
'<a href="http://madlib.incubator.apache.org/docs/latest/group__grp__kmeans.html">'
'k-Means Clustering</a>')
def query_obj(self):
logging.info("[kmeans] query_obj")
d = super(KmeansViz, self).query_obj()
fd = self.form_data
d['metrics'] = copy.copy(fd.get('all_columns'))
second = fd.get('secondary_metric')
if second not in d['metrics']:
d['metrics'] += [second]
'''
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(_(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both"))
'''
if fd.get('all_columns'):
d['columns'] = fd.get('all_columns')
d['groupby'] = []
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
fd['analysis'] = True
statement = 'NULL::public.kmeans_udf_result, '
statement_col = ''
for col in d['columns']:
statement_col = statement_col + '\'' + col + '\','
statement_col = statement_col[:-1]
statement = statement + 'ARRAY[ ' + statement_col + ' ]'
statement = statement + ', ' + str(fd.get('num_cluster'))
statement = statement + ', \'' + fd.get('fn_dist') + '\''
statement = statement + ', \'' + fd.get('agg_centroid') + '\''
statement = statement + ', ' + str(fd.get('max_num_iterations'))
statement = statement + ', ' + str(fd.get('min_frac_reassigned'))
logging.info(statement)
fd['from_statement'] = 'kmeans_udf( ' + statement + ' )'
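# Example of the resulting statement (hypothetical form values: columns
# ['x1', 'x2'], 3 clusters, 'l2' distance, 'avg' centroids, 20 iterations,
# 0.001 reassignment fraction):
# kmeans_udf( NULL::public.kmeans_udf_result, ARRAY[ 'x1','x2' ], 3, 'l2', 'avg', 20, 0.001 )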
return d
def get_data(self, df):
logging.info("[kmeans] get_data")
return df.to_dict(orient="records")
class ArimaViz(BaseViz):
"""ARIMA - Time Series Analysis
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "arima"
verbose_name = _("ARIMA - Time Series Analysis")
analytics = True
credits = (
'<a href="http://madlib.apache.org/docs/latest/group__grp__arima.html">'
'ARIMA Time Series Analysis</a>')
def query_obj(self):
logging.info("[ARIMA] query_obj")
d = super(ArimaViz, self).query_obj()
fd = self.form_data
if fd.get('all_columns') and (fd.get('groupby') or fd.get('metrics')):
raise Exception(_(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both"))
if fd.get('timestamp_column'):
d['time_col'] = fd.get('timestamp_column')
if fd.get('timeseries_column'):
d['data_col'] = fd.get('timeseries_column')
d['groupby'] = []
order_by_cols = fd.get('order_by_cols') or []
d['orderby'] = [json.loads(t) for t in order_by_cols]
fd['analysis'] = True
statement = 'NULL::public.arima_udf_result'
statement = statement + ', \'' + self.datasource.name + '\''
statement = statement + ', \'' + d['time_col'] + '\''
statement = statement + ', \'' + d['data_col'] + '\''
statement = statement + ', ' + str(fd.get('steps_ahead'))
logging.info(statement)
fd['from_statement'] = 'arima_udf( ' + statement + ' )'
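# Example of the resulting statement (hypothetical values: datasource
# 'sales', time_col 'ds', data_col 'y', 10 steps ahead):
# arima_udf( NULL::public.arima_udf_result, 'sales', 'ds', 'y', 10 )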
return d
def get_data(self, df):
logging.info("[ARIMA] get_data")
return dict(
records=df.to_dict(orient="records"),
columns=list(df.columns),
)
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = (
'<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>')
def query_obj(self):
d = super(MapboxViz, self).query_obj()
fd = self.form_data
label_col = fd.get('mapbox_label')
if not fd.get('groupby'):
d['columns'] = [fd.get('all_columns_x'), fd.get('all_columns_y')]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise Exception(_(
"Must have a [Group By] column to have 'count' as the [Label]"))
d['columns'].append(label_col[0])
if fd.get('point_radius') != 'Auto':
d['columns'].append(fd.get('point_radius'))
d['columns'] = list(set(d['columns']))
else:
# Ensuring columns chosen are all in group by
if (label_col and len(label_col) >= 1 and
label_col[0] != "count" and
label_col[0] not in fd.get('groupby')):
raise Exception(_(
"Choice of [Label] must be present in [Group By]"))
if (fd.get("point_radius") != "Auto" and
fd.get("point_radius") not in fd.get('groupby')):
raise Exception(_(
"Choice of [Point Radius] must be present in [Group By]"))
if (fd.get('all_columns_x') not in fd.get('groupby') or
fd.get('all_columns_y') not in fd.get('groupby')):
raise Exception(_(
"[Longitude] and [Latitude] columns must be present in [Group By]"))
return d
def get_data(self, df):
fd = self.form_data
label_col = fd.get('mapbox_label')
custom_metric = label_col and len(label_col) >= 1
metric_col = [None] * len(df.index)
if custom_metric:
if label_col[0] == fd.get('all_columns_x'):
metric_col = df[fd.get('all_columns_x')]
elif label_col[0] == fd.get('all_columns_y'):
metric_col = df[fd.get('all_columns_y')]
else:
metric_col = df[label_col[0]]
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")])
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {
"metric": metric,
"radius": point_radius,
},
"geometry": {
"type": "Point",
"coordinates": [lon, lat],
}
}
for lon, lat, metric, point_radius
in zip(
df[fd.get('all_columns_x')],
df[fd.get('all_columns_y')],
metric_col, point_radius_col)
]
}
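# A single feature then looks like (hypothetical coordinates):
# {"type": "Feature", "properties": {"metric": 3, "radius": 5},
#  "geometry": {"type": "Point", "coordinates": [121.0, 14.6]}}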
return {
"geoJSON": geo_json,
"customMetric": custom_metric,
"mapboxApiKey": config.get('MAPBOX_API_KEY'),
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"viewportLongitude": fd.get("viewport_longitude"),
"viewportLatitude": fd.get("viewport_latitude"),
"viewportZoom": fd.get("viewport_zoom"),
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self):
query = super(EventFlowViz, self).query_obj()
form_data = self.form_data
event_key = form_data.get('all_columns_x')
entity_key = form_data.get('entity')
meta_keys = [
col for col in form_data.get('all_columns') if col != event_key and col != entity_key
]
query['columns'] = [event_key, entity_key] + meta_keys
if form_data['order_by_entity']:
query['orderby'] = [(entity_key, True)]
return query
def get_data(self, df):
return df.to_dict(orient="records")
viz_types_list = [
TableViz,
PivotTableViz,
NVD3TimeSeriesViz,
NVD3DualLineViz,
NVD3CompareTimeSeriesViz,
NVD3TimeSeriesStackedViz,
NVD3TimeSeriesBarViz,
DistributionBarViz,
DistributionPieViz,
BubbleViz,
BulletViz,
MarkupViz,
WordCloudViz,
BigNumberViz,
BigNumberTotalViz,
SunburstViz,
DirectedForceViz,
SankeyViz,
CountryMapViz,
ChordViz,
WorldMapViz,
FilterBoxViz,
IFrameViz,
ParallelCoordinatesViz,
HeatmapViz,
BoxPlotViz,
TreemapViz,
CalHeatmapViz,
HorizonViz,
MapboxViz,
HistogramViz,
SeparatorViz,
EventFlowViz,
KmeansViz,
ArimaViz,
]
viz_types = OrderedDict([(v.viz_type, v) for v in viz_types_list
if v.viz_type not in config.get('VIZ_TYPE_BLACKLIST')])
|
nekia/incubator-superset-dev
|
superset/viz.py
|
Python
|
apache-2.0
| 58,235
|
[
"VisIt"
] |
1f93dd14390b4e8402665d88f17ee3207bef40afb735929dab9e947986ea62c7
|
import argparse
import re
from subprocess import Popen, PIPE
import json
from os import remove
parser = argparse.ArgumentParser(description="Convert a Seqware decider JSON file for BWA to an ansible host inventory file")
parser.add_argument("--run_generator", help="Enabling this option automatically runs the generator and passes the standard out of the generator to the program as input. If you enable this option you should indicate the path to the generator JAR via the --generator_path argument.", action="store_true")
parser.add_argument("--generator_path", help="Path to the generator JAR")
parser.add_argument("--input_file_path", help="Full path to the input JSON file")
parser.add_argument("output_file_path", help="Full path to the inventory host file to be generated")
args = parser.parse_args()
arg_dict = vars(args)
run_generator = arg_dict["run_generator"]
generator_path = arg_dict["generator_path"]
input_file_path = arg_dict["input_file_path"]
output_file_path = arg_dict["output_file_path"]
tmp_json_filename = "generator_tmp.json"
if run_generator:
(prog_out, prog_err) = Popen(["java", "-jar", generator_path, "--aws", "--output", tmp_json_filename],stdin=PIPE, stdout=PIPE, stderr=PIPE).communicate()
input_file_path = tmp_json_filename
infile = open(input_file_path, "r")
json_text = json.load(infile)
infile.close()
if run_generator:
remove(tmp_json_filename)
outfile = open(output_file_path, "w")
outfile.write("[all-masters]\n")
for instance_name in json_text:
webservice_url = json_text[instance_name]["webservice"]
my_match = re.search(r'[0-9]+(?:\.[0-9]+){3}', webservice_url)
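# e.g. a webservice URL such as "http://10.0.0.5:8080/SeqWareWebService"
# (hypothetical address) yields "10.0.0.5"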
my_ip = ""
if my_match:
my_ip = my_match.group(0)
outfile.write(instance_name + " ansible_ssh_host=" + my_ip + "\n")
outfile.close()
|
SeqWare/seqware-sandbox
|
ansible_pancancer/host_inventory_form_json.py
|
Python
|
gpl-3.0
| 1,811
|
[
"BWA"
] |
55e4aee073690b28c5f7f5673e70dd83b8a3a10e72ba4483255ba33100beb42d
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from gluon.validators import IS_NOT_EMPTY
from s3 import FS, ICON, IS_LOCATION, IS_ONE_OF, S3DateTime, S3LocationSelector, S3Represent, \
S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget, \
s3_auth_user_represent_name, s3_avatar_represent
datetime_represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
def config(settings):
"""
Template settings for Requests Management
- for Philippines
http://eden.sahanafoundation.org/wiki/Deployments/Philippines/Haiyan
"""
T = current.T
s3 = current.response.s3
# -----------------------------------------------------------------------------
# Pre-Populate
settings.base.prepopulate += ("historic/Philippines", "default/users")
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# =============================================================================
# System Settings
# -----------------------------------------------------------------------------
# Authorization Settings
# Users can self-register
#settings.security.self_registration = False
# Users need to verify their email
settings.auth.registration_requires_verification = True
# Users don't need to be approved
#settings.auth.registration_requires_approval = True
# Organisation links are either done automatically
# - by registering with official domain of Org
# or Manually by Call Center staff
#settings.auth.registration_requests_organisation = True
#settings.auth.registration_organisation_required = True
settings.auth.registration_requests_site = False
# Uncomment this to allow Admin to see Organisations in user Admin even if the Registration doesn't request this
settings.auth.admin_sees_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff")}
settings.auth.registration_link_user_to_default = ["staff"]
settings.auth.registration_roles = {"organisation_id": ["USER"],
}
# Terms of Service to be able to Register on the system
# uses <template>/views/tos.html
settings.auth.terms_of_service = True
settings.auth.show_utc_offset = False
settings.auth.show_link = False
# -----------------------------------------------------------------------------
# Security Policy
settings.security.policy = 5 # Apply Controller, Function and Table ACLs
settings.security.map = True
# Owner Entity
settings.auth.person_realm_human_resource_site_then_org = False
# -----------------------------------------------------------------------------
# Theme (folder to use for views/layout.html)
settings.base.theme = "historic.Philippines"
# Formstyles
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
# Icons
settings.ui.icons = "font-awesome3"
# Maps
#settings.gis.map_height = 600
#settings.gis.map_width = 854
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# -----------------------------------------------------------------------------
# L10n (Localization) settings
settings.L10n.languages = OrderedDict([
("en", "English"),
# ("tl", "Tagalog"),
])
# Default Language
settings.L10n.default_language = "en"
# Default timezone for users
settings.L10n.utc_offset = "+0800"
# Unsortable 'pretty' date format
settings.L10n.date_format = "%d %b %Y"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Uncomment this to Translate CMS Series Names
# - we want this on when running s3translate but off in normal usage as we use the English names to lookup icons in render_posts
#settings.L10n.translate_cms_series = True
# Uncomment this to Translate Location Names
#settings.L10n.translate_gis_location = True
# Restrict the Location Selector to just certain countries
settings.gis.countries = ["PH"]
# Until we add support to S3LocationSelector to set dropdowns from LatLons
#settings.gis.check_within_parent_boundaries = False
# Uncomment to hide Layer Properties tool
#settings.gis.layer_properties = False
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# -----------------------------------------------------------------------------
# Finance settings
settings.fin.currencies = {
"PHP" : "Philippine Pesos",
#"EUR" : "Euros",
#"GBP" : "Great British Pounds",
"USD" : "United States Dollars",
}
settings.fin.currency_default = "PHP"
# -----------------------------------------------------------------------------
# Enable this for a UN-style deployment
#settings.ui.cluster = True
# Enable this to use the label 'Camp' instead of 'Shelter'
#settings.ui.camp = True
# -----------------------------------------------------------------------------
# Uncomment to restrict the export formats available
#settings.ui.export_formats = ["xls"]
settings.ui.update_label = "Edit"
# -----------------------------------------------------------------------------
# Summary Pages
settings.ui.summary = [#{"common": True,
# "name": "cms",
# "widgets": [{"method": "cms"}]
# },
{"name": "table",
"label": "Table",
"widgets": [{"method": "datatable"}]
},
{"name": "map",
"label": "Map",
"widgets": [{"method": "map", "ajax_init": True}],
},
{"name": "charts",
"label": "Reports",
"widgets": [{"method": "report", "ajax_init": True}]
},
]
settings.search.filter_manager = False
# Filter forms - style for Summary pages
#def filter_formstyle(row_id, label, widget, comment, hidden=False):
# return DIV(label, widget, comment,
# _id=row_id,
# _class="horiz_filter_form")
# =============================================================================
# Module Settings
# -----------------------------------------------------------------------------
# Human Resource Management
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an organisation
settings.hrm.org_required = False
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to disable the use of HR Skills
settings.hrm.use_skills = False
# Uncomment to disable the use of HR Teams
settings.hrm.teams = False
# Uncomment to hide fields in S3AddPersonWidget[2]
settings.pr.request_dob = False
settings.pr.request_gender = False
# -----------------------------------------------------------------------------
# Org
#settings.org.site_label = "Office/Shelter/Hospital"
settings.org.site_label = "Site"
settings.org.site_autocomplete = True
# Extra fields to show in Autocomplete Representations
settings.org.site_autocomplete_fields = ["location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
]
# -----------------------------------------------------------------------------
# Project
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Links to Filtered Components for Donors & Partners
#settings.project.organisation_roles = {
# 1: T("Host National Society"),
# 2: T("Partner"),
# 3: T("Donor"),
# #4: T("Customer"), # T("Beneficiary")?
# #5: T("Supplier"),
# 9: T("Partner National Society"),
#}
# -----------------------------------------------------------------------------
# Notifications
# Template for the subject line in update notifications
#settings.msg.notify_subject = "$S %s" % T("Notification")
settings.msg.notify_subject = "$S Notification"
# -----------------------------------------------------------------------------
def currency_represent(v):
"""
Custom Representation of Currencies
"""
if v == "USD":
return "$"
elif v == "EUR":
return "€"
elif v == "GBP":
return "£"
else:
# e.g. CHF
return v
# -----------------------------------------------------------------------------
def render_contacts(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Contacts on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["hrm_human_resource.id"]
item_class = "thumbnail"
raw = record._row
#author = record["hrm_human_resource.modified_by"]
date = record["hrm_human_resource.modified_on"]
fullname = record["hrm_human_resource.person_id"]
job_title = raw["hrm_human_resource.job_title_id"] or ""
if job_title:
job_title = "- %s" % record["hrm_human_resource.job_title_id"]
#organisation = record["hrm_human_resource.organisation_id"]
organisation_id = raw["hrm_human_resource.organisation_id"]
#org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
pe_id = raw["pr_person.pe_id"]
person_id = raw["hrm_human_resource.person_id"]
location = record["org_site.location_id"]
location_id = raw["org_site.location_id"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or T("no office assigned")
email = raw["pr_email_contact.value"] or T("no email address")
if isinstance(email, list):
email = email[0]
phone = raw["pr_phone_contact.value"] or T("no phone number")
if isinstance(phone, list):
phone = phone[0]
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
query = (ltable.pe_id == pe_id)
row = db(query).select(ltable.user_id,
limitby=(0, 1)
).first()
if row:
# Use Personal Avatar
# @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
avatar = s3_avatar_represent(row.user_id,
_class="media-object")
else:
avatar = IMG(_src=URL(c="static", f="img", args="blank-user.gif"),
_class="media-object")
# Edit Bar
permit = current.auth.s3_has_permission
table = db.pr_person
if permit("update", table, record_id=person_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_url = URL(c="hrm", f="person",
args=[person_id, "update.popup"],
vars=vars)
title_update = current.response.s3.crud_strings.hrm_human_resource.title_update
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=edit_url,
_class="s3_modal",
_title=title_update,
)
else:
edit_btn = ""
edit_url = "#"
title_update = ""
# Deletions failing due to Integrity Errors
#if permit("delete", table, record_id=person_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
#else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
avatar = A(avatar,
_href=edit_url,
_class="pull-left s3_modal",
_title=title_update,
)
# Render the item
body = TAG[""](P(fullname,
" ",
SPAN(job_title),
_class="person_pos",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
I(_class="icon-envelope-alt"),
" ",
SPAN(email),
_class="card_1_line",
),
P(I(_class="icon-home"),
" ",
address,
_class="card_manylines",
))
item = DIV(DIV(SPAN(" ", _class="card-title"),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(body,
# Organisation only needed if displaying elsewhere than org profile
# Author confusing with main contact record
#DIV(#author,
# #" - ",
# A(organisation,
# _href=org_url,
# _class="card-organisation",
# ),
# _class="card-person",
# ),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def quote_unicode(s):
"""
Quote unicode strings for URLs for Rocket
"""
chars = []
for char in s:
o = ord(char)
if o < 128:
chars.append(char)
else:
chars.append(hex(o).replace("0x", "%").upper())
return "".join(chars)
# -----------------------------------------------------------------------------
def render_locations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = raw["gis_location.name"]
level = raw["gis_location.level"]
L1 = raw["gis_location.L1"]
L2 = raw["gis_location.L2"]
L3 = raw["gis_location.L3"]
L4 = raw["gis_location.L4"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
if level == "L1":
represent = name
if level == "L2":
represent = "%s (%s)" % (name, L1)
elif level == "L3":
represent = "%s (%s, %s)" % (name, L2, L1)
elif level == "L4":
represent = "%s (%s, %s, %s)" % (name, L3, L2, L1)
else:
# L0 or specific
represent = name
# Users don't edit locations
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars={"refresh": list_id,
# "record": record_id}),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Tallies
# NB We assume that all records are readable here
# Search all sub-locations
locations = current.gis.get_children(record_id)
locations = [l.id for l in locations]
locations.append(record_id)
db = current.db
s3db = current.s3db
stable = s3db.org_site
query = (stable.deleted == False) & \
(stable.location_id.belongs(locations))
count = stable.id.count()
row = db(query).select(count).first()
if row:
tally_sites = row[count]
else:
tally_sites = 0
table = s3db.req_req
query = (table.deleted == False) & \
(stable.site_id == table.site_id) & \
(stable.location_id.belongs(locations))
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_reqs = row[count]
else:
tally_reqs = 0
table = s3db.req_commit
query = (table.deleted == False) & \
(table.location_id.belongs(locations))
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_commits = row[count]
else:
tally_commits = 0
if level == "L4":
next_Lx = ""
next_Lx_label = ""
else:
if level == "L0":
next_Lx = "L1"
next_Lx_label = "Regions"
if level == "L1":
next_Lx = "L2"
next_Lx_label = "Provinces"
elif level == "L2":
next_Lx = "L3"
next_Lx_label = "Municipalities / Cities"
elif level == "L3":
next_Lx = "L4"
next_Lx_label = "Barangays"
table = db.gis_location
query = (table.deleted == False) & \
(table.level == next_Lx) & \
(table.parent == record_id)
count = table.id.count()
row = db(query).select(count).first()
if row:
tally_Lx = row[count]
else:
tally_Lx = 0
next_url = URL(c="gis", f="location",
args=["datalist"],
vars={"~.level": next_Lx,
"~.parent": record_id,
})
next_Lx_label = A(next_Lx_label,
_href=next_url,
)
next_Lx = SPAN(tally_Lx,
_class="badge",
)
# Build the icon, if it doesn't already exist
filename = "%s.svg" % record_id
import os
filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
if not os.path.exists(filepath):
gtable = db.gis_location
loc = db(gtable.id == record_id).select(gtable.wkt,
limitby=(0, 1)
).first()
if loc and loc.wkt:
from s3.s3codecs.svg import S3SVG
S3SVG.write_file(filename, loc.wkt)
# Render the item
item = DIV(DIV(A(IMG(_class="media-object",
_src=URL(c="static",
f="cache",
args=["svg", filename],
)
),
_class="pull-left",
_href=location_url,
),
DIV(SPAN(A(represent,
_href=location_url,
_class="media-heading"
),
),
#edit_bar,
_class="card-header-select",
),
DIV(P(next_Lx_label,
next_Lx,
T("Sites"),
SPAN(tally_sites,
_class="badge",
),
T("Requests"),
SPAN(tally_reqs,
_class="badge",
),
T("Donations"),
SPAN(tally_commits,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_locations_profile(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Locations on the Profile Page
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["gis_location.id"]
item_class = "thumbnail"
raw = record._row
name = record["gis_location.name"]
location_url = URL(c="gis", f="location",
args=[record_id, "profile"])
# Placeholder to maintain style
#logo = DIV(IMG(_class="media-object"),
# _class="pull-left")
# We don't Edit Locations
# Edit Bar
# permit = current.auth.s3_has_permission
# table = current.db.gis_location
# if permit("update", table, record_id=record_id):
# vars = {"refresh": list_id,
# "record": record_id,
# }
# f = current.request.function
# if f == "organisation" and organisation_id:
# vars["(organisation)"] = organisation_id
# edit_btn = A(I(" ", _class="icon icon-edit"),
# _href=URL(c="gis", f="location",
# args=[record_id, "update.popup"],
# vars=vars),
# _class="s3_modal",
# _title=current.response.s3.crud_strings.gis_location.title_update,
# )
# else:
# edit_btn = ""
# if permit("delete", table, record_id=record_id):
# delete_btn = A(I(" ", _class="icon icon-trash"),
# _class="dl-item-delete",
# )
# else:
# delete_btn = ""
# edit_bar = DIV(edit_btn,
# delete_btn,
# _class="edit-bar fright",
# )
# Render the item
item = DIV(DIV(DIV(#SPAN(A(name,
# _href=location_url,
# ),
# _class="location-title"),
#" ",
#edit_bar,
P(A(name,
_href=location_url,
),
_class="card_comments"),
_class="span5"), # card-details
_class="row",
),
)
return item
# -----------------------------------------------------------------------------
def render_sites(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Facilities on the Profile pages
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_facility.id"]
item_class = "thumbnail"
raw = record._row
name = record["org_facility.name"]
site_id = raw["org_facility.id"]
opening_times = raw["org_facility.opening_times"] or ""
author = record["org_facility.modified_by"]
date = record["org_facility.modified_on"]
organisation = record["org_facility.organisation_id"]
organisation_id = raw["org_facility.organisation_id"]
location = record["org_facility.location_id"]
level = raw["gis_location.level"]
if level:
location_id = raw["org_facility.location_id"]
else:
location_id = raw["gis_location.parent"]
location_url = URL(c="gis", f="location",
args=[location_id, "profile"])
address = raw["gis_location.addr_street"] or ""
phone = raw["org_facility.phone1"] or ""
facility_type = record["org_site_facility_type.facility_type_id"]
comments = record["org_facility.comments"] or ""
logo = raw["org_organisation.logo"]
site_url = URL(c="org", f="facility", args=[site_id, "profile"])
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
facility_status = raw["org_site_status.facility_status"] or ""
if facility_status:
if facility_status == 1:
icon = "thumbs-up-alt"
colour = "green"
elif facility_status == 2:
icon = "thumbs-down-alt"
colour = "amber"
elif facility_status == 3:
icon = "reply-all"
colour = "red"
elif facility_status == 4:
icon = "remove"
colour = "red"
elif facility_status == 99:
icon = "question"
colour = ""
facility_status = P(#I(_class="icon-%s" % icon),
#" ",
SPAN("%s: %s" % (T("Status"), record["org_site_status.facility_status"])),
" ",
_class="card_1_line %s" % colour,
)
power_supply_type = raw["org_site_status.power_supply_type"] or ""
if power_supply_type:
if power_supply_type == 1:
icon = "thumbs-up-alt"
colour = "green"
elif power_supply_type == 2:
icon = "cogs"
colour = "amber"
elif power_supply_type == 98:
icon = "question"
colour = "amber"
elif power_supply_type == 99:
icon = "remove"
colour = "red"
power_supply_type = P(#I(_class="icon-%s" % icon),
#" ",
SPAN("%s: %s" % (T("Power"), record["org_site_status.power_supply_type"])),
" ",
_class="card_1_line %s" % colour,
)
# Edit Bar
permit = current.auth.s3_has_permission
table = current.db.org_facility
if permit("update", table, record_id=record_id):
vars = {"refresh": list_id,
"record": record_id,
}
f = current.request.function
if f == "organisation" and organisation_id:
vars["(organisation)"] = organisation_id
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="facility",
args=[record_id, "update.popup"],
vars=vars),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_facility.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Render the item
body = TAG[""](P(I(_class="icon-flag"),
" ",
SPAN(facility_type),
" ",
_class="card_1_line",
),
P(I(_class="icon-home"),
" ",
address,
_class="card_manylines",
),
P(I(_class="icon-time"),
" ",
SPAN(opening_times),
" ",
_class="card_1_line",
),
P(I(_class="icon-phone"),
" ",
SPAN(phone),
" ",
_class="card_1_line",
),
facility_status,
power_supply_type,
P(comments,
_class="card_manylines s3-truncate",
),
)
item = DIV(DIV(SPAN(A(name,
_href=site_url,
),
_class="card-title",
),
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(DIV(body,
DIV(author,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_organisations(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Organisations on the Stakeholder Selection Page
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["org_organisation.id"]
item_class = "thumbnail span6" # span6 for 2 cols
raw = record._row
name = record["org_organisation.name"]
logo = raw["org_organisation.logo"]
phone = raw["org_organisation.phone"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
money = raw["req_organisation_needs.money"]
if money:
money_details = record["req_organisation_needs.money_details"]
money_details = SPAN(XML(money_details),
_class="s3-truncate")
money_details = P(I(_class="icon icon-dollar"),
" ",
money_details,
_class="card_manylines",
)
else:
# Include anyway to make cards align
money_details = P(I(_class="icon icon-dollar"),
" ",
_class="card_1_line",
)
#time = raw["req_organisation_needs.vol"]
#if time:
# time_details = record["req_organisation_needs.vol_details"]
# time_details = P(I(_class="icon icon-time"),
# " ",
# XML(time_details),
# _class="card_1_line",
# )
#else:
# time_details = ""
org_url = URL(c="org", f="organisation", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
db = current.db
permit = current.auth.s3_has_permission
table = db.org_organisation
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.org_organisation.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
# Tallies
# NB We assume that all records are readable here
s3db = current.s3db
stable = s3db.org_site
query = (stable.deleted == False) & \
(stable.obsolete == False) & \
(stable.organisation_id == record_id)
tally_sites = db(query).count()
table = s3db.req_req
query = (table.deleted == False) & \
(stable.site_id == table.site_id) & \
(stable.organisation_id == record_id)
tally_reqs = db(query).count()
table = s3db.req_commit
query = (table.deleted == False) & \
(table.organisation_id == record_id)
tally_commits = db(query).count()
# Render the item
item = DIV(DIV(logo,
DIV(SPAN(A(name,
_href=org_url,
_class="media-heading"
),
),
edit_bar,
_class="card-header-select",
),
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
money_details,
#time_details,
P(T("Sites"),
SPAN(tally_sites,
_class="badge",
),
T("Requests"),
SPAN(tally_reqs,
_class="badge",
),
T("Donations"),
SPAN(tally_commits,
_class="badge",
),
_class="tally",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
# -----------------------------------------------------------------------------
def render_org_needs(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Needs
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_organisation_needs.id"]
item_class = "thumbnail"
raw = record._row
logo = raw["org_organisation.logo"]
phone = raw["org_organisation.phone"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
author = record["req_organisation_needs.modified_by"]
date = record["req_organisation_needs.modified_on"]
money = raw["req_organisation_needs.money"]
if money:
money_details = record["req_organisation_needs.money_details"]
money_details = P(I(_class="icon icon-dollar"),
" ",
XML(money_details),
_class="card_manylines",
)
else:
money_details = ""
time = raw["req_organisation_needs.vol"]
if time:
time_details = record["req_organisation_needs.vol_details"]
time_details = P(I(_class="icon icon-time"),
" ",
XML(time_details),
_class="card_manylines",
)
else:
time_details = ""
org_id = raw["org_organisation.id"]
org_url = URL(c="org", f="organisation", args=[org_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=org_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.req_organisation_needs
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="req", f="organisation_needs",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.req_organisation_needs.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
if current.request.controller == "org":
# Org Profile page - no need to repeat Org Name
title = " "
else:
title = raw["org_organisation.name"]
# Render the item
item = DIV(DIV(SPAN(title, _class="card-title"),
SPAN(author, _class="location-title"),
SPAN(date, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
money_details,
time_details,
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
s3.render_org_needs = render_org_needs
# -----------------------------------------------------------------------------
def render_site_needs(list_id, item_id, resource, rfields, record):
"""
Custom dataList item renderer for Needs
- UNUSED
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_site_needs.id"]
item_class = "thumbnail"
raw = record._row
logo = raw["org_organisation.logo"]
addresses = raw["gis_location.addr_street"]
if addresses:
if isinstance(addresses, list):
address = addresses[0]
else:
address = addresses
else:
address = ""
#contact = raw["org_facility.contact"] or ""
opening_times = raw["org_facility.opening_times"] or ""
phone = raw["org_facility.phone1"] or ""
website = raw["org_organisation.website"] or ""
if website:
website = A(website, _href=website)
author = record["req_site_needs.modified_by"]
date = record["req_site_needs.modified_on"]
#goods = raw["req_site_needs.goods"]
#if goods:
# goods_details = record["req_site_needs.goods_details"]
# goods_details = P(I(_class="icon icon-truck"),
# " ",
# XML(goods_details),
# _class="card_1_line",
# )
#else:
# goods_details = ""
#time = raw["req_site_needs.vol"]
#if time:
# time_details = record["req_site_needs.vol_details"]
# time_details = P(I(_class="icon icon-time"),
# " ",
# XML(time_details),
# _class="card_1_line",
# )
#else:
# time_details = ""
site_url = URL(c="org", f="facility", args=[record_id, "profile"])
if logo:
logo = A(IMG(_src=URL(c="default", f="download", args=[logo]),
_class="media-object",
),
_href=site_url,
_class="pull-left",
)
else:
logo = DIV(IMG(_class="media-object"),
_class="pull-left")
permit = current.auth.s3_has_permission
table = current.db.req_site_needs
if permit("update", table, record_id=record_id):
edit_btn = A(I(" ", _class="icon icon-edit"),
_href=URL(c="req", f="site_needs",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.response.s3.crud_strings.req_site_needs.title_update,
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(I(" ", _class="icon icon-trash"),
_class="dl-item-delete",
)
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
if current.request.controller == "org":
# Site Profile page - no need to repeat Site Name
title = " "
else:
title = raw["org_facility.name"]
# Render the item
item = DIV(DIV(SPAN(title, _class="card-title"),
SPAN(author, _class="location-title"),
SPAN(date, _class="date-title"),
edit_bar,
_class="card-header",
),
DIV(logo,
DIV(#goods_details,
#time_details,
P(I(_class="icon icon-home"),
" ",
address,
_class="card_manylines",
),
P(I(_class="icon-time"),
" ",
SPAN(opening_times),
" ",
_class="card_1_line",
),
P(I(_class="icon icon-phone"),
" ",
phone,
_class="card_1_line",
),
P(I(_class="icon icon-map"),
" ",
website,
_class="card_1_line",
),
P(I(_class="icon icon-user"),
" ",
contact,
_class="card_1_line",
),
_class="media-body",
),
_class="media",
),
_class=item_class,
_id=item_id,
)
return item
s3.render_site_needs = render_site_needs
# -----------------------------------------------------------------------------
def customise_gis_location_controller(**attr):
"""
Customise gis_location controller
- Profile Page
"""
db = current.db
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
s3db = current.s3db
table = s3db.gis_location
if r.method == "datalist":
# Lx selection page
# 2-column datalist
s3.dl_rowsize = 2
# The default page length of 5 triggers an AJAX call, so load all records by default
s3.dl_pagelength = 17
level = current.request.get_vars.get("~.level", None)
if not level:
# Just show PH L1s
level = "L1"
query = (table.L0 == "Philippines") & (table.level == "L1")
r.resource.add_filter(query)
parent = current.request.get_vars.get("~.parent", None)
if level == "L1":
s3.crud_strings["gis_location"].title_list = T("Regions")
elif level == "L2":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Provinces in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Provinces")
elif level == "L3":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Municipalities and Cities")
elif level == "L4":
if parent:
parent = db(table.id == parent).select(table.name,
limitby=(0, 1)
).first().name
s3.crud_strings["gis_location"].title_list = T("Barangays in %s") % parent
else:
s3.crud_strings["gis_location"].title_list = T("Barangays")
list_fields = ["name",
"level",
"L1",
"L2",
"L3",
"L4",
]
s3db.configure("gis_location",
filter_widgets = None,
list_fields = list_fields,
list_layout = render_locations,
)
elif r.method == "profile":
# Customise tables used by widgets
#customise_hrm_human_resource_fields()
customise_org_facility_fields()
req_customise_req_fields()
req_customise_commit_fields()
# gis_location table (Sub-Locations)
table.parent.represent = s3db.gis_LocationRepresent(sep=" | ")
list_fields = ["name",
"id",
]
location = r.record
record_id = location.id
# Override context as that's a Path
default = "~.(location)=%s" % record_id
map_widget = dict(label = "Map",
type = "map",
context = "location",
icon = "icon-map",
height = 383,
width = 568,
bbox = {"lat_max" : location.lat_max,
"lon_max" : location.lon_max,
"lat_min" : location.lat_min,
"lon_min" : location.lon_min
},
)
#locations_widget = dict(label = "Locations",
# insert = False,
# #label_create = "Create Location",
# type = "datalist",
# tablename = "gis_location",
# context = "location",
# icon = "globe",
# # @ToDo: Show as Polygons?
# show_on_map = False,
# list_layout = render_locations_profile,
# )
#needs_widget = dict(label = "Needs",
# label_create = "Add New Need",
# type = "datalist",
# tablename = "req_site_needs",
# context = "location",
# icon = "icon-hand-up",
# multiple = False,
# # Would just show up on Sites
# show_on_map = False,
# list_layout = render_site_needs,
# )
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "location",
default = default,
filter = FS("req_status").belongs([0, 1]),
icon = "request",
layer = "Requests",
# provided by Catalogue Layer
#marker = "request",
list_layout = req_req_list_layout,
)
commits_widget = dict(label = "Donations",
label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "location",
default = default,
filter = FS("cancel") == False,
icon = "truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = req_commit_list_layout,
)
#resources_widget = dict(label = "Resources",
# label_create = "Create Resource",
# type = "datalist",
# tablename = "org_resource",
# context = "location",
# default = default,
# #filter = FS("req_status").belongs([0, 1]),
# icon = "wrench",
# layer = "Resources",
# # provided by Catalogue Layer
# #marker = "resource",
# list_layout = s3db.org_resource_list_layout,
# )
sites_widget = dict(label = "Sites",
label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
context = "location",
default = default,
filter = FS("obsolete") == False,
icon = "home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
# Build the icon, if it doesn't already exist
filename = "%s.svg" % record_id
import os
filepath = os.path.join(current.request.folder, "static", "cache", "svg", filename)
if not os.path.exists(filepath):
gtable = db.gis_location
loc = db(gtable.id == record_id).select(gtable.wkt,
limitby=(0, 1)
).first()
if loc and loc.wkt:
from s3.s3codecs.svg import S3SVG
S3SVG.write_file(filename, loc.wkt)
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class="icon icon-edit"),
_href=URL(c="gis", f="location",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["gis_location"].title_update,
)
else:
edit_btn = ""
name = location.name
s3db.configure("gis_location",
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["gis_location"].title_list,
name),
profile_header = DIV(edit_btn,
A(IMG(_class="media-object",
_src=URL(c="static",
f="cache",
args=["svg", filename],
),
),
_class="pull-left",
#_href=location_url,
),
H2(name),
_class="profile-header",
),
profile_widgets = [reqs_widget,
map_widget,
commits_widget,
#resources_widget,
sites_widget,
#locations_widget,
],
)
return True
s3.prep = custom_prep
return attr
settings.customise_gis_location_controller = customise_gis_location_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_fields():
"""
Customise hrm_human_resource for Profile widgets and 'more' popups
"""
s3db = current.s3db
table = s3db.hrm_human_resource
table.site_id.represent = S3Represent(lookup="org_site")
s3db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
#table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["person_id",
"person_id$pe_id",
"organisation_id",
"site_id$location_id",
"site_id$location_id$addr_street",
"job_title_id",
"email.value",
"phone.value",
#"modified_by",
"modified_on",
]
s3db.configure("hrm_human_resource",
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
"""
Customise hrm_human_resource controller
- used for 'more' popups
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "datalist":
customise_hrm_human_resource_fields()
current.s3db.configure("hrm_human_resource",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_contacts,
)
return True
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
table = current.s3db.hrm_job_title
# Configure fields
field = table.organisation_id
field.readable = field.writable = False
field.default = None
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="hrm", f="job_title",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("hrm_job_title")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("hrm_job_title")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
def customise_org_facility_fields():
"""
Customise org_facility for Profile widgets and 'more' popups
"""
# Truncate comments fields
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
s3db = current.s3db
tablename = "org_facility"
table = s3db.org_facility
table.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
field = table.comments
field.represent = lambda body: XML(s3_URLise(body))
field.comment = None
table.phone1.label = T("Phone")
# CRUD strings
ADD_FAC = T("Add Site")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_FAC,
title_display = T("Site Details"),
title_list = T("Sites"),
title_update = T("Edit Site"),
label_list_button = T("List Sites"),
label_delete_button = T("Delete Site"),
msg_record_created = T("Site Added"),
msg_record_modified = T("Site Updated"),
msg_record_deleted = T("Site Canceled"),
msg_list_empty = T("No Sites registered"))
list_fields = ["name",
"code",
"site_facility_type.facility_type_id",
"organisation_id",
"location_id",
"location_id$addr_street",
"location_id$level",
"location_id$parent",
"modified_by",
"modified_on",
"organisation_id$logo",
"opening_times",
"human_resource.person_id",
#"contact",
"phone1",
"status.facility_status",
"status.power_supply_type",
"comments",
]
crud_form = S3SQLCustomForm("name",
"code",
S3SQLInlineComponentMultiSelectWidget(
"facility_type",
label = T("Facility Type"),
field = "facility_type_id",
widget = "multiselect",
),
"organisation_id",
"location_id",
"opening_times",
# This is too Ugly right now!
#S3SQLInlineComponent(
# "human_resource_site",
# label = T("Focal Point"),
# field = ["human_resource_id"],
# multiple = False,
#),
#"contact",
"phone1",
# This is too Ugly right now!
#S3SQLInlineComponent(
# "needs",
# label = T("Needs"),
# multiple = False,
#),
S3SQLInlineComponent(
"status",
label = T("Status"),
multiple = False,
),
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
table = s3db.org_facility
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive:
customise_org_facility_fields()
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
# Filter from a Profile page?
# If so, then default the fields we know
get_vars = current.request.get_vars
location_id = get_vars.get("~.(location)", None)
organisation_id = get_vars.get("~.(organisation)", None)
if organisation_id:
org_field = table.organisation_id
org_field.default = organisation_id
org_field.readable = org_field.writable = False
location_field = table.location_id
if location_id:
location_field.default = location_id
location_field.readable = location_field.writable = False
else:
# Don't add new Locations here
location_field.comment = None
location_field.requires = IS_LOCATION()
location_field.widget = S3LocationSelector(levels=levels,
show_address=True,
show_map=True,
)
# @ToDo: Proper button if we want this & amend functionality for Bootstrap
#s3.cancel = True
if r.method == "datalist":
# Site selection page
# 2-column datalist, 6 rows per page
#s3.dl_pagelength = 12
#s3.dl_rowsize = 2
from s3.s3filter import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [
S3LocationFilter("location_id",
levels = levels,
hidden = True,
),
S3OptionsFilter(name = "type",
label = T("Type"),
field="site_facility_type.facility_type_id",
hidden = True,
),
S3OptionsFilter(name = "status",
label = T("Status"),
field = "status.facility_status",
hidden = True,
),
S3OptionsFilter(name = "power",
label = T("Power Supply"),
field = "status.power_supply_type",
hidden = True,
),
]
#get_vars = current.request.get_vars
#goods = get_vars.get("needs.goods", None)
#vol = get_vars.get("needs.vol", None)
#if goods:
# needs_fields = ["needs.goods_details"]
# s3.crud_strings["org_facility"].title_list = T("Sites where you can Drop-off Goods")
#elif vol:
# needs_fields = ["needs.vol_details"]
# s3.crud_strings["org_facility"].title_list = T("Sites where you can Volunteer your time")
#else:
# yesno = {True: T("Yes"), False: T("No")}
# needs_fields = ["needs.goods_details", "needs.vol_details"]
# filter_widgets.insert(0, S3OptionsFilter("needs.goods",
# label = T("Drop-off Goods"),
# cols = 2,
# options = yesno,
# multiple = False,
# hidden = True,
# ))
# filter_widgets.insert(1, S3OptionsFilter("needs.vol",
# label = T("Volunteer Time"),
# cols = 2,
# options = yesno,
# multiple = False,
# hidden = True,
# ))
filter_widgets.insert(0, S3TextFilter(["name",
"code",
"comments",
], #+ needs_fields,
label = T("Search")))
s3db.configure("org_facility",
# Don't include a Create form in 'More' popups
listadd = False,
list_layout = render_sites,
filter_widgets = filter_widgets,
)
elif r.method == "profile":
# Customise tables used by widgets
customise_hrm_human_resource_fields()
customise_site_needs_fields(profile=True)
req_customise_req_fields()
list_fields = ["name",
"id",
]
record = r.record
record_id = record.id
# @ToDo: Center on the Site
map_widget = dict(label = "Map",
type = "map",
context = "site",
icon = "icon-map",
height = 383,
width = 568,
)
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "site",
create_controller = "pr",
create_function = "person",
icon = "icon-contact",
show_on_map = False, # Since they will show within Sites
list_layout = render_contacts,
)
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "site",
filter = FS("req_status").belongs([0, 1]),
icon = "request",
show_on_map = False, # Since they will show within Sites
list_layout = req_req_list_layout,
)
commits_widget = dict(label = "Donations",
#label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "site",
filter = FS("cancel") == False,
icon = "truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = req_commit_list_layout,
)
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="org", f="facility",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["org_facility"].title_update,
)
else:
edit_btn = ""
name = record.name
code = record.code
if code:
name_code = "%s - %s" % (name, code)
else:
name_code = name
location = table.location_id.represent(record.location_id)
organisation_id = record.organisation_id
db = current.db
otable = db.org_organisation
query = (otable.id == organisation_id)
org = db(query).select(otable.name,
otable.logo,
limitby=(0, 1)).first()
if org and org.logo:
logo = URL(c="default", f="download", args=[org.logo])
else:
# @ToDo: Placeholder
logo = "#"
# Add primary resource to map
# Lookup Marker (type-dependent)
ftable = s3db.org_facility
ltable = s3db.org_site_facility_type
query = (ftable.id == record_id) & \
(ftable.site_id == ltable.site_id)
facility_type = db(query).select(ltable.facility_type_id,
limitby = (0, 1)
).first()
# Lookup Marker
if facility_type:
layer_filter = "facility_type.facility_type_id=%s" % \
facility_type.facility_type_id
else:
layer_filter = ""
marker = current.gis.get_marker(controller = "org",
function = "facility",
filter = layer_filter)
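# get_marker looks up the marker styling for org/facility restricted to this
# facility type (presumably falling back to the layer default if no
# type-specific style matches)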
lat = None
lon = None
gtable = s3db.gis_location
query = (r.id == ftable.id) & \
(ftable.location_id == gtable.id)
lat_lon = db(query).select(gtable.lat,
gtable.lon,
limitby = (0,1)).first()
if lat_lon:
lat = lat_lon["gis_location.lat"]
lon = lat_lon["gis_location.lon"]
map_widget["lat"] = lat
map_widget["lon"] = lon
tablename = "org_facility"
layer = dict(name = record.name,
id = "profile-header-%s-%s" % (tablename, record_id),
active = True,
tablename = r.tablename,
url = "/%s/org/facility.geojson?facility.id=%s" % \
(r.application, record_id),
marker = marker,
)
s3db.configure(tablename,
list_fields = list_fields,
profile_title = "%s : %s" % (s3.crud_strings["org_facility"].title_list,
name),
profile_header = DIV(edit_btn,
IMG(_class="media-object",
_src=logo,
),
H2(name),
record.code and P(record.code) or "",
P(I(_class="icon-sitemap"),
" ",
SPAN(org and org.name or current.messages.NONE),
" ",
_class="card_1_line",
),
P(I(_class="icon-globe"),
" ",
SPAN(location),
" ",
_class="card_1_line",
),
P(record.comments,
_class="s3-truncate"),
_class="profile-header",
),
profile_layers = [layer],
profile_widgets = [reqs_widget,
map_widget,
commits_widget,
contacts_widget,
],
)
if r.interactive or r.representation == "aadata":
# Configure fields
#table.code.readable = table.code.writable = False
#table.phone1.readable = table.phone1.writable = False
table.phone2.readable = table.phone2.writable = False
table.email.readable = table.email.writable = False
elif r.representation == "geojson":
# Don't represent facility_status, but just show integers
s3db.org_site_status.facility_status.represent = None
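# (the GeoJSON output then carries the raw integer status codes for the map styling to match on)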
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive:
if isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="facility",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Add New Site"),
)
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "read"]))
]
db = current.db
auth = current.auth
has_permission = auth.s3_has_permission
ownership_required = auth.permission.ownership_required
s3_accessible_query = auth.s3_accessible_query
if has_permission("update", table):
action = dict(label=str(T("Edit")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "update"]),
)
if ownership_required("update", table):
# Check which records can be updated
query = s3_accessible_query("update", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
if has_permission("delete", table):
action = dict(label=str(T("Delete")),
_class="action-btn",
url=URL(c="org", f="facility",
args=["[id]", "delete"]),
)
if ownership_required("delete", table):
# Check which records can be deleted
query = s3_accessible_query("delete", table)
rows = db(query).select(table._id)
restrict = []
rappend = restrict.append
for row in rows:
row_id = row.get("id", None)
if row_id:
rappend(str(row_id))
action["restrict"] = restrict
actions.append(action)
s3.actions = actions
if isinstance(output, dict):
if "form" in output:
output["form"].add_class("org_facility")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("org_facility")
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
# @ToDo: Don't just hide but prevent building
#attr["rheader"] = None
return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_needs_fields(profile=False):
# Truncate details field(s)
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
s3db = current.s3db
table = s3db.req_organisation_needs
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
table.vol.readable = table.vol.writable = False
table.vol_details.readable = table.vol_details.writable = False
# Hide money_details unless used
current.response.s3.jquery_ready.append(
'''$('#req_organisation_needs_money_details__row').hide()
$('#req_organisation_needs_money').change(function(){
$('#req_organisation_needs_money_details__row').toggle($(this).prop('checked'))
}).change()''')
list_fields = ["id",
"organisation_id",
# @ToDo: Are these better displayed elsewhere in Profile view?
"organisation_id$logo",
"organisation_id$phone",
"organisation_id$website",
"money",
"money_details",
#"vol",
#"vol_details",
"modified_on",
"modified_by",
]
if not profile:
list_fields += ["organisation_id$name",
]
s3db.configure("req_organisation_needs",
list_fields=list_fields,
)
return
# -----------------------------------------------------------------------------
def customise_req_organisation_needs_controller(**attr):
"""
Customise req_organisation_needs controller
"""
customise_org_needs_fields()
return attr
settings.customise_req_organisation_needs_controller = customise_req_organisation_needs_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
"""
Customise org_organisation controller
- Profile Page
- Requests
"""
s3 = current.response.s3
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.interactive or r.representation == "aadata":
# Load normal Model
s3db = current.s3db
table = s3db.org_organisation
list_fields = ["id",
"name",
"logo",
"phone",
"website",
"needs.money",
"needs.money_details",
#"needs.vol",
#"needs.vol_details",
]
if r.method == "profile":
# Customise tables used by widgets
customise_hrm_human_resource_fields()
customise_org_facility_fields()
customise_org_needs_fields(profile=True)
s3db.org_customise_org_resource_fields("profile")
contacts_widget = dict(label = "Contacts",
label_create = "Create Contact",
type = "datalist",
tablename = "hrm_human_resource",
context = "organisation",
create_controller = "pr",
create_function = "person",
icon = "icon-contact",
show_on_map = False, # Since they will show within Offices
list_layout = render_contacts,
)
map_widget = dict(label = "Map",
type = "map",
context = "organisation",
icon = "icon-map",
height = 383,
width = 568,
)
needs_widget = dict(label = "Needs",
label_create = "Add New Need",
type = "datalist",
tablename = "req_organisation_needs",
multiple = False,
context = "organisation",
icon = "icon-hand-up",
show_on_map = False,
list_layout = render_org_needs,
)
reqs_widget = dict(label = "Requests",
label_create = "Add New Request",
type = "datalist",
tablename = "req_req",
context = "organisation",
filter = FS("req_status").belongs([0, 1]),
icon = "request",
layer = "Requests",
# provided by Catalogue Layer
#marker = "request",
list_layout = req_req_list_layout,
)
#resources_widget = dict(label = "Resources",
# label_create = "Create Resource",
# type = "datalist",
# tablename = "org_resource",
# context = "organisation",
# #filter = FS("req_status").belongs([0, 1]),
# icon = "wrench",
# layer = "Resources",
# # provided by Catalogue Layer
# #marker = "resource",
# list_layout = s3db.org_resource_list_layout,
# )
commits_widget = dict(label = "Donations",
#label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "organisation",
filter = FS("cancel") == False,
icon = "truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = req_commit_list_layout,
)
sites_widget = dict(label = "Sites",
label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
context = "organisation",
filter = FS("obsolete") == False,
icon = "home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
record = r.record
record_id = record.id
if current.auth.s3_has_permission("update", table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="org", f="organisation",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["org_organisation"].title_update,
)
else:
edit_btn = ""
s3db.configure("org_organisation",
profile_title = "%s : %s" % (s3.crud_strings["org_organisation"].title_list,
record.name),
profile_header = DIV(edit_btn,
IMG(_class="media-object",
_src=URL(c="default", f="download",
args=[record.logo]),
),
H2(record.name),
_class="profile-header",
),
profile_widgets = [reqs_widget,
map_widget,
# @ToDo: Move to profile_header
#needs_widget,
#resources_widget,
commits_widget,
needs_widget,
contacts_widget,
sites_widget,
]
)
elif r.method == "datalist":
# Stakeholder selection page
# 2-column datalist, 6 rows per page
s3.dl_pagelength = 12
s3.dl_rowsize = 2
from s3.s3filter import S3TextFilter, S3OptionsFilter
filter_widgets = [
# no other filter widgets here yet?
]
# Needs page
# Truncate details field(s)
from s3.s3utils import s3_trunk8
s3_trunk8(lines=2)
get_vars = current.request.get_vars
money = get_vars.get("needs.money", None)
#vol = get_vars.get("needs.vol", None)
if money:
needs_fields = ["needs.money_details"]
s3.crud_strings["org_organisation"].title_list = T("Organizations soliciting Money")
#elif vol:
# needs_fields = ["needs.vol_details"]
# s3.crud_strings["org_organisation"].title_list = T("Organizations with remote Volunteer opportunities")
else:
yesno = {True: T("Yes"), False: T("No")}
needs_fields = ["needs.money_details", "needs.vol_details"]
filter_widgets.insert(0, S3OptionsFilter("needs.money",
options = yesno,
multiple = False,
cols = 2,
hidden = True,
))
#filter_widgets.insert(1, S3OptionsFilter("needs.vol",
# options = yesno,
# multiple = False,
# cols = 2,
# hidden = True,
# ))
filter_widgets.insert(0, S3TextFilter(["name",
"acronym",
"website",
"comments",
] + needs_fields,
label = T("Search")))
ntable = s3db.req_organisation_needs
s3db.configure("org_organisation",
filter_widgets = filter_widgets
)
# Represent used in rendering
current.auth.settings.table_user.organisation_id.represent = s3db.org_organisation_represent
# Hide fields
field = s3db.org_organisation_organisation_type.organisation_type_id
field.readable = field.writable = False
table.region_id.readable = table.region_id.writable = False
table.country.readable = table.country.writable = False
table.year.readable = table.year.writable = False
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="org", f="organisation", args="datalist")
s3db.configure("org_organisation",
create_next = url_next,
delete_next = url_next,
update_next = url_next,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = render_organisations,
)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
if r.interactive and \
isinstance(output, dict) and \
current.auth.s3_has_permission("create", r.table):
# Insert a Button to Create New in Modal
output["showadd_btn"] = A(I(_class="icon icon-plus-sign big-add"),
_href=URL(c="org", f="organisation",
args=["create.popup"],
vars={"refresh": "datalist"}),
_class="btn btn-primary s3_modal",
_role="button",
_title=T("Create Organization"),
)
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_site_needs_fields(profile=False):
s3db = current.s3db
table = s3db.req_site_needs
table.modified_by.represent = s3_auth_user_represent_name
table.modified_on.represent = datetime_represent
list_fields = ["id",
"organisation_id$id",
# @ToDo: Are these better displayed elsewhere in Profile view?
"organisation_id$name",
"organisation_id$logo",
"organisation_id$website",
"location_id$L1",
"location_id$L2",
"location_id$L3",
"location_id$L4",
"location_id$addr_street",
"phone1",
#"goods",
#"goods_details",
#"vol",
#"vol_details",
"modified_on",
"modified_by",
]
if not profile:
list_fields += ["site_id$name"]
s3db.configure("req_site_needs",
list_fields=list_fields,
)
return
settings.customise_site_needs_fields = customise_site_needs_fields
# -----------------------------------------------------------------------------
def customise_pr_person_controller(**attr):
s3db = current.s3db
request = current.request
s3 = current.response.s3
tablename = "pr_person"
table = s3db.pr_person
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method == "validate":
# Can't validate image without the file
image_field = s3db.pr_image.image
image_field.requires = None
if r.interactive or r.representation == "aadata":
if request.controller != "default":
# CRUD Strings
ADD_CONTACT = T("Create Contact")
s3.crud_strings[tablename] = Storage(
label_create = T("Create Contact"),
title_display = T("Contact Details"),
title_list = T("Contact Directory"),
title_update = T("Edit Contact Details"),
label_list_button = T("List Contacts"),
label_delete_button = T("Delete Contact"),
msg_record_created = T("Contact added"),
msg_record_modified = T("Contact details updated"),
msg_record_deleted = T("Contact deleted"),
msg_list_empty = T("No Contacts currently registered"))
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
htable = s3db.hrm_human_resource
htable.organisation_id.widget = None
site_field = htable.site_id
represent = S3Represent(lookup="org_site")
site_field.represent = represent
site_field.requires = IS_ONE_OF(current.db, "org_site.site_id",
represent,
orderby = "org_site.name")
from s3layouts import S3PopupLink
site_field.comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id"},
label = T("Add New Site"),
title = T("Site"),
tooltip = T("If you don't see the Site in the list, you can add a new one by clicking the 'Add New Site' link."),
)
# ImageCrop widget doesn't currently work within an Inline Form
s3db.pr_image.image.widget = None
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
"site_contact",
]
if r.method in ("create", "update"):
# Context from a Profile page?
organisation_id = request.get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
s3_sql_custom_fields = [
"first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
S3SQLInlineComponent(
"image",
name = "image",
label = T("Photo"),
multiple = False,
fields = [("", "image")],
filterby = dict(field = "profile",
options = [True]
)
),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Site"), "human_resource.site_id"),
(T("Site Contact"), "human_resource.site_contact"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
if r.id and request.controller == "default":
url_next = URL(c="default", f="person", args=[r.id, "read"])
else:
# Return to List view after create/update/delete (unless done via Modal)
url_next = URL(c="pr", f="person")
s3db.configure(tablename,
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
list_fields = list_fields,
# Don't include a Create form in 'More' popups
listadd = False if r.method=="datalist" else True,
list_layout = render_contacts,
update_next = url_next,
)
# Move fields to their desired Locations
# Disabled as breaks submission of inline_component
#i18n = []
#iappend = i18n.append
#iappend('''i18n.office="%s"''' % T("Office"))
#iappend('''i18n.organisation="%s"''' % T("Organization"))
#iappend('''i18n.job_title="%s"''' % T("Job Title"))
#i18n = '''\n'''.join(i18n)
#s3.js_global.append(i18n)
#s3.scripts.append('/%s/static/themes/DRMP/js/contacts.js' % request.application)
return True
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
output["rheader"] = ""
actions = [dict(label=str(T("Open")),
_class="action-btn",
url=URL(c="pr", f="person",
args=["[id]", "read"]))
]
# All users just get "Open"
#db = current.db
#auth = current.auth
#has_permission = auth.s3_has_permission
#ownership_required = auth.permission.ownership_required
#s3_accessible_query = auth.s3_accessible_query
#if has_permission("update", table):
# action = dict(label=str(T("Edit")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "update"]),
# )
# if ownership_required("update", table):
# # Check which records can be updated
# query = s3_accessible_query("update", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
#if has_permission("delete", table):
# action = dict(label=str(T("Delete")),
# _class="action-btn",
# url=URL(c="pr", f="person",
# args=["[id]", "delete"]),
# )
# if ownership_required("delete", table):
# # Check which records can be deleted
# query = s3_accessible_query("delete", table)
# rows = db(query).select(table._id)
# restrict = []
# rappend = restrict.append
# for row in rows:
# row_id = row.get("id", None)
# if row_id:
# rappend(str(row_id))
# action["restrict"] = restrict
# actions.append(action)
s3.actions = actions
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
def customise_doc_document_controller(**attr):
s3 = current.response.s3
s3db = current.s3db
tablename = "doc_document"
table = s3db.doc_document
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
# Filter Out Docs from Newsfeed
r.resource.add_filter(table.name != None)
if r.interactive:
s3.crud_strings[tablename] = Storage(
label_create = T("Add Document"),
title_display = T("Document"),
title_list = T("Documents"),
title_update = T("Edit Document"),
label_list_button = T("List New Documents"),
label_delete_button = T("Remove Documents"),
msg_record_created = T("Documents added"),
msg_record_modified = T("Documents updated"),
msg_record_deleted = T("Documents removed"),
msg_list_empty = T("No Documents currently recorded"))
# Force added docs to have a name
table.name.requires = IS_NOT_EMPTY()
list_fields = ["name",
"file",
"url",
"organisation_id",
"comments",
]
crud_form = S3SQLCustomForm(*list_fields)
s3db.configure(tablename,
list_fields = list_fields,
crud_form = crud_form,
)
return True
s3.prep = custom_prep
return attr
settings.customise_doc_document_controller = customise_doc_document_controller
# -----------------------------------------------------------------------------
settings.req.req_type = ["Other"]
settings.req.requester_label = "Contact"
# Uncomment if the User Account logging the Request is NOT normally the Requester
settings.req.requester_is_author = False
# Uncomment to have Donations include a 'Value' field
settings.req.commit_value = True
# Uncomment if the User Account logging the Commitment is NOT normally the Committer
#settings.req.committer_is_author = False
# Uncomment to allow Donations to be made without a matching Request
#settings.req.commit_without_request = True
# Set the Requester as being an HR for the Site if no HR record yet & as Site contact if none yet exists
settings.req.requester_to_site = True
def customise_req_req_controller(**attr):
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
s3db = current.s3db
if r.component_name == "commit":
req_customise_commit_fields()
else:
req_customise_req_fields()
if r.method in ("datalist", "datalist.dl"):
r.resource.add_filter(r.table.req_status.belongs([0, 1]))
elif r.method == "profile":
# Customise tables used by widgets
req_customise_commit_fields()
customise_org_facility_fields()
record = r.record
record_id = record.id
commits_widget = dict(label = "Donations",
label_create = "Add New Donation",
type = "datalist",
tablename = "req_commit",
context = "request",
default = "req_id=%s" % record_id,
filter = FS("cancel") == False,
icon = "truck",
show_on_map = False,
#layer = "Donations",
# provided by Catalogue Layer
#marker = "donation",
list_layout = req_commit_list_layout,
)
filter = (FS("obsolete") == False)
sites_widget = dict(label = "Sites",
#label_create = "Add New Site",
type = "datalist",
tablename = "org_facility",
multiple = False,
context = "request",
filter = filter,
icon = "home",
layer = "Facilities",
# provided by Catalogue Layer
#marker = "office",
list_layout = render_sites,
)
if current.auth.s3_has_permission("update", r.table, record_id=record_id):
edit_btn = A(I(_class = "icon icon-edit"),
_href=URL(c="req", f="req",
args=[record_id, "update.popup"],
vars={"refresh": "datalist"}),
_class="s3_modal",
_title=s3.crud_strings["req_req"].title_update,
)
else:
edit_btn = ""
db = current.db
stable = db.org_site
query = (stable.site_id == record.site_id)
site = db(query).select(stable.name,
stable.location_id,
stable.organisation_id,
limitby=(0, 1)
).first()
location = s3db.gis_LocationRepresent(sep=" | ")(site.location_id)
otable = db.org_organisation
org = db(otable.id == site.organisation_id).select(otable.name,
otable.logo,
limitby=(0, 1)
).first()
if org and org.logo:
logo = URL(c="default", f="download", args=[org.logo])
else:
# @ToDo: Placeholder
logo = "#"
s3db.configure("req_req",
profile_title = s3.crud_strings["req_req"].title_list,
profile_header = DIV(edit_btn,
A(IMG(_class="media-object",
_src=logo,
),
_class="pull-left",
#_href=org_url,
),
H2(site.name),
P(I(_class="icon-sitemap"),
" ",
SPAN(org and org.name or current.messages.NONE),
" ",
_class="card_1_line",
),
P(I(_class="icon-globe"),
" ",
SPAN(location),
" ",
_class="card_1_line",
),
P(record.purpose,
_class="s3-truncate"),
_class="profile-header",
),
profile_widgets = [commits_widget,
sites_widget,
],
)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
return attr
settings.customise_req_req_controller = customise_req_req_controller
# -----------------------------------------------------------------------------
def customise_req_commit_controller(**attr):
s3 = current.response.s3
# Custom PreP
#standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
#if callable(standard_prep):
# result = standard_prep(r)
req_customise_commit_fields()
if r.method in ("datalist", "datalist.dl"):
r.resource.add_filter(r.table.cancel != True)
return True
s3.prep = custom_prep
# Disable postp
s3.postp = None
return attr
settings.customise_req_commit_controller = customise_req_commit_controller
# =============================================================================
# Modules
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("translate", Storage(
name_nice = "Translation Functionality",
#description = "Selective translation of strings based on module.",
module_type = None,
)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = "Persons",
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = None
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = None
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = "Contacts",
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("cms", Storage(
name_nice = "Content Management",
restricted = True,
module_type = None,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
#("event", Storage(
# name_nice = "Disasters",
# #description = "Events",
# restricted = True,
# module_type = None
#)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = None,
)),
#("project", Storage(
# name_nice = "Projects",
# restricted = True,
# module_type = None
#)),
("stats", Storage(
name_nice = "Statistics",
restricted = True,
module_type = None
)),
#("vulnerability", Storage(
# name_nice = "Vulnerability",
# restricted = True,
# module_type = None
#)),
#("transport", Storage(
# name_nice = "Transport",
# restricted = True,
# module_type = None
#)),
#("hms", Storage(
# name_nice = "Hospitals",
# restricted = True,
# module_type = None
#)),
#("cr", Storage(
# name_nice = "Shelters",
# restricted = True,
# module_type = None
#)),
("supply", Storage(
name_nice = "Supply Chain Management",
restricted = True,
module_type = None
)),
])
# =============================================================================
def req_customise_req_fields():
"""
Customize req_req fields for the Home page & dataList view
- this assumes Simple Requests (i.e. type 'Other')
"""
from s3layouts import S3PopupLink
from s3 import s3_trunk8, \
S3AddPersonWidget, \
S3DateFilter, \
S3LocationFilter, \
S3OptionsFilter, \
S3TextFilter
# Truncate purpose field
s3_trunk8(lines=2)
T = current.T
db = current.db
s3db = current.s3db
tablename = "req_req"
table = s3db.req_req
crud_fields = ["date",
#"priority",
"site_id",
#"is_template",
"requester_id",
"purpose",
]
request = current.request
args = request.args
if "update.popup" in args or \
"update" in args:
field = table.req_status
field.writable = True
field.requires = IS_IN_SET({REQ_STATUS_NONE: T("Open"),
REQ_STATUS_PARTIAL: T("Responded"),
REQ_STATUS_COMPLETE: T("Resolved"),
REQ_STATUS_CANCEL: T("Canceled"),
})
crud_fields.append("req_status")
crud_form = S3SQLCustomForm(*crud_fields)
list_fields = crud_fields + ["site_id$location_id",
"site_id$location_id$level",
"site_id$location_id$parent",
"site_id$organisation_id",
"site_id$comments",
]
table.type.default = 9 # Other
field = table.purpose
field.label = T("Request")
field.requires = IS_NOT_EMPTY(error_message=T("Please enter details of the Request"))
field.represent = lambda body: XML(s3_URLise(body))
field = table.date
field.label = T("Date")
# Make mandatory
requires = field.requires
field.requires = requires.other
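# (requires is an IS_EMPTY_OR wrapper here; .other is the inner validator, so empty dates are no longer accepted)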
field = table.site_id
site_id = request.get_vars.get("~.(site)", None)
if site_id:
field.default = site_id
field.readable = field.writable = False
# Lookup Site Contact
script = '''$.when(S3.addPersonWidgetReady('req_req_requester_id')).then(
function(input){input.addPerson('lookupContact', %s)})''' % site_id
current.response.s3.jquery_ready.append(script)
else:
# If the Requester is blank, then lookup default Site Contact
script = '''$('#req_req_site_id').change(function(){
var siteID=$(this).val()
if(siteID){
var fieldName='req_req_requester_id',input=$('#'+fieldName)
if(input.val()=='{}'&&!$('#'+fieldName+'_full_name').val()){
input.addPerson('lookupContact',siteID)}}})'''
current.response.s3.jquery_ready.append(script)
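# NB addPerson('lookupContact', siteID) asks the S3AddPersonWidget to
# pre-populate the Requester from the selected Site's default contact
# (inferred from its usage above)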
organisation_id = request.get_vars.get("~.(organisation)", None)
if organisation_id:
# Restrict to Sites belonging to this Org
# @ToDo: Handle Branches
filterby = "organisation_id"
filter_opts = (organisation_id,)
# No need to use Site Autocomplete in this case
field.widget = None
else:
filterby = None
filter_opts = None
field.label = T("Requested for Site")
#site_represent = s3db.org_SiteRepresent(show_link=False,
# show_type=False)
site_represent = S3Represent(lookup="org_site")
field.represent = site_represent
field.requires = IS_ONE_OF(db, "org_site.site_id",
site_represent,
filterby = filterby,
filter_opts = filter_opts,
not_filterby = "obsolete",
not_filter_opts = (True,),
orderby = "org_site.name",
sort = True,
)
field.comment = S3PopupLink(c = "org",
f = "facility",
vars = {"child": "site_id",
"parent": "req",
},
title = T("Add New Site"),
)
db.org_site.location_id.represent = s3db.gis_LocationRepresent(sep=" | ")
field = table.requester_id
field.widget = S3AddPersonWidget(controller="pr")
#from s3db.req import req_status_opts
filter_widgets = [
S3TextFilter(["requester_id$first_name",
"requester_id$middle_name",
"requester_id$last_name",
"site_id$name",
"purpose",
#"comments",
],
label = T("Search"),
comment=T("Search for a request by Site name, Requester name or free text."),
),
#S3OptionsFilter("transit_status",
# label = T("Transit Status"),
# options = req_status_opts,
# cols = 3,
# ),
#S3OptionsFilter("fulfil_status",
# label = T("Fulfill Status"),
# options = req_status_opts,
# cols = 3,
# ),
S3LocationFilter("site_id$location_id",
#hidden = True,
),
S3OptionsFilter("site_id",
label = T("Requested For Site"),
hidden = True,
),
S3DateFilter("date",
label = T("Date"),
hide_time = True,
input_labels = {"ge": "From", "le": "To"},
comment = T("Search for requests made between these dates."),
hidden = True,
),
#S3DateFilter("date_required",
# label = T("Date Needed By"),
# hide_time = True,
# input_labels = {"ge": "From", "le": "To"},
# comment = T("Search for requests required between these dates."),
# hidden = True,
# ),
]
# @ToDo: deployment_setting
if current.auth.s3_has_role("EDITOR"):
filter_widgets.insert(-1, S3OptionsFilter("created_by",
label = T("Logged By"),
hidden = True,
))
# Return to Requests view after create/update/delete (unless done via Modal)
url_next = URL(c="req", f="req", args="datalist")
s3db.configure(tablename,
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
filter_formstyle = filter_formstyle,
filter_widgets = filter_widgets,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = req_req_list_layout,
update_next = url_next,
)
return table
# =============================================================================
def req_customise_commit_fields():
"""
Customize req_commit fields for the Home page & dataList view
"""
from s3layouts import S3PopupLink
from s3 import s3_trunk8, \
S3AddPersonWidget, \
S3DateFilter, \
S3LocationFilter, \
S3OrganisationAutocompleteWidget, \
S3TextFilter
# Truncate comments field
s3_trunk8(lines=2)
T = current.T
s3db = current.s3db
settings = current.deployment_settings
tablename = "req_commit"
table = s3db.req_commit
list_fields = [#"req_id", # populated automatically or not at all?
"organisation_id",
"committer_id",
"comments",
"date_available",
# We'd like to be able to map donations, but it's harder for users to enter data
#"location_id",
]
if settings.get_req_commit_value():
list_fields += ["value",
"currency",
]
request = current.request
args = request.args
if "create.popup" in args or \
"create" in args:
req_id = request.get_vars.get("req_id", None)
if req_id:
table.req_id.default = req_id
elif not settings.get_req_commit_without_request():
current.session.error = T("Not allowed to Donate without matching to a Request!")
redirect(URL(c="req", f="req", args=["datalist"]))
elif "update.popup" in args or \
"update" in args:
list_fields.append("cancel")
# CRUD strings
#ADD_COMMIT = T("Make Donation")
ADD_COMMIT = T("Add Donation")
current.response.s3.crud_strings[tablename] = Storage(
label_create = ADD_COMMIT,
title_display = T("Donation Details"),
title_list = T("Donations"),
title_update = T("Edit Donation"),
label_list_button = T("List Donations"),
label_delete_button = T("Delete Donation"),
msg_record_created = T("Donation Added"),
msg_record_modified = T("Donation Updated"),
msg_record_deleted = T("Donation Canceled"),
msg_list_empty = T("No Donations"))
auth = current.auth
# @ToDo: deployment_setting
if auth.s3_has_role("EDITOR"):
editor = True
else:
editor = False
field = table.committer_id
if editor:
field.widget = S3AddPersonWidget(controller="pr")
field.default = None
else:
field.writable = False
#field = table.location_id
#field.represent = s3db.gis_LocationRepresent(sep=" | ")
# Required
#field.requires = IS_LOCATION()
field = table.comments
field.label = T("Donation")
field.represent = lambda body: XML(s3_URLise(body))
field.required = True
# @ToDo
field.comment = None
table.date_available.default = current.request.utcnow
field = table.organisation_id
field.readable = True
field.comment = S3PopupLink(c = "org",
f = "organisation_id",
title = T("Create Organization"),
)
if settings.get_org_autocomplete():
# Enable if there are many Orgs
field.widget = S3OrganisationAutocompleteWidget()
if editor:
# Editor can select Org
field.writable = True
crud_form = S3SQLCustomForm(*list_fields)
elif auth.user and auth.user.organisation_id:
field.default = auth.user.organisation_id
field.writable = False
crud_form = S3SQLCustomForm(*list_fields)
else:
# Only a User representing an Org can commit for an Org
field.default = None
field.writable = False
crud_fields = [f for f in list_fields if f != "organisation_id"]
crud_form = S3SQLCustomForm(*crud_fields)
filter_widgets = [
S3TextFilter(["committer_id$first_name",
"committer_id$middle_name",
"committer_id$last_name",
"site_id$name",
"comments",
"req_id$name",
"organisation_id$name"
],
label = T("Search"),
comment=T("Search for a commitment by Committer name, Request ID, Site or Organization."),
),
S3LocationFilter("location_id",
hidden = True,
),
#S3DateFilter("date",
# label = T("Date"),
# hide_time = True,
# input_labels = {"ge": "From", "le": "To"},
# comment = T("Search for commitments made between these dates."),
# hidden = True,
# ),
S3DateFilter("date_available",
label = T("Date Available"),
hide_time = True,
input_labels = {"ge": "From", "le": "To"},
comment = T("Search for commitments available between these dates."),
hidden = True,
),
]
# Return to Requests view after create/update/delete (unless done via Modal)
url_next = URL(c="req", f="req", args="datalist")
s3db.configure(tablename,
create_next = url_next,
crud_form = crud_form,
delete_next = url_next,
filter_formstyle = filter_formstyle,
filter_widgets = filter_widgets,
# We want the Create form to be in a modal, not inline, for consistency
listadd = False,
list_fields = list_fields,
list_layout = req_commit_list_layout,
update_next = url_next,
)
return table
# =============================================================================
def filter_formstyle(row_id, label, widget, comment, hidden=False):
"""
Custom Formstyle for FilterForm
@param row_id: HTML id for the row
@param label: the label
@param widget: the form widget
@param comment: the comment
@param hidden: whether the row should initially be hidden or not
"""
if hidden:
_class = "advanced hide"
else:
_class= ""
if not label:
label = ""
if comment:
if current.response.s3.rtl:
dir = "fleft"
else:
dir = "fright"
comment = DIV(_class = "tooltip %s" % dir,
_title = "%s|%s" % (label[0][:-1], comment),
)
else:
comment = ""
return DIV(label,
widget,
comment,
_id=row_id,
_class=_class,
)
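# NB Wired into the Request/Donation filter forms via
# s3db.configure(..., filter_formstyle=filter_formstyle) above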
# =============================================================================
def req_req_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Requests on the Home page & dataList view
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_req.id"]
item_class = "thumbnail"
raw = record._row
date = record["req_req.date"]
body = record["req_req.purpose"]
location = record["org_site.location_id"] or ""
level = raw["gis_location.level"]
if level:
location_id = raw["org_site.location_id"]
else:
location_id = raw["gis_location.parent"]
if location_id:
location_url = URL(c="gis", f="location", args=[location_id, "profile"])
else:
location_url = "#"
organisation = record["org_site.organisation_id"] or ""
organisation_id = raw["org_site.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
person = record["req_req.requester_id"]
person_id = raw["req_req.requester_id"]
person_url = URL(c="pr", f="person", args=[person_id])
person = A(person,
_href=person_url,
)
# Avatar
# Try Organisation Logo
db = current.db
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar,
_href=org_url,
_class="pull-left",
)
else:
# Personal Avatar
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
avatar = A(avatar,
_href=person_url,
_class="pull-left",
)
# Edit Bar
T = current.T
auth = current.auth
permit = auth.s3_has_permission
table = db.req_req
if permit("update", table, record_id=record_id):
edit_btn = A(ICON("edit"),
_href=URL(c="req", f="req",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=T("Edit Request"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"), _class="dl-item-delete")
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
s3db = current.s3db
site = record["req_req.site_id"]
site_id = raw["req_req.site_id"]
table = s3db.org_facility
facility = db(table.site_id == site_id).select(table.id,
limitby=(0, 1)
).first()
if facility:
site_url = URL(c="org", f="facility",
args=[facility.id, "profile"])
opts = dict(_href=site_url)
site_comments = raw["org_site.comments"] or ""
if site_comments:
opts["_class"] = "s3-popover"
opts["_data-toggle"] = "popover"
opts["_data-content"] = site_comments
site_link = A(site, **opts)
card_title = TAG[""](ICON("request"),
SPAN(site_link, _class="card-title"),
)
else:
card_title = TAG[""](ICON("reqiest"),
SPAN(" ", _class="card-title"),
)
#if priority == 3:
# # Apply additional highlighting for High Priority
# item_class = "%s disaster" % item_class
# Tallies
# NB We assume that all records are readable here
table = s3db.req_commit
query = (table.deleted == False) & \
(table.req_id == record_id)
tally_commits = db(query).count()
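# tally_commits = number of non-deleted Donations against this Request, shown as a badge on the card below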
#if permit("create", table):
if auth.is_logged_in():
_class="s3_modal btn"
commit_url = URL(c="req", f="commit",
args=["create.popup"],
vars={"req_id": record_id,
"refresh": list_id,
"record": record_id,
},
)
else:
_class="btn"
next = "/%s/req/commit/create?req_id=%s" % (current.request.application,
record_id)
commit_url = URL(c="default", f="user",
args="login",
vars={"_next": next,
},
)
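# Anonymous users are routed to login first & then bounced back to the donation popup via the _next var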
commit_btn = A(ICON("commit"),
" ",
T("DONATE"),
_href=commit_url,
_class=_class,
_title=T("Donate to this Request"),
)
# Render the item
item = DIV(DIV(card_title,
SPAN(A(location,
_href=location_url,
),
_class="location-title",
),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(SPAN(body,
_class="s3-truncate"),
DIV(person,
" - ",
A(organisation,
_href=org_url,
_class="card-organisation",
),
_class="card-person",
),
_class="media pull-left",
),
DIV(P(A(T("Donations"),
_href=URL(c="req", f="req",
args=[record_id, "profile"],
),
),
SPAN(tally_commits,
_class="badge",
),
_class="tally",
),
commit_btn,
_class="media pull-right",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# =============================================================================
def req_commit_list_layout(list_id, item_id, resource, rfields, record):
"""
Default dataList item renderer for Commits on the Home page & dataList view
@param list_id: the HTML ID of the list
@param item_id: the HTML ID of the item
@param resource: the S3Resource to render
@param rfields: the S3ResourceFields to render
@param record: the record as dict
"""
record_id = record["req_commit.id"]
item_class = "thumbnail"
raw = record._row
date = record["req_commit.date_available"]
body = record["req_commit.comments"]
title = ""
#location = record["req_commit.location_id"]
#location_id = raw["req_commit.location_id"]
#location_url = URL(c="gis", f="location", args=[location_id, "profile"])
person = record["req_commit.committer_id"]
person_id = raw["req_commit.committer_id"]
person_url = URL(c="pr", f="person", args=[person_id])
person = A(person,
_href=person_url,
)
organisation_id = raw["req_commit.organisation_id"]
if organisation_id:
organisation = record["req_commit.organisation_id"]
org_url = URL(c="org", f="organisation", args=[organisation_id, "profile"])
organisation = A(organisation,
_href=org_url,
_class="card-organisation",
)
organisation = TAG[""](" - ",
organisation)
# Use Organisation Logo
# @ToDo: option for Personal Avatar (fallback if no Org Logo?)
db = current.db
otable = db.org_organisation
row = db(otable.id == organisation_id).select(otable.logo,
limitby=(0, 1)
).first()
if row and row.logo:
logo = URL(c="default", f="download", args=[row.logo])
else:
logo = URL(c="static", f="img", args="blank-user.gif")
avatar = IMG(_src=logo,
_height=50,
_width=50,
_style="padding-right:5px;",
_class="media-object")
avatar = A(avatar, _href=org_url, _class="pull-left")
else:
organisation = ""
# Personal Avatar
avatar = s3_avatar_represent(person_id,
tablename="pr_person",
_class="media-object")
avatar = A(avatar, _href=person_url, _class="pull-left")
# Edit Bar
permit = current.auth.s3_has_permission
table = current.s3db.req_commit
if permit("update", table, record_id=record_id):
edit_btn = A(ICON("edit"),
_href=URL(c="req", f="commit",
args=[record_id, "update.popup"],
vars={"refresh": list_id,
"record": record_id}),
_class="s3_modal",
_title=current.T("Edit Donation"),
)
else:
edit_btn = ""
if permit("delete", table, record_id=record_id):
delete_btn = A(ICON("delete"), _class="dl-item-delete")
else:
delete_btn = ""
edit_bar = DIV(edit_btn,
delete_btn,
_class="edit-bar fright",
)
card_label = TAG[""](ICON("offer"),
SPAN(" %s" % title, _class="card-title"),
)
# Render the item
item = DIV(DIV(card_label,
#SPAN(A(location,
# _href=location_url,
# ),
# _class="location-title",
# ),
SPAN(date,
_class="date-title",
),
edit_bar,
_class="card-header",
),
DIV(avatar,
DIV(DIV(SPAN(body,
_class="s3-truncate"),
DIV(person,
organisation,
_class="card-person",
),
_class="media",
),
_class="media-body",
),
_class="media",
),
#docs,
_class=item_class,
_id=item_id,
)
return item
# END =========================================================================
|
flavour/eden
|
modules/templates/historic/Philippines/config.py
|
Python
|
mit
| 166,125
|
[
"Amber"
] |
bcd891357a99ab977467bde428486a1968079e61c6223e0f75994557c3022635
|
import pdb
import six
import numpy as np
from astropy.io import fits
import astropy.units as u
from utils import bin_ndarray as rebin
from utils import gauss_kern
from utils import clean_nans
from utils import clean_args
from utils import map_rms
from astropy import cosmology
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import Planck15, z_at_value
class Skymaps:
def __init__(self,file_map,file_noise,psf,color_correction=1.0,beam_area=1.0,wavelength=None,fwhm=None):
''' This Class creates Objects for a set of
maps/noisemaps/beams/TransferFunctions/etc.,
at each Wavelength.
This is a work in progress!
Issues: If the beam has a different pixel size from the map,
it is not yet able to re-scale it.
Just haven't found a convincing way to make it work.
Future Work:
Will shift some of the work into functions (e.g., read psf,
color_correction) and increase flexibility.
'''
#READ MAPS
if file_map == file_noise:
#SPIRE Maps have Noise maps in the second extension.
cmap, hd = fits.getdata(file_map, 1, header = True)
cnoise, nhd = fits.getdata(file_map, 2, header = True)
else:
#This assumes that if Signal and Noise are different maps, they are contained in first extension
cmap, hd = fits.getdata(file_map, 0, header = True)
cnoise, nhd = fits.getdata(file_noise, 0, header = True)
#GET MAP PIXEL SIZE
if 'CD2_2' in hd:
pix = hd['CD2_2'] * 3600.
else:
pix = hd['CDELT2'] * 3600.
#READ BEAMS
#Check first if beam is a filename (actual beam) or a number (approximate with Gaussian)
if isinstance(psf, six.string_types):
beam, phd = fits.getdata(psf, 0, header = True)
#GET PSF PIXEL SIZE
if 'CD2_2' in phd:
pix_beam = phd['CD2_2'] * 3600.
elif 'CDELT2' in phd:
pix_beam = phd['CDELT2'] * 3600.
else: pix_beam = pix
            #SCALE PSF IF NECESSARY
            if np.round(10.*pix_beam) != np.round(10.*pix):
                raise ValueError("Beam and Map have different size pixels")
            # The re-scaling below is kept for reference; it is unreachable
            # while the ValueError above is in place (see the "Issues" note
            # in the class docstring):
            #scale_beam = pix_beam / pix
            #pms = np.shape(beam)
            #new_shape=(np.round(pms[0]*scale_beam),np.round(pms[1]*scale_beam))
            #kern = rebin(clean_nans(beam),new_shape=new_shape,operation='ave')
            kern = clean_nans(beam)
            self.psf_pixel_size = pix_beam
else:
sig = psf / 2.355 / pix
#pdb.set_trace()
#kern = gauss_kern(psf, np.floor(psf * 8.), pix)
kern = gauss_kern(psf, np.floor(psf * 8.)/pix, pix)
self.map = clean_nans(cmap) * color_correction
self.noise = clean_nans(cnoise,replacement_char=1e10) * color_correction
if beam_area != 1.0:
self.beam_area_correction(beam_area)
self.header = hd
self.pixel_size = pix
self.psf = clean_nans(kern)
self.rms = map_rms(self.map.copy(), silent=True)
        if wavelength is not None:
            self.add_wavelength(wavelength)
        if fwhm is not None:
            self.add_fwhm(fwhm)
def beam_area_correction(self,beam_area):
self.map *= beam_area * 1e6
self.noise *= beam_area * 1e6
def add_wavelength(self,wavelength):
self.wavelength = wavelength
def add_fwhm(self,fwhm):
self.fwhm = fwhm
def add_weights(self,file_weights):
weights, whd = fits.getdata(file_weights, 0, header = True)
#pdb.set_trace()
self.noise = clean_nans(1./weights,replacement_char=1e10)
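# A minimal usage sketch (illustrative only; the file name below is a
# hypothetical placeholder). Passing the same file for map and noise
# triggers the SPIRE-style read of extensions 1 and 2, and a numeric
# psf is treated as a Gaussian FWHM:
#
# sky = Skymaps('spire_250_map.fits', 'spire_250_map.fits', psf=18.1,
#               wavelength=250, fwhm=18.1)
# print(sky.pixel_size, sky.rms)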
|
marcoviero/Utils
|
skymaps.py
|
Python
|
mit
| 3,302
|
[
"Gaussian"
] |
39f71ec235de21fe0d8cb2d74a0c453f58b081edb7dc487f0c78db10d67ba45e
|
# """
# :mod: DataLoggingHandler
#
# .. module: DataLoggingHandler
#
# :synopsis: DataLoggingHandler is the implementation of the Data Logging service in the DISET framework.
#
# The following methods are available in the Service interface:
#
# - addFileRecord()
# - addFileRecords()
# - getFileLoggingInfo()
#
# """
#
# __RCSID__ = "$Id$"
#
# ## imports
# from types import StringType, ListType, TupleType
# ## from DIRAC
# from DIRAC import S_OK
# from DIRAC.Core.DISET.RequestHandler import RequestHandler
# from DIRAC.DataManagementSystem.DB.DataLoggingDB import DataLoggingDB
#
# ## global instance of the DataLoggingDB
# gDataLoggingDB = False
#
# def initializeDataLoggingHandler( serviceInfo ):
# """ handler initialisation """
# global gDataLoggingDB
# gDataLoggingDB = DataLoggingDB()
#
# res = gDataLoggingDB._connect()
# if not res['OK']:
# return res
# res = gDataLoggingDB._checkTable()
# if not res['OK'] and not res['Message'] == 'The requested table already exist':
# return res
# return S_OK()
#
# class DataLoggingHandler( RequestHandler ):
# """
# .. class:: DataLoggingClient
#
# Request handler for DataLogging service.
# """
#
# types_addFileRecord = [ [StringType, ListType], StringType, StringType, StringType, StringType ]
# @staticmethod
# def export_addFileRecord( lfn, status, minor, date, source ):
# """ Add a logging record for the given file
#
# :param self: self reference
# :param mixed lfn: list of strings or a string with LFN
# :param str status: file status
# :param str minor: minor status (additional information)
# :param mixed date: datetime.datetime or str(datetime.datetime) or ""
# :param str source: source setting a new status
# """
# if type( lfn ) == StringType:
# lfns = [ lfn ]
# else:
# lfns = lfn
# return gDataLoggingDB.addFileRecord( lfns, status, minor, date, source )
#
# types_addFileRecords = [ [ ListType, TupleType ] ]
# @staticmethod
# def export_addFileRecords( fileTuples ):
# """ Add a group of logging records
# """
# return gDataLoggingDB.addFileRecords( fileTuples )
#
# types_getFileLoggingInfo = [ StringType ]
# @staticmethod
# def export_getFileLoggingInfo( lfn ):
# """ Get the file logging information
# """
# return gDataLoggingDB.getFileLoggingInfo( lfn )
#
# types_getUniqueStates = []
# @staticmethod
# def export_getUniqueStates():
# """ Get all the unique states
# """
# return gDataLoggingDB.getUniqueStates()
#
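# ## Example client-side usage (a sketch, commented out like the rest of
# ## this file; assumes the standard DIRAC RPCClient and the
# ## "DataManagement/DataLogging" service path):
# #
# # from DIRAC.Core.DISET.RPCClient import RPCClient
# # dataLogging = RPCClient( "DataManagement/DataLogging" )
# # result = dataLogging.addFileRecord( "/some/lfn/file.dat", "Registered",
# #                                     "", "", "testScript" )
# # if not result["OK"]:
# #   print result["Message"]
#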
|
Sbalbp/DIRAC
|
DataManagementSystem/Service/DataLoggingHandler.py
|
Python
|
gpl-3.0
| 2,542
|
[
"DIRAC"
] |
9b2f6019f3e5ee9e1182975c0af7aeb33a593898dc372713f51569ab92eb478e
|
import math
import ast
import inspect
import re
import os.path
import sys
import subprocess
from org.nmrfx.processor.math.units import Fraction
from org.nmrfx.processor.math.units import Frequency
from org.nmrfx.processor.math.units import Index
from org.nmrfx.processor.math.units import PPM
from org.nmrfx.processor.math.units import Point
from org.nmrfx.processor.math.units import Time
from org.nmrfx.processor.operations import Add
from org.nmrfx.processor.operations import Asmooth
from org.nmrfx.processor.operations import AutoPhase
from org.nmrfx.processor.operations import AutoPhaseDataset
from org.nmrfx.processor.operations import BcMed
from org.nmrfx.processor.operations import BcPoly
from org.nmrfx.processor.operations import BcSine
from org.nmrfx.processor.operations import Bcwhit
from org.nmrfx.processor.operations import Blackman
from org.nmrfx.processor.operations import Bucket
from org.nmrfx.processor.operations import Bz
from org.nmrfx.processor.operations import CShift
from org.nmrfx.processor.operations import CoAdd
from org.nmrfx.processor.operations import Cwtd
from org.nmrfx.processor.operations import Combine
from org.nmrfx.processor.operations import Dc
from org.nmrfx.processor.operations import Dept
from org.nmrfx.processor.operations import Dcfid
from org.nmrfx.processor.operations import Dx
from org.nmrfx.processor.operations import Exp
from org.nmrfx.processor.operations import EACombine
from org.nmrfx.processor.operations import ESmooth
from org.nmrfx.processor.operations import Extend
from org.nmrfx.processor.operations import Extract
from org.nmrfx.processor.operations import Expd
from org.nmrfx.processor.operations import Fdss
from org.nmrfx.processor.operations import FFilter
from org.nmrfx.processor.operations import Ft
from org.nmrfx.processor.operations import Ft2d
from org.nmrfx.processor.operations import GapSmooth
from org.nmrfx.processor.operations import Gen
from org.nmrfx.processor.operations import Gf
from org.nmrfx.processor.operations import Gm
from org.nmrfx.processor.operations import Gmb
from org.nmrfx.processor.operations import GRINSOp
from org.nmrfx.processor.operations import Hft
from org.nmrfx.processor.operations import Ift
from org.nmrfx.processor.operations import Ift2d
from org.nmrfx.processor.operations import Imag
from org.nmrfx.processor.operations import Integrate
from org.nmrfx.processor.operations import IO
from org.nmrfx.processor.operations import IstMatrix
from org.nmrfx.processor.operations import IstVec
from org.nmrfx.processor.operations import Kaiser
from org.nmrfx.processor.operations import Mag
from org.nmrfx.processor.operations import Measure
from org.nmrfx.processor.operations import Merge
from org.nmrfx.processor.operations import Mult
from org.nmrfx.processor.operations import NESTANMREx
from org.nmrfx.processor.operations import NESTANMR
from org.nmrfx.processor.operations import Ones
from org.nmrfx.processor.operations import Phase
from org.nmrfx.processor.operations import Phase2d
from org.nmrfx.processor.operations import Power
from org.nmrfx.processor.operations import PythonScript
from org.nmrfx.processor.operations import Rand
from org.nmrfx.processor.operations import RandN
from org.nmrfx.processor.operations import Range
from org.nmrfx.processor.operations import Real
from org.nmrfx.processor.operations import Regions
from org.nmrfx.processor.operations import Reverse
from org.nmrfx.processor.operations import Rft
from org.nmrfx.processor.operations import Schedule
from org.nmrfx.processor.operations import Shift
from org.nmrfx.processor.operations import Sign
from org.nmrfx.processor.operations import SinebellApod
from org.nmrfx.processor.operations import Sqrt
from org.nmrfx.processor.operations import Stack
from org.nmrfx.processor.operations import DGRINSOp
from org.nmrfx.processor.operations import TDCombine
from org.nmrfx.processor.operations import TDPoly
from org.nmrfx.processor.operations import Tm
from org.nmrfx.processor.operations import Tri
from org.nmrfx.processor.operations import Tdss
from org.nmrfx.processor.operations import VecRef
from org.nmrfx.processor.operations import WriteVector
from org.nmrfx.processor.operations import Zeros
from org.nmrfx.processor.operations import Zf
from org.nmrfx.processor.processing.processes import ProcessOps
from org.nmrfx.processor.processing import Processor
from org.nmrfx.processor.datasets.vendor import NMRDataUtil
from org.nmrfx.processor.datasets.vendor import NMRData
from org.nmrfx.processor.math.units import UnitFactory
from org.nmrfx.processor.math import Vec
from org.nmrfx.processor.datasets import DatasetPhaser
from java.util.concurrent import ConcurrentHashMap
from java.util import ArrayList
from java.util import HashMap
import java.lang.Double as Double
import java.lang.Integer as Integer
import psspecial
from nmrpar import getFdSizes
from nmrpar import getTdSizes
from nmrpar import getBzSize
from nmrpar import getExtendSize
from nmrpar import getExtractSize
from nmrpar import getExtractSizeP
from nmrpar import getFilterSize
from nmrpar import getZfSize
from nmrpar import refByRatio
from nmrpar import getWaterPPM
session_globals = {}
processor = Processor.getProcessor()
defaultProcess = processor.getDefaultProcess()
localProcess = None
useLocalProcess = False
fidInfo = None
nestaExecutable='/usr/local/bin/NESTANMR'
argFile = None
# Create dictionary of standard coefficients
# 1, 0, 1, 0, 0, 1, 0,-1],
StdCoefs={
'real':[
1, 0,0, 0],
'sep':[
1, 0,0, 1],
'echo-antiecho':[
1, 0,-1, 0, 0, 1, 0, 1],
'echo-antiecho-r':[
1, 0, 1, 0, 0, 1, 0,-1],
'hyper':[
1, 0, 0, 0, 0, 0,-1, 0],
'hyper-r':[
1, 0, 0, 0, 0, 0, 1, 0],
'ge':[
1, 0, 1, 0, 1, 0, 1, 0],
'ea3d12':[
1, 0, 1, 0, 0, 0, 0, 0,
0,-1, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0,-1, 0, 1],
'ea3d21':[
1, 0, 0, 0, 1, 0, 0, 0,
0,-1, 0, 0, 0, 1, 0, 0,
0, 0, 1, 0, 0, 0, 1, 0,
0, 0, 0,-1, 0, 0, 0, 1],
'ea3d21x':[
1, 0, 0, 0, 1, 0, 0, 0,
0, 0,-1, 0, 0, 0,-1, 0,
0, 1, 0, 0, 0,-1, 0, 0,
0, 0, 0,-1, 0, 0, 0, 1]
}
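# Illustrative use of the coefficient tables above (a sketch; COMB and
# TDCOMB, defined later in this module, accept either one of the names
# above or a raw list of numbers):
#   COMB(coef='echo-antiecho')             # look up a standard table
#   COMB(coef=[1, 0, 0, 0, 0, 0, -1, 0])   # or pass coefficients directly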
class FIDInfo:
size=[]
sw=[]
sf=[]
ref=[]
refpt=[]
label=[]
mapToFIDList=[]
mapToDatasetList=[]
solvent='H2O'
flags = {}
acqOrder = []
acqArray = []
fidObj = None
def checkParDim(self,pars):
nd = self.fidObj.getNDim()
if (len(pars) > nd):
raise Exception("Number of parameters must be < "+str(nd))
def printInfo(self):
print " sw", self.sw
print " sf", self.sf
print " size", self.size
print " useSize", self.useSize
print " acqArray", self.acqArray
print " label", self.label
print " ref", self.ref
print " refpt", self.refpt
print " flags", self.flags
print " acqOrder", self.acqOrder
print " mapToFID", self.mapToFIDList
print " mapToDataset", self.mapToDatasetList
def getPar(self,par):
value = self.fidObj.getParDouble(par)
return value
def setFlags(self,flags):
NMRDataUtil.setFlags(self.fidObj,flags)
def setFixDSP(self,value):
self.fidObj.setFixDSP(value)
def setSW(self,pars):
self.checkParDim(pars)
for i,par in enumerate(pars):
if isinstance(par,float):
self.sw[i] = par
elif isinstance(par,int):
self.sw[i] = float(par)
else:
if (par == ''):
self.fidObj.resetSW(i)
continue
value = self.fidObj.getParDouble(par)
if isinstance(value,float):
self.sw[i] = value
self.fidObj.setSW(i,self.sw[i])
def setSF(self,pars):
self.checkParDim(pars)
for i,par in enumerate(pars):
if isinstance(par,float):
self.sf[i] = par
elif isinstance(par,int):
self.sf[i] = float(par)
else:
if (par == ''):
self.fidObj.resetSF(i)
continue
value = self.fidObj.getParDouble(par)
if isinstance(value,float):
self.sf[i] = value
self.fidObj.setSF(i,self.sf[i])
def setRef(self,pars):
self.checkParDim(pars)
for i,par in enumerate(pars):
if isinstance(par,float):
self.ref[i] = par
elif isinstance(par,int):
self.ref[i] = float(par)
else:
par = par.upper().strip()
if (par == ''):
self.fidObj.resetRef(i)
self.ref[i] = self.fidObj.getRef(i)
continue
if (par == 'H2O'):
temp = self.fidObj.getTempK()
self.ref[i] = getWaterPPM(temp)
elif (par.find('@') != -1):
(refValue,sfValue) = par.split('@')
refValue = float(refValue)
sfValue = float(sfValue)
self.ref[i] = (self.sf[i]-sfValue)*1.e6/sfValue+refValue
elif ((i > 0) and (par in ('N','C','P','D','H'))):
self.ref[i] = refByRatio(self.sf[0],self.ref[0],self.sf[i],par)
else:
doubleValue = self.fidObj.getParDouble(par)
if doubleValue == None:
raise Exception("Cannot convert par "+par)
                    self.ref[i] = doubleValue
            delRef = (self.size[i]/2) * self.sw[i] / self.sf[i] / self.size[i]
self.fidObj.setRef(i,self.ref[i])
def setLabel(self,pars):
self.checkParDim(pars)
for i,par in enumerate(pars):
self.label[i] = par
def read(self,index=0,name="work"):
global fidInfo
fidObj = self.fidObj
size = fidObj.getNPoints()
v1 = Vec(name, size, True)
fidObj.readVector(index,v1)
return v1
def isComplex(self, dim):
'''return whether or not current dimension is complex'''
fidObj = self.fidObj
return fidObj.isComplex(dim) # dim is 1-based
def negatePairsFT(self,dim):
'''return whether or not to negatePairs for FT for current dimension'''
fidObj = self.fidObj
negate = fidObj.getNegatePairs(dim) # dim is 1-based
return negate
def negateImagFT(self,dim):
'''return whether or not to negateImag for FT for current dimension'''
fidObj = self.fidObj
negate = fidObj.getNegateImag(dim) # dim is 1-based
return negate
def getSymbolicCoefs(self,dim):
'''return indirect dimension coefficients'''
fidObj = self.fidObj
return fidObj.getSymbolicCoefs(dim);
def getCoefs(self,dim='all'):
'''return indirect dimension coefficients'''
fidObj = self.fidObj
nDim = fidObj.getNDim()
if (dim != 'all'):
coefs = fidObj.getCoefs(dim) # not guaranteed to exist
else:
coefs = [1.0, 0.0] # complex
if (nDim > 1): # calculate coefs from all dimensions
for iDim in range(1, nDim):
dcoefs = fidObj.getCoefs(iDim)
cf = pairList(coefs)
df = pairList(dcoefs)
# complex array multiplication - not quite correct?
koeffs = [ c * d for c in cf for d in df ]
coefs = unPairList( koeffs )
return coefs
def getPhases(self,dim):
'''return 0th and 1st order phase corrections for current dimension'''
fidObj = self.fidObj
dataset = processor.getDataset()
if (dataset != None):
ph0 = dataset.getPh0(dim) # dim is zero-based
ph1 = dataset.getPh1(dim)
else:
ph0 = fidObj.getPH0(dim) # dim is 1-based
ph1 = fidObj.getPH1(dim)
return ph0, ph1
def setFIDMap(self,values):
self.mapToFIDList = list(values)
def mapToFID0(self, iDim):
if iDim < len(self.mapToFIDList):
return self.mapToFIDList[iDim]
else:
return -1
def mapToDataset(self, iDim):
return self.mapToDatasetList[iDim]
def printInfo():
'''Print out reference info. Useful to see what values the automatic parameter extraction found.'''
fidInfo.printInfo()
def printDataInfo():
    '''Print out dataset info (sizes, resizable flag, etc.). Useful to see what the processing script will produce.'''
dataInfo.printInfo()
def fixdsp(value):
''' Set whether to fix dsp charge-up when reading FID. Only used for Bruker data.
'''
fidInfo.setFixDSP(value)
def sw(*pars):
''' Sweep width values to set for each dimension.<br>
Values can be either a numeric value (2000.0) or the name of a vendor specific parameter (in quotes).
<br>Examples:
<ul>
<li>sw(5000.0,2000.0) Numeric values</li>
<li>sw('sw','sw1') Agilent VNMR</li>
<li>sw('SW_h,1','SW_h,2') Bruker</li>
</ul>
'''
fidInfo.setSW(pars)
def sf(*pars):
''' Spectrometer frequency at center of spectrum to set for each dimension.<br>
Values can be either a numeric value (2000.0) or the name of a vendor specific parameter (in quotes).
<br>Examples:
<ul>
<li>sf(5000.0,2000.0) Numeric values</li>
<li>sf('sfrq','dfrq') Agilent VNMR</li>
<li>sf('SFO1,1','SFO1,2') Bruker</li>
</ul>
'''
fidInfo.setSF(pars)
def ref(*pars):
''' Reference position (in ppm) at center of spectrum to set for each dimension.<br>
Values can be either a numeric value (2000.0), the name of a vendor specific parameter (in quotes), or symbolic values.
If symbolic values ('h2o', 'C', 'N') are used the sf and sw values must already be set correctly.
<br>Examples:
<ul>
<li>ref(4.73,115.0) Numeric values</li>
    <li>ref('h2o','N') Set dimension 1 to the shift of water at the experimental temperature, and dimension 2 to the value for N using the indirect reference ratio</li>
</ul>
'''
fidInfo.setRef(pars)
def label(*pars):
''' Set the label to be used for each dimension.<br>
<br>Examples:
<ul>
<li>label('HN','N15') </li>
<li>label('H','CA','N') </li>
</ul>
'''
fidInfo.setLabel(pars)
def genLSCatalog(lwMin, lwMax, nLw, nKeep, nFrac):
datasetName = dataInfo.filename
processor.setupSim(lwMin, lwMax, nLw, nKeep, nFrac, datasetName)
def flags(**keywords):
fidInfo.setFlags(keywords)
# set acquisition order, e.g. acqOrder('p2','p1','d1','d2')
def acqOrder(*order):
    ''' Set acquisition order used by the experiment, including phase and time increments.<br>
<br>Examples:
<ul>
<li>acqOrder('p1','d1','p2','d2')</li>
<li>acqOrder('p2','p1','d1','d2')</li>
</ul>
'''
global fidInfo
fidInfo.acqOrder = []
for par in order:
fidInfo.acqOrder.append(par)
fidInfo.fidObj.resetAcqOrder()
fidInfo.fidObj.setAcqOrder(order)
processor.setAcqOrder(fidInfo.fidObj.getAcqOrder())
def setupScanTable(fileName):
global scanTableName
global scanTable
scanTableName = fileName
scanTable = None
def closeScanTable():
global scanTableName
global scanTable
if scanTable != None:
scanTable.close()
scanTable = None
def writeToScanTable(iFile, filePath, dataName, map):
global scanTableName
global scanTable
if scanTable == None:
scanTable = open(scanTableName,'w')
# write header
scanTable.write('index\tfid\tdataset')
if map != None:
header = getMeasureMapHeader(map)
if header != None:
scanTable.write(header)
scanTable.write('\n')
outStr = str(iFile) + '\t' + filePath + '\t' + dataName
if map != None:
outStr += getMeasureMapData(map)
scanTable.write(outStr + '\n')
def setMeasureMap(map):
global gmap
gmap = map
def getMeasureMap():
global gmap
try:
if gmap == None:
gmap = ConcurrentHashMap()
except:
gmap = ConcurrentHashMap()
return gmap
def getMeasureMapHeader(map):
# loop over keys in measure map and get values
result = ""
for key in map:
if key.startswith('measures_'):
dataValues = map.get(key)
for (i,dataValue) in enumerate(dataValues):
outStr = "\tIntegral%d_%.3f_%.3f\tMax%d_%.3f_%.3f" % (i,dataValue.getStartPPM(),dataValue.getEndPPM(),i,dataValue.getStartPPM(),dataValue.getEndPPM())
result += outStr
return result
return None
def getMeasureMapData(map):
# loop over keys in measure map and get values
result = ""
for key in map:
if key.startswith('measures_'):
dataValues = map.get(key)
for (i,dataValue) in enumerate(dataValues):
dataValue.setScale(1.0)
outStr = "\t%.3f\t%.3f" % (dataValue.getCorrectedSum(),dataValue.getMax())
result += outStr
# clear map for next spectrum
map.clear()
return result
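# Sketch of the measurement/scan-table flow built from the functions above
# (file names are placeholders; MEASURE is defined further below, and
# getMeasureMapData clears the map after each spectrum is written):
#   setupScanTable('scan.txt')
#   m = getMeasureMap()
#   MEASURE(map=m)                                  # queued per processed row
#   writeToScanTable(0, 'expt.fid', 'expt.nv', m)
#   closeScanTable()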
def inMemory(mode=True):
global dataInfo
dataInfo.inMemory = mode
def acqarray(*pars):
''' Set acquired array size.
'''
global fidInfo
global dataInfo
size = list(fidInfo.maxSize)
fidInfo.acqArray = []
for i,par in enumerate(pars):
if (i != 0) and (par != ''):
fidInfo.acqArray.append(par)
fidInfo.fidObj.setArraySize(i,par)
if par != 0:
dataInfo.extra = fidInfo.acqArray[i]
else:
fidInfo.acqArray.append(0)
fidInfo.fidObj.setArraySize(i,0)
# set fid size limits
def acqsize(*pars):
''' Set acquired size. This is not normally needed, but might be useful if the experiment did not run to completion
and you need to specify the number of rows of data that were actually acquired. Only the size for the indirect
dimensions can be changed. Specifying a value of 0, or an empty value indicates that the actual acquired size
should be used.
'''
global fidInfo
global dataInfo
global useLocalProcess
size = list(fidInfo.maxSize)
for i,par in enumerate(pars):
if (i != 0) and (par != ''):
if (fidInfo.maxSize[i] < par):
size[i] = fidInfo.maxSize[i]
elif par == 0:
size[i] = fidInfo.maxSize[i]
else:
size[i] = par
if not useLocalProcess:
processor.setSizes(size)
processor.adjustSizes();
newSizes = processor.getNewSizes()
size = [s for s in newSizes]
fidInfo.size = list(size)
fidInfo.useSize = list(size)
dataInfo.size = list(size)
dataInfo.msize = initMSize(fidInfo, size)
# set fid size limits
def tdsize(*size):
''' Set time domain size that should actually be used. Normally set to a value less than or equal to the acqsize value.
Only the size for the indirect dimensions can be changed. Specifying a value of 0, or an empty value indicates
that the actual acquired size should be used. Useful if you want to see what the processed data would be like if fewer
data rows were acquired, or if there is some corruption of data (by changes to sample or fault in instrument) after
a certain point.
'''
global fidInfo
fidInfo.useSize = []
for i,par in enumerate(size):
# at present, can't change size of direct dimension
if i == 0:
fidInfo.useSize.append(fidInfo.size[i])
elif par == '':
fidInfo.useSize.append(fidInfo.size[i])
elif par == 0:
fidInfo.useSize.append(fidInfo.size[i])
else:
fidInfo.useSize.append(par)
def p(par):
return fidInfo.getPar(par)
def getCurrentProcess():
global localProcess
global useLocalProcess
if (useLocalProcess and localProcess):
process = localProcess
else:
process = processor.getCurrentProcess()
return process
def clearLocalProcess():
global localProcess
if localProcess:
localProcess.clearOps()
def readNUS(fileName, demo=True):
global fidInfo
fidObj = fidInfo.fidObj
fidObj.readSampleSchedule(fileName, demo)
def genNUS(sizes):
global fidInfo
fidObj = fidInfo.fidObj
print "gen ",sizes
fidObj.createUniformSchedule(sizes)
class genericOperation(object):
def __init__(self, f):
self.f = f
self.__doc__ = f.__doc__
self.defs = {}
name_val = zip(inspect.getargspec(f)[0], inspect.getargspec(f)[-1])
for name,val in name_val:
self.defs[name] = val
self.arguments = inspect.getargspec(self.f)[0]
    def __call__(self,*args,**kwargs):
        if argFile != None:
            self.dumpArgs(argFile,self.f.__name__,*args,**kwargs)
        op = self.f(*args,**kwargs)
        if op != None:
            if 'vector' in kwargs and kwargs['vector'] != None:
                op.eval(kwargs['vector'])
            else:
                # queue the operation on the currently active process
                process = getCurrentProcess()
                process.add(op)
        return op
def dumpArgs(self,argFile,opName,*args,**kwargs):
argFile.write(opName)
nArgSupplied = len(args)
for iArg,arg in enumerate(self.arguments):
if iArg < nArgSupplied:
value = args[iArg]
elif arg in kwargs:
value = kwargs[arg]
elif arg in self.defs:
value = self.defs[arg]
else:
                raise Exception('No value for argument '+arg)
argFile.write('\t'+arg+'\t'+str(value))
argFile.write('\n')
class DataInfo:
filename = 'data.nv'
curDim = -1
size = []
createdSize = []
useSize = None
resizeable = True # size may change or not
inMemory = False
extra = 0
def printInfo(self):
print " size", self.size
print " msize", self.msize
print " useSize", self.useSize
print "resizable", self.resizeable
print " extra", self.extra
def initLocal():
global fidInfo
global dataInfo
global localProcess
global useLocalProcess
useLocalProcess = True
dataInfo = DataInfo()
dataInfo.curDim = 1
dataInfo.resizeable = False
localProcess = ProcessOps()
def useLocal():
global useLocalProcess
useLocalProcess = True
processor.clearProcessorError();
dataInfo.resizeable = False
def useProcessor(inNMRFx=False):
global dataInfo
global useLocalProcess
global processor
global defaultProcess
global nmrFxMode
nmrFxMode = inNMRFx
useLocalProcess = False
processor.reset()
processor.clearDatasets()
processor.clearProcessorError();
defaultProcess = processor.getDefaultProcess()
if dataInfo.extra == 0:
dataInfo = DataInfo()
dataInfo.resizeable = True
# should FID be done by open with testing for file type
def FID(fidFileName, tdSize=None, nusFileName=None, **keywords):
''' Open a raw NMR dataset (FID file).<br>
Parameters
---------
fidFileName : string
Name of the file to open.
tdSize : array
        Size of each time domain dimension. Automatically determined from parameter files if not specified.
keywords : keywords
Optional list of arguments describing data
'''
if (tdSize):
fidObj = processor.openfid(fidFileName, nusFileName, tdSize)
else:
fidObj = processor.openfid(fidFileName, nusFileName)
fidInfo = makeFIDInfo(fidObj,tdSize)
if (keywords): # may use keywords for flags
fidInfo.flags = keywords
return fidInfo
def makeFIDInfo(fidObj=None, tdSize=None, **keywords):
global fidInfo
fidInfo = FIDInfo()
if (not fidObj):
fidObj = NMRDataUtil.getCurrentData()
if (not fidObj):
return None
if (not tdSize):
tdSize = getTdSizes(fidObj)
fidInfo.size = list(tdSize)
fidInfo.useSize = list(tdSize)
fidInfo.fidObj = fidObj
fidInfo.solvent = fidObj.getSolvent()
fidInfo.nd = fidObj.getNDim()
fidInfo.sw = []
fidInfo.sf = []
fidInfo.ref = []
fidInfo.refpt = []
fidInfo.label = []
fidInfo.maxSize = []
fidInfo.mapToFIDList = []
fidInfo.mapToDatasetList = []
for i in range(fidInfo.nd):
fidInfo.mapToDatasetList.append(-1)
j = 0
for i in range(fidInfo.nd):
fidInfo.sw.append(fidObj.getSW(i))
fidInfo.sf.append(fidObj.getSF(i))
fidInfo.ref.append(fidObj.getRef(i))
fidInfo.refpt.append(fidObj.getSize(i)/2)
fidInfo.label.append('D'+str(i))
fidInfo.maxSize.append(fidObj.getMaxSize(i))
fidInfo.acqArray.append(0)
if fidObj.getSize(i) > 1:
fidInfo.mapToFIDList.append(i)
fidInfo.mapToDatasetList[i] = j
j += 1
acqOrder = fidObj.getAcqOrder()
fidInfo.acqOrder = acqOrder
if (keywords): # may use keywords for flags
fidInfo.flags = keywords
return fidInfo
def CREATE(nvFileName, dSize=None, extra=0):
''' Create a new NMRViewJ format dataset. If file already exists it will be erased first.<br>
Parameters
---------
nvFileName : string
Name of the dataset file to create.
dSize : array
The size of the dimensions. If not specified the size automatically determined from processing script.
'''
global fidInfo
global dataInfo
global nmrFxMode
try:
if nmrFxMode:
#nvFileName += ".tmp"
pass
except:
pass
dataInfo.filename = nvFileName
if (dSize == None):
dSize = fidInfo.size
dataInfo.size = list(dSize)
dataInfo.msize = initMSize(fidInfo, dSize)
else:
dataInfo.size = list(dSize)
dataInfo.msize = initMSize(fidInfo, dSize)
createDataset()
dataInfo.extra = extra
if dataInfo.extra != 0:
processor.keepDatasetOpen(True)
DIM(1) # default start dim
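# A typical processing script built from these functions reads the FID,
# creates the dataset, then queues operations per dimension (EXPD and FT
# are defined below; the file names here are placeholders):
#   FID('hnco.fid')
#   CREATE('hnco.nv')
#   DIM(1)
#   EXPD(lb=5.0)
#   FT()
#   DIM(2)
#   FT()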
def initMSize(fidInfo, size):
msize = []
for i,sz in enumerate(size):
nsz = sz
if fidInfo.isComplex(i):
nsz = sz * 2;
msize.append(nsz)
return msize
def createDataset(nvFileName=None, datasetSize=None):
global fidInfo
global dataInfo
print 'create',datasetSize,dataInfo.msize,'extra',dataInfo.extra
# fidInfo.flags['dmx'] = False
# fidInfo.flags = {'dmx':True, 'exchangeXY':False, 'swapBits':True, 'negatePairs':True}
if (nvFileName == None):
nvFileName = dataInfo.filename
if (datasetSize == None):
datasetSize = dataInfo.msize
datasetSize[0] = dataInfo.size[0]
useSize = []
j=0
newDatasetSize = []
    for i,dimSize in enumerate(datasetSize):
        if (fidInfo.mapToDatasetList[i] >= 0) and (dimSize > 1):
            newDatasetSize.append(dimSize)
            useSize.append(fidInfo.useSize[i])
            j += 1
        else:
            useSize.append(1)
if dataInfo.extra != 0:
useSize.append(dataInfo.extra)
newDatasetSize.append(dataInfo.extra)
#useSize = [956,1,32]
datasetSize = list(newDatasetSize)
dataInfo.createdSize = datasetSize
if not processor.isDatasetOpen():
try:
os.remove(nvFileName)
except OSError:
pass
parFileName = os.path.splitext(nvFileName)[0]+'.par'
try:
os.remove(parFileName)
except OSError:
pass
if dataInfo.inMemory:
processor.createNVInMemory(nvFileName, datasetSize, useSize)
elif (fidInfo and fidInfo.flags):
processor.createNV(nvFileName, datasetSize, useSize, fidInfo.flags)
print 'exists',os.path.exists(nvFileName)
else:
processor.createNV(nvFileName, datasetSize, useSize)
print 'exists',os.path.exists(nvFileName)
dataset = processor.getDataset()
psspecial.datasetMods(dataset, fidInfo)
dataInfo.resizeable = False # dataInfo.size is fixed, createNV has been run
setDataInfo(datasetSize)
def closeDataset():
    if not processor.isDatasetOpen():
        print 'fexists',os.path.exists(dataInfo.filename)
    processor.closeDataset()
def setDataInfo(dSize):
global fidInfo
dataset = processor.getDataset()
nDim = dataset.getNDim()
if (fidInfo):
dataset.setSolvent(fidInfo.solvent)
for iDim in range(nDim):
fidDim = fidInfo.mapToFID0(iDim)
if fidDim != -1:
if fidInfo.ref:
dataset.setRefValue(iDim,fidInfo.ref[fidDim])
dataset.setRefValue_r(iDim,fidInfo.ref[fidDim])
if fidInfo.refpt:
center = dSize[iDim]/2
dataset.setRefPt(iDim,center)
dataset.setRefPt_r(iDim,center)
if fidInfo.sw:
dataset.setSw(iDim,fidInfo.sw[fidDim])
if fidInfo.sf:
dataset.setSf(iDim,fidInfo.sf[fidDim])
if fidInfo.label:
dataset.setLabel(iDim,fidInfo.label[fidDim])
if fidInfo.label:
if (nDim > len(fidInfo.label)):
dataset.setLabel(nDim-1,'array')
def getAcqOrder():
'''return nmrData acquisition order'''
global fidInfo
return fidInfo.acqOrder
def setDataInfoSize(curDim, size):
global dataInfo
global fidInfo
if fidInfo.mapToDatasetList[curDim] != -1:
dataInfo.size[curDim] = size
if size > dataInfo.msize[curDim]:
dataInfo.msize[curDim] = size
def OPEN(nvFileName, resize=False):
global fidInfo
global dataInfo
processor.openNV(nvFileName)
dataset = processor.getDataset()
fidInfo = FIDInfo()
fidInfo.size = dataset.getSizes()
dataInfo = DataInfo()
dataInfo.dataset = dataset
dataInfo.filename = nvFileName
dataInfo.size = dataset.getSizes()
dataInfo.msize = list(dataInfo.size)
dataInfo.resizeable = resize
return dataInfo
# to set pars, use OPEN, then sw() sf() etc, then setDataInfo()
def skip(*args):
global dataInfo
global fidInfo
j = 0
newSize = []
fidInfo.mapToFIDList = []
for i,skip in enumerate(args):
if skip:
fidInfo.mapToDatasetList[i] = -1
else:
fidInfo.mapToDatasetList[i] = j
fidInfo.mapToFIDList.append(i)
j += 1
def DIM(*args):
''' Subsequent operations in script apply to the specified dimension number.'''
global dataInfo
global fidInfo
maxDim = len(dataInfo.size)
if len(args) == 0:
processor.addDatasetProcess()
else:
iDim = args[0]
if (iDim < 1 or iDim > maxDim):
raise Exception("DIM("+str(iDim)+"): should be between 1 and "+str(maxDim))
if len(args) == 1:
dataInfo.curDim = iDim-1
processor.addDimProcess(dataInfo.curDim)
else:
dims = []
dataInfo.curDims = [dim-1 for dim in args]
for dim in args:
if (dim < 1 or dim > maxDim):
raise Exception("DIM("+str(dim)+"): should be between 1 and "+str(maxDim))
dims.append(int(dim)-1)
processor.addMatProcess(*dims)
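# For example (a sketch): DIM(1) starts a process over the direct dimension,
# DIM(2,3) adds a matrix process spanning dimensions 2 and 3, and a bare
# DIM() adds a dataset-wide process.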
def UNDODIM(iDim):
''' Adds a process which undoes the operations in the last instance of the specified dimension number.'''
global dataInfo
global fidInfo
maxDim = len(dataInfo.size)
if (iDim < 1 or iDim > maxDim):
raise Exception("DIM("+str(iDim)+"): should be between 1 and "+str(maxDim))
dataInfo.curDim = iDim-1
processor.addUndoDimProcess(dataInfo.curDim)
dataInfo.size[dataInfo.curDim] = fidInfo.size[dataInfo.curDim]
def generic_operation(operation):
'''decorator to make a basic operation function easier. Code ends up looking like:
@generic_operation
def FUNCTION(arg1, arg2, ...):
op = FUNCTION(arg1, arg2, ...)
return op'''
#The actual decorator. Any code from the original python operation function
# is called between getting the process and the execution / addOp phase.
def inner(*args, **kwargs):
process = kwargs['process'] if ('process' in kwargs) and (kwargs['process'] != None) else getCurrentProcess()
#call the originally declared python function with the arguments that
#the function is called with.
op = operation(*args, **kwargs)
if op == None:
return None
if 'vector' in kwargs and kwargs['vector'] != None:
op.eval(kwargs['vector'])
else:
process.add(op)
return op
sig = list(inspect.getargspec(operation))
#get arguments of the operation
arguments = [inspect.getargspec(operation)[0]]
if arguments != None:
arguments = inspect.formatargspec(arguments[0])
else:
arguments = ""
#this makes a zip with tuples of the strings of the variable names, and the key value, so its ((variable_name_1, variable_1_default_value), ...)
#inspect.formatargspec will turn a list into a string
name_val = inspect.formatargspec(zip(inspect.getargspec(operation)[0],
inspect.getargspec(operation)[-1]))
#substitute a tuple with a variable name and its value into a string with:
#((a=1), (b=2), (c=3), ...)
def sub_arg_name_and_value(t):
'''Tuple with ((vector, vector), value, 0.0 + 0.0j). Convert it to:
(vector = vector, value=0.0+0.0j)'''
return re.sub("\([^)]*\)", lambda x: x.group(0).replace(',', '='), t)
#for the arguments of the decorated function, we pass in a tuple of variable #names and their default values (as declared in the original operation).
arguments = sub_arg_name_and_value(name_val)
#get rid of all parentheses and surround the string with parenthesis again
arguments = '(' + arguments.replace('(', '').replace(')', '') + ')'
#key = value passed in from outer function
#vector=vector, pt1 = pt1
#variable names are again the variables from the original function,
#but the values are the values that are passed in, not the default
#values. This means that the key is the name of the parameter,
# but the value is the variable that the function is called with.
#this is used to make the function call:
# inner(value=value, vector=vector, process=process)
# Obviously, if no value is passed in, it defaults to the default value
# from the "arguments" variable
key_val = inspect.formatargspec(zip(inspect.getargspec(operation)[0],
inspect.getargspec(operation)[0]))
call_values = sub_arg_name_and_value(key_val)
call_values = '(' + call_values.replace('(', '').replace(')', '') + ')'
#create function declaration that contains the operation name, that
#calls "inner" with the correct arguments (that also contains defaults
#arguments from the operation if none were passed in
#this declares a function with the original operation name, taking
#the original arguments, and calling the decorated function.
src = 'def %s%s :\n' % (operation.__name__, arguments)
src += ' return inner %s\n' % call_values
#namespace to declare the function in
evaldict = {'inner': inner}
#declare inside evaldict so that we can retain the function name
#after decorating the function
exec src in evaldict
inner = evaldict[operation.__name__]
#bind docstring from original function to inner
inner.__doc__ = operation.__doc__
return inner
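# For illustration (a reconstruction of the generated source, not part of
# the original module): decorating
#   def SCALE(factor=1.0, vector=None, process=None)
# execs text shaped like
#   def SCALE(factor= 1.0, vector= None, process= None) :
#       return inner (factor=factor, vector=vector, process=process)
# so the public name, signature and defaults survive the decoration.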
@generic_operation
def ADD(value=0 + 0j, first=0, last=-1, disabled=False, vector=None, process=None):
'''Add value to the vector at all points between first and last, where value can either be an integer (real) or a complex number written as (1.0 + 3j).
Parameters
---------
value : complex
The value to add to each data point.
first : int
min : 0
max : size - 1
The first point of the vector to add to.
last : int
min : -1
max : size - 1
The last point of the vector to add to.
'''
if disabled:
return None
value = complex(value)
op = Add(value.real, value.imag, first, last)
return op
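# Hedged example of the two calling modes the decorator provides
# (v is assumed to be an org.nmrfx.processor.math.Vec instance):
#   ADD(value=1.0+0.5j, first=0, last=127, vector=v)  # evaluate v in place
#   ADD(value=2.0)                           # queue on the current process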
def BCMED(frac=0.1,wrap=False, disabled=False, vector=None, process=None):
'''Correct the baseline of the vector using the median method.
Parameters
---------
frac : double
amin : 0.001
min : 0.001
max : 0.50
amax : 1.00
window size is set by multiplying frac times the number of extrema in the vector
wrap : bool
Wrap baseline fit around edge of spectrum.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = BcMed(frac,wrap)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def REGIONS(regions = None,type='frac', signal=False, disabled=False, vector=None, process=None):
'''Baseline correction using a polynomial fit.
Parameters
---------
regions : []
Specify the points of the vector to perform baseline correction on.
type : {'frac','pts','ppms'}
Specify the units for the region values.
signal : bool
Specify the boundary of peaks instead of the baseline.
'''
if disabled:
return None
process = process or getCurrentProcess()
realPoints = ArrayList()
if regions == None:
pass
else:
for value in regions:
realPoints.add(float(value))
op = Regions(realPoints, type, signal)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def AUTOREGIONS(mode='sdev', winSize=16, minBase=12, ratio=10.0, disabled=False, vector=None, process=None):
'''Baseline correction using a polynomial fit.
Parameters
---------
mode : {'sdev','cwtd','cwtdf'}
Specify the mode for auto identifying baseline regions.
winSize : int
min : 4
max : 256
Size of window used in searching for baseline regions;
minBase : int
min : 4
max : 256
Baseline regions must be at least this big;
ratio : real
amin : 1.0
min : 1.0
max : 100.0
Ratio relative to noise used in determining if region is signal or baseline, or percent baseline in cwtdf mode.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Regions(mode, winSize, minBase, ratio)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def BCPOLY(order=2, winSize=16, disabled=False, vector=None, process=None):
'''Baseline correction using a polynomial fit.
Parameters
---------
order : int
min : 1
max : 8
Order of the polynomial used in fit;
winSize : int
min : 4
max : 256
Size of window used in searching for baseline regions;
'''
if disabled:
return None
process = process or getCurrentProcess()
op = BcPoly(order, winSize)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def BCSINE(order=1, winSize=16, disabled=False, vector=None, process=None):
'''Baseline correction using a sine curve.
Parameters
---------
order : int
min : 1
max : 8
Order of the polynomial used in fit;
winSize : int
min : 4
max : 256
Size of window used in searching for baseline regions;
'''
if disabled:
return None
process = process or getCurrentProcess()
op = BcSine(order, winSize)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def BCWHIT(lamb=5000, order=1, baseline=False, disabled=False, vector=None, process=None):
'''Baseline correction using a smoother.
Parameters
---------
lamb : real
amin : 10.0
min : 1000.0
max : 20000.0
Parameter controlling how close the fit to the baseline should be
order : int
min : 1
max : 2
Order of the polynomial used in fit;
baseline : bool
If true, return the calculated baseline, rather than the corrected vector
'''
if disabled:
return None
process = process or getCurrentProcess()
realPoints = ArrayList()
op = Bcwhit(lamb, order, baseline )
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def BUCKET(buckets=256, disabled=False, vector=None, process=None):
'''The vector is bucketed by adding adjacent data points. The vector size after this operation will be equal to the specified number of buckets. The original vector size must be a multiple of the number of buckets. Each resulting data point will represent the sum of winSize data points where winSize is equal to size/nBuckets
Parameters
---------
buckets : int
min : 0
max : size
Number of buckets to place data points into. Vector size must be a multiple of this number.
'''
if disabled:
return None
op = Bucket(buckets)
return op
def COMB(coef=None, numInVec=0, numOutVec=0, inVec=None, outVec=None, keepImag=False,disabled=False, process=None):
'''combine inVec and outVec with a list of coefficients
Parameters
---------
coef : {'hyper','hyper-r','echo-antiecho','echo-antiecho-r','ge','sep','real','auto'}
How to combine data rows with different phases.
'''
if disabled:
return None
process = process or getCurrentProcess()
global fidInfo
if (coef == None):
coef = StdCoefs['hyper']
elif (coef == 'auto'):
coef = fidInfo.getCoefs()
else:
if isinstance(coef, str) and coef in StdCoefs:
coef = StdCoefs[coef]
elif isinstance(coef, str):
coef = coef.split()
if len(coef) < 2:
raise Exception("Coefficients "+str(coef)+" are not a valid value")
else:
ncoef = []
for c in coef:
ncoef.append(float(c))
coef = ncoef
if (not isinstance(coef, (list, tuple))):
raise Exception("Coefficients "+coef+" are not a list variable")
if (numInVec == 0):
nCoef = len(coef)
numInVec = int(math.log(nCoef/2)/math.log(2))
numOutVec=numInVec
if nCoef == 4:
numOutVec=2
op = Combine(numInVec, numOutVec, coef,keepImag)
if (inVec != None): #and outVec != None):
arrList = Combine.getArrayList()
for v in inVec:
arrList.add(v)
op.eval(arrList)
else:
process.addOperation(op)
if len(coef) == 4:
if (dataInfo.resizeable):
curDim = dataInfo.curDim
setDataInfoSize(curDim+1, dataInfo.size[curDim+1]*2)
return op
def TDCOMB(dim=2,coef=None, numInVec=0, numOutVec=0, inVec=None, outVec=None, disabled=False, process=None):
'''combine complex inVec and outVec time domain vectors using a list of coefficients
Parameters
---------
dim : {2,3,4,5}
Indirect dimension of dataset to combine vectors in. Use 2 for 2D, 2 or 3 for 3D, etc.
coef : {'hyper','hyper-r','echo-antiecho','echo-antiecho-r','ge','sep','real','auto'}
How to combine data rows with different phases.
'''
if disabled:
return None
process = process or getCurrentProcess()
global fidInfo
if (coef == None):
coef = StdCoefs['hyper']
elif (coef == 'auto'):
coef = fidInfo.getCoefs()
else:
if isinstance(coef, str) and coef in StdCoefs:
coef = StdCoefs[coef]
elif isinstance(coef, str):
coef = coef.split()
if len(coef) < 2:
raise Exception("Coefficients "+str(coef)+" are not a valid value")
else:
ncoef = []
for c in coef:
ncoef.append(float(c))
coef = ncoef
if (not isinstance(coef, (list, tuple))):
raise Exception("Coefficients "+coef+" are not a list variable")
if (numInVec == 0):
nCoef = len(coef)
numInVec = int(math.log(nCoef/2)/math.log(2))
numOutVec=numInVec
op = TDCombine(dim-1,numInVec, numOutVec, coef)
if (inVec != None): #and outVec != None):
arrList = TDCombine.getArrayList()
for v in inVec:
arrList.add(v)
op.eval(arrList)
else:
process.addOperation(op)
return op
@generic_operation
def CSHIFT(shift=0, adjref=False, disabled=False, vector=None, process=None):
'''Circular shift of the data points in the vector by the specified amount.
Parameters
---------
shift : position
min : -128
max : 128
Amount to shift the vector by. If float or int use points. If string, convert units
adjref : bool
If true, adjust the referencing of the vector based on shift
'''
if disabled:
return None
shiftObj = convertUnitStringToObject(shift)
op = CShift(shiftObj, adjref)
return op
@generic_operation
def COADD(coef=None, disabled=False, vector=None, process=None):
'''Coaddition of a set of vectors to yield one result vector.
Parameters
---------
coef : []
List of coefficients to scale each vector by.
'''
if disabled:
return None
if (not isinstance(coef, (list, tuple))):
raise Exception("Coefficients "+coef+" are not a list variable")
if (len(coef) == 0):
raise Exception("Coefficients list is empty")
op = CoAdd(coef)
return op
@generic_operation
def STACK(count=1,group=2,disabled=False):
'''Stack vectors from rows into planes.
Parameters
---------
count : int
amin : 1
min : 1
max : 32
Count of planes in stack.
group : int
amin : 1
min : 1
max : 32
Number of vectors in group (kept in plane together).
'''
if disabled:
return None
op = Stack(count,group)
return op
def CWTD(winSize=32, disabled=False, vector=None, process=None):
'''Continuous Wavelet Transform Derivative.
Parameters
---------
winSize : int
min : 1
max : 1024
Size of the window.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Cwtd(winSize)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def DC(fraction=0.05, disabled=False, vector = None, process=None):
'''Shifts the spectrum so edges are centered. DC Offset.
Parameters
---------
fraction : real
amin : 0
min : 0
max : .33
amax : .33
The fraction of points from the beginning and end of a spectrum that will be used to create the offset.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Dc(fraction)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def MEASURE(key="measures_", map=None, disabled=False, vector = None, process=None):
'''Measures regions in spectrum.
Parameters
---------
key : "measures_"
Prefix to key used to store measure values in a map (dictionary). Key will have vector row appended.
map : None
Map in which to store results. If not specified (or = None) the default map will be used. Get the default map with "getMeasureMap()"
'''
if disabled:
return None
map = map or getMeasureMap()
process = process or getCurrentProcess()
op = Measure(map, key)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def SCHEDULE(fraction=0.05, endOnly=False, fileName="", disabled=False, vector = None, process=None):
'''Sets a sample schedule for a 1D vector and zeros points not on schedule. Used for testing IST.
Parameters
---------
fraction : real
amin : 0.05
min : 0.05
max : 1.0
amax : 1.0
The fraction of points that are collected. Ignored if fileName specified.
endOnly : bool
If true, only zero values at end of vector
fileName : file
Name of the schedule file to open if set.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Schedule(fraction, endOnly, fileName)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def LP(fitStart=0, fitEnd=0, predictStart=0, predictEnd=0, npred=0, ncoef=0,
threshold=5, backward=True, forward=True, mirror=None,disabled=False,vector=None,
process=None):
'''Extend the vector using Linear Prediction.
Forward or backward linear prediction can be done. If both are specified
then both are done and coefficients averaged (forward-backward LP).
Parameters
---------
fitStart : int
min : 0
max : size-1
First point used in fit. Defaults to 0 or 1 (depending on forward/backward mode) if 0;
fitEnd : int
min : 0
max : size-1
Last point used in fit. Defaults to size-1 if 0.
predictStart : int
min : 0
max : size-1
Position of first predicted point. Defaults to size if 0.
predictEnd : int
min : 0
max : size*2-1
Position of last predicted point. Defaults to 2*size-1 if 0.
npred : int
min : 0
max : size*2-1
Number of points to predict, only used if predictEnd is 0.
ncoef : int
min : 0
max : size-1
Number of coefficients. Defaults to size/2 if 0.
threshold : int
min : 4
max : 10
        Threshold of singular values used in keeping coefficients. Value used is 10^-threshold.
backward : bool
Do backwards linear prediction.
forward : bool
Do forwards linear prediction.
mirror : {None, 'odd', 'even'}
Do mirror image linear prediction.
'''
'''If fitEnd is equal to zero, then it will be set to the size of the vector.'''
global dataInfo
if disabled:
return None
process = process or getCurrentProcess()
mirrorInt = 0
if mirror:
if mirror == "even":
mirrorInt = 2
elif mirror == "odd":
mirrorInt = 1
elif mirror == "ps90-180":
mirrorInt = 2
elif mirror == "ps0-0":
mirrorInt = 1
else:
raise Exception("Invalid mirror option: "+mirror)
threshold = pow(10.0,-threshold)
op = Extend(fitStart, fitEnd, predictStart, predictEnd, npred, ncoef, threshold, backward, forward, False, mirrorInt)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
curDim = dataInfo.curDim
setDataInfoSize(curDim, getExtendSize(dataInfo.size[curDim],predictEnd,False))
return op
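# Hedged usage sketch: with the defaults, points size..2*size-1 are
# predicted from the measured points using averaged forward-backward
# coefficients:
#   LP()
#   LP(mirror='even')   # mirror-image variant, e.g. for constant-time data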
def LPR(fitStart=0, fitEnd=0, predictStart=0, predictEnd=0, npred=0, ncoef=0,
threshold=5, backward=True, forward=True, disabled=False, vector=None,
process=None):
'''Replace starting points of the vector using Linear Prediction.
Forward or backward linear prediction can be done. If both are specified
then both are done and coefficients averaged (forward-backward LP).
Parameters
---------
fitStart : int
min : 1
max : size-1
First point used in fit. Defaults to 0 if 0;
fitEnd : int
min : 0
max : size-1
Last point used in fit. Defaults to size-1 if 0.
predictStart : int
min : 0
max : size/4
Position of first predicted point. Defaults to 0 if < 0.
predictEnd : int
min : 0
max : size/4
Position of last predicted point. Defaults to 0 if 0.
npred : int
min : 0
max : size*2-1
Number of points to predict, only used if predictEnd is 0.
ncoef : int
min : 0
max : size-1
Number of coefficients. Defaults to size/2 if 0.
threshold : int
min : 3
max : 10
Threshold of singular values used in keeping coefficients. Value used is 10^-threshold
backward : bool
Do backwards linear prediction.
forward : bool
Do forwards linear prediction.
'''
'''If fitEnd is equal to zero, then it will be set to the size of the vector.'''
global dataInfo
if disabled:
return None
process = process or getCurrentProcess()
threshold = pow(10.0,-threshold)
op = Extend(fitStart, fitEnd, predictStart, predictEnd, npred, ncoef, threshold, backward, forward, True, 0)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
curDim = dataInfo.curDim
setDataInfoSize(curDim,getExtendSize(dataInfo.size[curDim],predictEnd,True))
return op
def EXTRACTP(fstart=0.0, fend=0.0, disabled=False, vector=None, process=None):
'''Extract a specified range of points.
Parameters
---------
fstart : real
min : 0
max : size-1
Start point of region to extract
fend : real
min : 0
max : size-1
End point of region to extract
'''
global fidInfo
if disabled:
return None
process = process or getCurrentProcess()
op = Extract(fstart,fend,True)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
curDim = dataInfo.curDim
        setDataInfoSize(curDim, getExtractSizeP(dataInfo.size[curDim],fidInfo,curDim,fstart,fend))
    return op
def EXTRACT(start=0, end=0, mode='left', disabled=False, vector=None, process=None):
'''Extract a specified range of points.
Parameters
---------
start : int
min : 0
max : size-1
Start point of region to extract
end : int
min : 0
max : size-1
End point of region to extract
mode : {'left', 'right', 'all', 'middle','region'}
Extract a named region (left,right,all,middle) instead of using start and end points
'''
if disabled:
return None
process = process or getCurrentProcess()
fmode = False
if end != 0:
mode = 'region'
if (mode == 'left'):
fstart = 0.0
fend = 0.5
fmode = True
elif (mode == 'all'):
fstart = 0.0
fend = 1.0
fmode = True
elif (mode == 'right'):
fstart = 0.5
fend = 1.0
fmode = True
elif (mode == 'middle'):
fstart = 0.25
fend = 0.75
fmode = True
else:
global dataInfo
if (end == 0):
try:
end = dataInfo.size[dataInfo.curDim] - 1
except:
pass
if (fmode):
op = Extract(fstart,fend)
else:
op = Extract(start,end)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
curDim = dataInfo.curDim
if (fmode):
setDataInfoSize(curDim, getExtractSize(dataInfo.size[curDim],fstart,fend))
else:
if end == 0:
end = dataInfo.size[curDim]-1
            setDataInfoSize(curDim, end - start + 1)
    return op
def TRIM(ftrim=0.1, disabled=False, vector=None, process=None):
'''Trim a fraction of vector from each end.
Parameters
---------
ftrim : real
amin : 0
min : 0
max : 0.4
amax : 0.4
Fraction of size to trim on each side
'''
if disabled:
return None
process = process or getCurrentProcess()
fmode = True
fstart = ftrim
fend = 1.0-ftrim
op = Extract(fstart,fend)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
curDim = dataInfo.curDim
        setDataInfoSize(curDim, getExtractSize(dataInfo.size[curDim],fstart,fend))
    return op
def DCFID(fraction=0.06, disabled=False, vector=None, process=None):
''' Correct DC offset of FID real and imaginary channels
Parameters
---------
fraction : real
amin : 0.01
min : 0.01
max : 0.25
amax : 0.35
Fraction of end of FID to average to calculate offset
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Dcfid(fraction)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def DX(disabled=False, vector=None, process=None):
'''Numerical Derivative.
'''
op = Dx()
return op
def TDSS(winSize=31, nPasses=3, shift='0.0f',disabled=False, vector=None, process=None):
''' Time domain solvent suppression.
Parameters
---------
winSize : int
min : 1
max : 128
Window size of moving average filter (+/- this value).
nPasses : int
min : 1
max : 3
Number of passes of filter. Three is optimal.
shift : position
min : -0.5
max : 0.5
        Position of frequency to suppress. Default is in fractional units with zero at center.
'''
if disabled:
return None
process = process or getCurrentProcess()
shiftObj = convertUnitStringToObject(shift)
op = Tdss(winSize,nPasses,shiftObj)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def EXPD(lb=1.0, fPoint=1.0, inverse=False, disabled=False, vector=None, process=None):
'''Exponential Decay Apodization.
Parameters
---------
lb : real
amin : -20.0
min : 0.0
max : 20.0
Line broadening factor.
fPoint : real
amin : 0.0
min : 0.5
max : 1.0
First point multiplication.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Expd(lb, fPoint, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def BZ(alg='ph', phase=0.0, scale=1.0, pt2=0.0, delay=None, disabled=False, vector=None, process=None):
'''Zero Bruker DSP baseline and associated algorithms: <i>sim, ph, dspph, chop</i>.
Parameters
---------
alg : {'ph','sim', 'dspph', 'chop'}
Algorithm to correct Bruker DSP artifact.
phase : real
min : -180
max : 180
Phase adjust (sim, ph only).
scale : real
min : -1
max : 3
Scale factor (sim only).
'''
if disabled:
return None
process = process or getCurrentProcess()
global fidInfo
global dataInfo
curDim = dataInfo.curDim
if (delay == None):
delay = 67.984 # default group delay
try:
delay = p('GRPDLY,1') # read from Bruker pars
except:
delay = 0.0
pass
if (dataInfo.resizeable):
try:
setDataInfoSize(curDim, getBzSize(dataInfo.size[curDim], delay, alg))
except:
pass
op = Bz(alg, delay, scale, phase, pt2)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def FDSS(center='0.0f', start='0.005f', end='0.015f', autoCenter=False, disabled=False, vector=None, process=None):
'''Frequency Domain Solvent Suppression.
Parameters
---------
center : position
amin : -0.5
min : -0.5
max : 0.5
amax : 0.5
        Position of frequency to suppress. Default is in fractional units with zero at center.
start : position
amin : 0.00
min : 0.00
max : 0.010
amax : 0.1
The beginning of the peak.
end : position
amin : 0.000
min : 0.00
max : 0.02
amax : 0.15
The end of the peak.
autoCenter : bool
Find the largest peak in spectrum and center on that.
'''
if disabled:
return None
process = process or getCurrentProcess()
shiftObj = convertUnitStringToObject(center)
startObj = convertUnitStringToObject(start)
endObj = convertUnitStringToObject(end)
op = Fdss(shiftObj, startObj, endObj, autoCenter)
if (vector!=None):
op.eval(vector)
else:
process.addOperation(op)
return op
def FILTER(type='notch', offset=0, width=0.05, factor=4, groupFactor=8, mode='zero', ncoefs=None, disabled=False, vector=None, process=None):
'''Generic filter, type is <i>notch</i> or <i>lowpass</i>.
Parameters
---------
type : {'notch', 'lowpass'}
Filter type.
offset : real
min : -0.5
max : 0.5
Frequency offset in fraction of sw.
width : real
min : 0.01
max : 0.09
Notch width in fraction of sw (notch only).
factor : int
min : 3
max : 20
Decimation factor (lowpass only).
groupFactor : real
min : 4
max : 40
Filter sharpness.
mode : {'zero', 'reflect','profile'}
Filter type.
'''
if disabled:
return None
# typical usage: type='notch' width=0.05 or type='lowpass' factor=4
process = process or getCurrentProcess()
global dataInfo
curDim = dataInfo.curDim
if (type == 'notch' or type == 'n'):
nc = 0.5 * groupFactor / width + 0.2
nc = (int(nc)/2) * 2 + 1 # ensure odd integer
ncoefs = ncoefs or nc
ncoefs = int(ncoefs) # groupDelay is ncoefs/2, not groupFactor
op = FFilter(type, mode, 1.0-width, ncoefs, offset)
if (dataInfo.resizeable):
try:
setDataInfoSize(curDim, getFilterSize(dataInfo.size[curDim], ncoefs, 1))
except:
pass
else: # type = 'lowpass'
factor = int(factor)
nc = 2 * groupFactor * factor + 1
nc = (int(nc)/2) * 2 + 1 # ensure odd integer
ncoefs = ncoefs or nc
ncoefs = int(ncoefs) # groupDelay is ncoefs/2, not groupFactor
op = FFilter(type, mode, factor, ncoefs, offset)
if (dataInfo.resizeable):
try:
setDataInfoSize(curDim, getFilterSize(dataInfo.size[curDim], ncoefs, factor))
except:
pass
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def FT(negateImag=False, negatePairs=False, auto=False, disabled=False, vector=None, process=None):
'''Fourier Transform.
Parameters
---------
negateImag : bool
Negate imaginary values before the FT
negatePairs : bool
Negate alternate complex real/imaginary values before the FT
auto : bool
Determine negatePairs from FID parameters
'''
if disabled:
return None
process = process or getCurrentProcess()
global dataInfo
global fidInfo
if (auto == True):
negatePairs = fidInfo.negatePairsFT(dataInfo.curDim)
negateImag = fidInfo.negateImagFT(dataInfo.curDim)
op = Ft(negateImag, negatePairs)
if (vector != None):
if (not vector.isComplex()):
raise Exception("Cannot perform FT: vector not complex")
op.eval(vector)
else:
process.addOperation(op)
return op
def FT2D(process=None):
process = process or getCurrentProcess()
op = Ft2d()
process.addOperation(op)
return op
def IFT2D(process=None):
process = process or getCurrentProcess()
op = Ift2d()
process.addOperation(op)
return op
@generic_operation
def RANDN(mean=0.0, stdev=1.0, seed=0, disabled=False, vector=None, process=None):
'''Add a Gaussian to a vector.
Parameters
---------
mean : double
min : 0.0
max : 100.0
Mean of the Gaussian.
stdev : double
amin : 0.0
min : 0.1
max : 100.0
Standard deviation of the Gaussian.
seed : int
min : 0
Seed for the RNG.
'''
if disabled:
return None
op = RandN(mean, stdev, seed)
return op
@generic_operation
def GAPSMOOTH(center=-1, start=-1, end=-1, autoCenter=False, disabled=False, vector=None, process=None):
'''Solvent suppression by removing signal and filling the gap with a smoothing function.
Parameters
---------
center : int
Center point of the solvent peak.
start : int
Beginning point of the solvent peak.
end : int
End point of the solvent peak.
autoCenter : bool
Find largest peak in spectrum and set that as center
'''
if disabled:
return None
op = GapSmooth(center, start, end, autoCenter)
return op
def GF(gf=1.0, gfs=1.0, fPoint=1.0, inverse=False, disabled=False, vector=None, process=None):
'''Lorentz-to-Gauss.
Parameters
---------
gf : double
amin : 0.0
min : 0.0
max : 20.0
gf: Gaussian broadening
gfs : double
amin : 0.0
min : 0.0
max : 1.0
gfs: Gaussian center
fPoint : double
amin : 0.0
min : 0.0
max : 1.0
amax : 5.0
fpoint: First point multiplier
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Gf(gf, gfs, fPoint, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def GM(g1=1.0, g2=1.0, g3=0.0, fPoint=1.0, inverse=False, disabled=False, vector=None, process=None):
'''Lorentz-to-Gauss.
Parameters
---------
g1 : double
amin : 0.0
min : 0.0
max : 20.0
g1: Exponential line narrowing
g2 : double
amin : 0.0
min : 0.0
max : 20.0
g2: Gaussian broadening
g3 : double
amin : 0.0
min : 0.0
max : 1.0
amax : 1.0
g3: Gaussian center
fPoint : double
amin : 0.0
min : 0.0
max : 1.0
amax : 5.0
fpoint: First point multiplier
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Gm(g1, g2, g3, fPoint, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def GMB(gb=0.0, lb=0.0, fPoint=1.0, inverse=False, disabled=False, vector=None, process=None):
'''Gauss Broaden Window.
Parameters
---------
gb : real
amin : 0.0
min : 0.0
max : 1.0
amax : 1.0
Gaussian Broadening Coefficient.
lb : real
min : -20.0
max : 20.0
Line broadening.
fPoint : real
amin : 0.0
min : 0.0
max : 1.0
Factor multiplied with the first point.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Gmb(gb, lb, fPoint, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def HFT(disabled=False, vector=None, process=None):
'''Hilbert Transform
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Hft()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def IFT(disabled=False, vector=None, process=None):
'''Inverse Fourier Transform'''
if disabled:
return None
process = process or getCurrentProcess()
op = Ift()
if (vector != None):
if (not vector.isComplex()):
raise Exception("Cannot perform IFT: vector not complex")
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def IMAG(disabled=False, vector=None, process=None):
'''Set the real values equal to the imaginary values and discard the rest.'''
op = Imag()
return op
@generic_operation
def INTEGRATE(first=0, last=-1, disabled=False, vector=None, process=None):
'''Set the signal equal to its integral.
int : first
First point of integration region
int : last
Last point of integration region
'''
if disabled:
return None
op = Integrate(first, last)
return op
def SAMPLE_SCHEDULE(filename="/tmp/sample_schedule.txt", mode='read', dims=[], demo=False, fraction=0.25):
'''Read or write a sample schedule from/to a file.
Parameters
---------
filename : string
Name of file.
mode : {'read','create'}
Whether to read or write a schedule.
dims : int
integer array of time domain data sizes after NUS processing, e.g. [40, 24].
demo : bool
Demonstration mode (False if non-uniformly sampled, True if regular fid).
fraction : real
Fraction of total points that are sampled (create mode only).
'''
global fidInfo
fidObj = fidInfo.fidObj
if (mode == 'create'): # for 2D NUS
size = fidInfo.size[1] # too small unless demo
if (len(dims) > 0):
size = dims[0]
schedule = fidObj.createSampleSchedule(size, fraction, filename, demo, fidObj)
else: # mode='read'
schedule = fidObj.readSampleSchedule(filename, demo, fidObj)
if (len(dims) > 0):
schedule.setDims(dims)
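# Illustrative usage (hypothetical file path):
#   SAMPLE_SCHEDULE(filename='/tmp/sched.txt', mode='read')
#   SAMPLE_SCHEDULE(filename='/tmp/sched.txt', mode='create', dims=[40, 24], fraction=0.25)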
def ISTMATRIX(threshold=0.90, iterations=500, alg='std', phase=None, timeDomain=True, disabled=False, process=None):
'''Iterative Soft Threshold for 2D Matrix.
Parameters
---------
threshold : real
amin : 0.1
min : 0.1
max : 1.0
amax : 1.0
Values above this threshold (multiplied by the largest peak) are transferred to the IST add buffer.
iterations : int
min : 1
max : 1000
Number of iterations to perform.
alg : {'std','abs','phased','phasedpos'}
Name of algorithm to use.
phase : []
Array of phase values, 2 per indirect dimension.
'''
if disabled:
return None
phaseList = ArrayList()
if phase == None:
pass
else:
for value in phase:
phaseList.add(float(value))
process = process or getCurrentProcess()
global fidInfo
if fidInfo == None or fidInfo.fidObj == None:
schedule = None
else:
schedule = fidInfo.fidObj.getSampleSchedule()
if (len(phaseList) > 0):
op = IstMatrix(threshold, iterations, schedule, alg, timeDomain, phaseList)
else:
op = IstMatrix(threshold, iterations, schedule, alg, timeDomain)
process.addOperation(op)
return op
def IST(threshold=0.98, iterations=500, alg='std', timeDomain=True, ph0=None, ph1=None,
adjustThreshold=False, all=False, disabled=False, vector=None, process=None):
'''Iterative Soft Threshold.
Parameters
---------
threshold : real
amin : 0.1
min : 0.89
max : 0.99
amax : 0.99
Values above this threshold (multiplied by the largest peak) are transferred to the IST add buffer.
iterations : int
min : 1
max : 2000
Number of iterations to perform.
alg : {'std','abs','phased','phasedpos'}
Name of algorithm to use.
timeDomain : bool
Is the end result of the operation in time domain
ph0 : real
min : -360.0
max : 360.0
Apply this zero order phase correction to data before IST.
ph1 : real
min : -360.0
max : 360.0
Apply this first order phase correction to data before IST.
adjustThreshold : bool
Adjust threshold during IST calculation
all : bool
Replace all values in FID (including actually sampled)
'''
if disabled:
return None
zeroFill = True
process = process or getCurrentProcess()
global fidInfo
global dataInfo
if fidInfo == None or fidInfo.fidObj == None:
schedule = None
else:
schedule = fidInfo.fidObj.getSampleSchedule()
# gph0, gph1 = getPhases(dataInfo.curDim)
# if (ph0 == None):
# ph0 = gph0
# if (ph1 == None):
# ph1 = gph1
# if nDim == 2 do op, otherwise do IST command (3d, 4d)
if (ph0 != None or ph1 != None):
if (ph1 == None):
ph1 = 0.0
if (ph0 == None):
ph0 = 0.0
op = IstVec(threshold, iterations, schedule, alg, timeDomain, zeroFill, all, adjustThreshold, ph0, ph1)
else:
op = IstVec(threshold, iterations, schedule, alg, timeDomain, zeroFill, all, adjustThreshold)
# eventually add ter, alternate to iterations
if (vector != None):
if (not vector.isComplex()):
raise Exception("Cannot perform IST: vector not complex")
op.eval(vector)
else:
process.addOperation(op)
return op
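# Illustrative usage (parameter values are examples only):
#   IST()                                   # defaults: threshold=0.98, 500 iterations
#   IST(threshold=0.95, ph0=45.0, ph1=0.0)  # phase correction applied before IST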
def NESTA(nOuter=15, nInner=20, tolFinal=2.5, muFinal=6,phase=None, logToFile=False, zeroAtStart=True, threshold=0.0, disabled=False, vector=None, process=None):
''' Experimental implementation of NESTA algorithm for NUS processing. This version
requires that the data be in-phase. Use the phase argument to provide a list of phase values.
Parameters
---------
nOuter : int
min : 1
max : 100
Number of outer iterations (continuations) to perform.
nInner : int
min : 1
max : 100
Number of inner iterations to perform.
tolFinal : real
amin : 0
min : 0
max : 10
amax : 10
Final tolerance for inner iterations is 10 raised to the negative of this number. For example, 5 gives 1.0e-5.
muFinal : real
amin : -2
min : -2
max : 9
Final mu value is 10 raised to the negative of this number. For example, 5 gives 1.0e-5.
phase : []
Array of phase values, 2 per indirect dimension.
logToFile : bool
Write log files containing information about progress of NESTA.
zeroAtStart : bool
Set unsampled values to zero at start of operation
threshold : real
min : 0
Threshold for absolute value. If less than this, skip this hyperplane.
'''
if disabled:
return None
phaseList = ArrayList()
if phase == None:
pass
else:
for value in phase:
phaseList.add(float(value))
tolFinalReal = math.pow(10.0,-tolFinal)
muFinalReal = math.pow(10.0,-muFinal)
process = process or getCurrentProcess()
global fidInfo
logFileName = None
if fidInfo == None or fidInfo.fidObj == None:
schedule = None
else:
schedule = fidInfo.fidObj.getSampleSchedule()
if logToFile:
rootdir = fidInfo.fidObj.getFilePath()
logDir = os.path.join(rootdir,"nesta")
if not os.path.exists(logDir):
os.mkdir(logDir)
logFileName = os.path.join(logDir,"log")
op = NESTANMR(nOuter, nInner, tolFinalReal, muFinalReal, schedule, phaseList, zeroAtStart, threshold, logFileName)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
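# Illustrative usage; genScript() below emits a bare NESTA() for NUS dimensions:
#   NESTA()
#   NESTA(nOuter=20, nInner=25, phase=[0.0, 0.0])  # explicit phases, 2 per indirect dim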
def NESTA_EX_SCR(iterations=30, execName='', disabled=False, vector=None, process=None):
'''NUS Processing with external NESTANMR program.
Parameters
---------
iterations : int
min : 1
max : 2000
Number of iterations to perform.
execName : string
Full path to NESTANMR executable.
'''
global nestaExecutable
if disabled:
return None
if execName != '':
nestaExecutable = execName
initialScript = 'import os;import subprocess;import pyproc'
script='pyproc.execNESTA(vecmat,'+str(iterations)+')'
process = process or getCurrentProcess()
op=PythonScript(script, initialScript, False)
if (vector != None):
op.eval(vector)
else:
process.add(op)
return op
def NESTA_L1_EXT(iter=30, rwiter=1, rootdir='', nestdir='nestaL1', schedFile='', phase=None, disabled=False, vector=None, process=None):
'''NUS Processing with external NESTANMR program.
Parameters
---------
iter : int
min : 1
max : 200
Number of iterations to perform.
rwiter : int
min : 1
max : 20
Number of re-weighted iterations to perform.
rootdir : string
Root directory for NESTA working files. If empty, defaults to directory of FID.
nestdir : string
Subdirectory for NESTA working files.
schedFile : string
Schedule file. If empty, it defaults to value stored in FID file object.
phase : []
Array of phase values, 2 per indirect dimension.
'''
if disabled:
return None
global fidInfo
if schedFile == '':
if fidInfo == None or fidInfo.fidObj == None:
schedFile = None
else:
schedFile = fidInfo.fidObj.getSampleSchedule().getFile()
phaseList = ArrayList()
if phase == None:
pass
else:
for value in phase:
phaseList.add(float(value))
process = process or getCurrentProcess()
if rootdir == '':
if fidInfo == None or fidInfo.fidObj == None:
rootdir = "."
else:
rootdir = fidInfo.fidObj.getFilePath()
if not os.path.exists(rootdir):
raise Exception('Directory "'+rootdir+'" does not exist')
nestDir = os.path.join(rootdir,nestdir)
if not os.path.exists(nestDir):
os.mkdir(nestDir)
op=NESTANMREx(iter,rwiter,nestDir,schedFile, phaseList)
if (vector != None):
op.eval(vector)
else:
process.add(op)
return op
def NESTA_L0_EXT(iter=5000, scaling=0.98, cutoff=0.1, rootdir='', nestdir='nestaL0', schedFile='', phase=None, disabled=False, vector=None, process=None):
'''NUS Processing with external NESTANMR program.
Parameters
---------
iter : int
amin : 1
min : 1
max : 6000
Number of iterations to perform.
scaling : real
amin : 0.94
min : 0.94
max : 0.99
amax : 0.99
Scaling of threshold at each iteration.
cutoff : real
amin : 0.0
min : 0.1
max : 0.5
Stop iterations when threshold is at this value
rootdir : string
Root directory for NESTA working files. If empty, defaults to directory of FID.
nestdir : string
Subdirectory for NESTA working files.
schedFile : string
Schedule file. If empty, it defaults to value stored in FID file object.
phase : []
Array of phase values, 2 per indirect dimension.
'''
if disabled:
return None
global fidInfo
if schedFile == '':
if fidInfo == None or fidInfo.fidObj == None:
schedFile = None
else:
schedFile = fidInfo.fidObj.getSampleSchedule().getFile()
phaseList = ArrayList()
if phase == None:
pass
else:
for value in phase:
phaseList.add(float(value))
process = process or getCurrentProcess()
if rootdir == '':
if fidInfo == None or fidInfo.fidObj == None: # guard against a missing FID object, as in NESTA_L1_EXT above
rootdir = "."
else:
rootdir = fidInfo.fidObj.getFilePath()
if not os.path.exists(rootdir):
raise Exception('Directory "'+rootdir+'" does not exist')
nestDir = os.path.join(rootdir,nestdir)
if not os.path.exists(nestDir):
os.mkdir(nestDir)
op=NESTANMREx(iter,scaling,cutoff,nestDir,schedFile, phaseList)
if (vector != None):
op.eval(vector)
else:
process.add(op)
return op
def MAG(disabled=False, vector=None, process=None):
'''Magnitude Calculation of a Vector. Each point is updated with its complex magnitude.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Mag()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def EXP(disabled=False, vector=None, process=None):
'''Exponential Calculation of a Vector. Each point is updated with its exponential value.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Exp()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def POWER(disabled=False, vector=None, process=None):
'''Power Calculation of a Vector. Each point is updated with its power value.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Power()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def SQRT(disabled=False, vector=None, process=None):
'''Sqrt Calculation of a Vector. Each point is updated with its square root.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Sqrt()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def MULT(value=1.0+0j, first = 0, last = -1, disabled=False, vector=None, process=None):
'''Multiply the points in a vector by a Real or Complex number.
Parameters
---------
value : complex
Number to multiply the points by.
first : int
min : 0
max : size - 1
Points starting from this will be multiplied by value. Default is 0.
last : int
min : -1
max : size - 1
Last point to multiply the data by. Default is the end of the vector.
'''
if disabled:
return None
#last = vector.getSize() - 1 if vector != None and last == -1
value = complex(value)
op = Mult(value.real, value.imag, first, last)
return op
@generic_operation
def ONES(disabled=False, vector=None, process=None):
'''Set all points in a vector to 1.0'''
op = Ones()
return op
@generic_operation
def GEN(freq=100.0,lw=1.0,amp=50.0,phase=0.0, disabled=False, vector=None, process=None):
'''Generate a simulated signal and add it to the vector.
Parameters
---------
freq : real
min : -500
max : 500.0
Frequency in Hz.
lw : real
amin : 0
min : 0
max : 10.0
Linewidth in Hz.
amp : real
amin : 0
min : 0
max : 100.0
Amplitude of signal.
phase : real
min : -180
max : 180.0
Phase of signal in degrees.
'''
if disabled:
return None
op = Gen(freq,lw,amp,phase)
return op
def PRINT(disabled=False, vector=None, process=None):
'''Print vector.'''
if disabled:
return None
process = process or getCurrentProcess()
op = IO()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def WRITE(index=-1, dimag=True, disabled=False, vector=None, process=None):
'''Write vector to dataset (normally done automatically).
Parameters
---------
dimag : bool
Discard imaginary values (make vector real).
'''
if disabled:
return None
process = process or getCurrentProcess()
op = WriteVector(dimag, index)
if (vector != None):
if (vector.pt != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def AUTOPHASE(firstOrder=False, maxMode=False, winSize=2, ratio=25.0, mode='flat', ph1Limit=45.0, negativePenalty=1.0, disabled=False, vector=None, process=None):
'''Auto Phase shift.
Parameters
---------
firstOrder : bool
Do first order phase correction.
maxMode : bool
Autophase by maximizing positive signal.
winSize : int
amin : 1
min : 1
max : 32
Size of each half of window used in doing CWTD. Full window is 2 x this value.
ratio : real
amin : 1.0
min : 1.0
max : 100.0
Ratio relative to noise used in determining if region is signal or baseline.
mode : {'flat','entropy'}
Name of algorithm to use.
ph1Limit : real
amin : 0.0
min : 1.0
max : 100.0
amax : 540.0
Limit ph1 value so its absolute value is less than this range.
negativePenalty : real
amin : 0.01
min : 0.1
max : 100.0
amax : 200.0
How much weight to use in penalizing negative values in entropy mode (the actual value is multiplied by 1.0e-5).
'''
if disabled:
return None
process = process or getCurrentProcess()
imode = 0
if mode == 'entropy':
imode = 1
op = AutoPhase(firstOrder, maxMode, winSize, ratio, imode, ph1Limit, negativePenalty)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def DPHASE(dim=0,firstOrder=False, winSize=2, ratio=25.0, ph1Limit=45.0, disabled=False, dataset=None, process=None):
'''Auto Phase shift.
Parameters
---------
dim : {0,1,2,3,4,5,6}
Dataset dimension to phase. (0 does all dimensions)
firstOrder : bool
Do first order phase correction.
winSize : int
amin : 1
min : 1
max : 32
Size of each half of window used in doing CWTD. Full window is 2 x this value.
ratio : real
amin : 1.0
min : 1.0
max : 100.0
Ratio relative to noise used in determining if region is signal or baseline.
ph1Limit : real
amin : 0.0
min : 1.0
max : 100.0
amax : 540.0
Limit ph1 value so its absolute value is less than this range.
'''
if disabled:
return None
process = process or getCurrentProcess()
dim -= 1
op = AutoPhaseDataset(dim, firstOrder, winSize, ratio, ph1Limit)
if (dataset != None):
op.eval(dataset)
else:
process.addOperation(op)
return op
def DEPT( disabled=False, dataset=None, process=None):
''' DEPT : combine rows.
Parameters
---------
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Dept()
if (dataset != None):
op.eval(dataset)
else:
process.addOperation(op)
return op
def DGRINS(noise=5, logToFile=False, disabled=False, dataset=None, process=None):
''' Experimental GRINS.
Parameters
---------
noise : real
amin : 0.0
Noise estimate
'''
if disabled:
return None
global fidInfo
if fidInfo == None or fidInfo.fidObj == None:
schedule = None
else:
schedule = fidInfo.fidObj.getSampleSchedule()
if logToFile:
rootdir = fidInfo.fidObj.getFilePath()
logDir = os.path.join(rootdir,"nesta")
if not os.path.exists(logDir):
os.mkdir(logDir)
logFileName = os.path.join(logDir,"log")
process = process or getCurrentProcess()
op = DGRINSOp(schedule, noise)
if (dataset != None):
op.eval(dataset)
else:
process.addOperation(op)
return op
def GRINS(noise=0.0, scale=0.5, zf=0, phase=None, preserve=False, synthetic=False, logToFile=False, disabled=False, dataset=None, process=None):
''' Experimental GRINS.
Parameters
---------
noise : real
amin : 0.0
min : 0.0
max : 100.0
Noise estimate
scale : real
amin : 0.1
min : 0.2
max : 2.0
amax : 10.0
Parabola to Lorentzian scale
zf : int
amin : 0
min : 0
max : 2
amax : 2
Zero fill factor
phase : []
Array of phase values, 2 per indirect dimension.
preserve : bool
Add fitted signals to the residual signal (rather than replacing it)
synthetic : bool
Replace measured values with synthetic values.
logToFile : bool
Write log files containing information about progress of NESTA.
'''
if disabled:
return None
global fidInfo
phaseList = ArrayList()
if phase == None:
pass
else:
for value in phase:
phaseList.add(float(value))
logFileName = None
if fidInfo == None or fidInfo.fidObj == None:
schedule = None
else:
schedule = fidInfo.fidObj.getSampleSchedule()
if logToFile:
rootdir = fidInfo.fidObj.getFilePath()
logDir = os.path.join(rootdir,"nesta")
if not os.path.exists(logDir):
os.mkdir(logDir)
logFileName = os.path.join(logDir,"log")
process = process or getCurrentProcess()
op = GRINSOp(noise, scale, zf, phaseList, preserve, synthetic, schedule, logFileName)
if (dataset != None):
op.eval(dataset)
else:
process.addOperation(op)
curDims = dataInfo.curDims
print 'curdims', curDims
if (dataInfo.resizeable):
for curDim in curDims:
print zf,curDim,zf,dataInfo.size[curDim]
setDataInfoSize(curDim, getZfSize(dataInfo.size[curDim],zf,-1))
print dataInfo.size[curDim]
return op
def PHASE(ph0=0.0, ph1=0.0, dimag=False, disabled=False, vector=None, process=None):
'''Phase shift.
Parameters
---------
ph0 : real
min : -90.0
max : 90.0
slider : 0
Zero order phase value
ph1 : real
min : -180.0
max : 180.0
slider : 0
First order phase value
dimag : bool
Discard imaginary values
'''
if disabled:
return None
global fidInfo
process = process or getCurrentProcess()
if (ph0 == None):
try:
gph0, gph1 = fidInfo.getPhases(dataInfo.curDim)
except:
gph0=0.0
ph0 = gph0
if (ph1 == None):
try:
gph0, gph1 = fidInfo.getPhases(dataInfo.curDim)
except:
gph1=0.0
ph1 = gph1
op = Phase(ph0, ph1, dimag)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def PHASE2D(phase=None, disabled=False, process=None):
'''Phase ND matrix.
Parameters
---------
phase : []
Array of phase values, 2 per indirect dimension.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Phase2d(phase)
process.addOperation(op)
return op
def PHASEND(ph0=0.0,ph1=0.0,dim=0, disabled=False, process=None):
'''Phase ND matrix.
Parameters
---------
ph0 : real
min : -360.0
max : 360.0
Zero order phase value
ph1 : real
min : -360.0
max : 360.0
First order phase value
dim : {0,1,2,3,4,5,6}
Dataset dimension to phase. (0 does all dimensions)
'''
if disabled:
return None
dim -= 1
process = process or getCurrentProcess()
op = Phase2d(ph0, ph1, dim)
process.addOperation(op)
return op
@generic_operation
def RAND(disabled=False, vector=None, process=None):
'''Set all points in a vector to a uniformly distributed random number between 0.0 and 1.0.'''
if disabled:
return None
op = Rand()
return op
@generic_operation
def RANGE(value=0 + 0j, first=0, last=-1, max=False, min=False, disabled=False, process=None, vector=None):
'''Sets the values in the vector from first to last, inclusive, to the specified value, which can be real or complex (written as 1.0 + 3j), or to Double.MIN_VALUE or Double.MAX_VALUE.
Parameters
---------
value : complex
Vector will have this value from the 'first' to 'last' elements
first : int
min : 0
max : size-1
The first point of the vector to set.
last : int
min : -1
max : size-1
The last point of the vector to set.
max : bool
Set the value to Double.MAX (instead of min or value). If True, overrides value.
min : bool
Set the value to Double.MIN (instead of max or value). If True, overrides value.
'''
if disabled:
return None
if min:
value = Double.MIN_VALUE
if max:
value = Double.MAX_VALUE
op = Range(first, last, value.real, value.imag)
return op
def MERGE(disabled=False, process=None, vector=None):
'''Make the vector complex, by merging alternate values into a complex number'''
if disabled:
return None
process = process or getCurrentProcess()
op = Merge()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def REAL(disabled=False, process=None, vector=None):
'''Make the vector real, discarding the imaginary part'''
if disabled:
return None
process = process or getCurrentProcess()
op = Real()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def EA(disabled=False, process=None, vector=None):
'''Do echo-anti echo combination'''
if disabled:
return None
process = process or getCurrentProcess()
op = EACombine()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def ESMOOTH(winSize=256, lambd=5000, order=2, baseline=False, disabled=False, process=None, vector=None):
'''Envelope smoothing.
Parameters
---------
winSize : int
Size of the window
lambd : real
amin : 10.0
min : 1000.0
max : 50000.0
Parameter controlling how close the fit to the baseline should be
order : int
min : 1
max : 2
Parameter controlling the order of the baseline fit
baseline : bool
If true, return the calculated baseline, rather than the corrected vector
'''
if disabled:
return None
op = ESmooth(winSize, lambd, order, baseline)
return op
def REVERSE(disabled=False, process=None, vector=None):
'''Reverse points in a vector'''
if disabled:
return None
process = process or getCurrentProcess()
op = Reverse()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def RFT(inverse=False, negatePairs=False, disabled=False, process=None, vector=None):
'''Real fourier transform
Parameters
---------
inverse : bool
True if inverse RFT, False if forward RFT.
negatePairs : bool
Negate alternate complex real/imaginary values before the FT
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Rft(inverse, negatePairs)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def SIGN(mode='i', disabled=False, process=None, vector=None):
'''Change sign of values
Parameters
---------
mode : {'i','r','alt'}
Which elements of the vector to change.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Sign(mode)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def SB(offset=0.5, end=1.0,power=2.0,c=1.0,apodSize=0,inverse=False,disabled=False, vector=None, process=None):
'''Sine Bell Apodization
Parameters
---------
offset : real
amin : 0.0
min : 0.0
max : 0.5
Offset of sine window.
end : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
End value of sine window argument.
power : real
amin : 1.0
min : 1.0
max : 2.0
amax : 2.0
Exponential power.
c : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
First point multiplier.
apodSize : int
min : 0
max : size
Size of apodization window. Default of 0 uses the entire FID.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = SinebellApod(offset, end, power, c, apodSize, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def BLACKMAN(offset=0.5,end=1.0,c=1.0,apodSize=0,dim=1, inverse=False,disabled=False, vector=None, process=None):
'''Blackman Apodization
Parameters
---------
offset : real
amin : 0.0
min : 0.0
max : 0.5
Offset of Blackman window.
end : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
End value of Blackman window argument.
c : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
First point multiplier.
apodSize : int
min : 0
max : size
Size of apodization window. Default of 0 uses the entire FID.
dim : {1,2,3,4,5,6}
Dataset dimension to apodize. Only applicable for matrix operations.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Blackman(offset, end, c, apodSize, dim-1, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def KAISER(offset=0.5, beta=10.0, end=1.0,c=1.0,apodSize=0, dim=1, inverse=False,disabled=False, vector=None, process=None):
'''Kaiser Apodization
Parameters
---------
offset : real
amin : 0.0
min : 0.0
max : 0.5
Offset of Kaiser window.
beta : real
amin : 0.0
min : 0.0
max : 20.0
amax : 20.0
Beta.
end : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
End value of window
c : real
amin : 0.5
min : 0.5
max : 1.0
amax : 1.0
First point multiplier.
apodSize : int
min : 0
max : size
Size of apodization window. Default of 0 uses the entire FID.
dim : {1,2,3,4,5,6}
Dataset dimension to apodize. Only applicable for matrix operations.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Kaiser(offset, beta, end, c, apodSize, dim-1, inverse)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
@generic_operation
def SHIFT(shift=0, adjref=False, disabled=False, vector=None, process=None):
'''Left or right shift of the data points in the vector by the specified amount.
Parameters
---------
shift : int
min : -2048
max : 2048
Number of points to shift the vector by.
adjref : bool
If true, adjust the referencing of the vector based on shift
'''
if disabled:
return None
op = Shift(shift, adjref)
return op
def SCRIPT(script="", initialScript="", execFileName="", encapsulate=False, disabled=False, vector=None, process=None):
'''Execute a Python script as an Operation. Current vector is available as object named "vec".
Parameters
---------
script : wstring
The script that will be run on each Vec at the stage in the processing queue.
initialScript : wstring
Any initial declarations that will be executed on initialization.
execFileName : file
An initial file that will be executed on initialization.
encapsulate : bool
Whether the interpreter should persist between evaluations or be reinitialized for each evaluation.
'''
if disabled:
return None
process = process or getCurrentProcess()
op=PythonScript(script, initialScript, execFileName, encapsulate)
if (vector != None):
op.eval(vector)
else:
process.add(op)
return op
@generic_operation
def TDPOLY(order=4, winSize=32, start=0, disabled=False, vector=None, process=None):
'''Time Domain Polynomial.
Parameters
---------
order : int
min : 1
max : 10
Order of the polynomial.
winSize : int
min : 1
max : size-1
Size of the window
start : int
min : 0
max : size-1
First point
'''
if disabled:
return None
op = TDPoly(order, winSize, start)
return op
def TM(pt1=0, pt2=-1, inverse=False, disabled=False, vector=None, process=None):
'''Trapezoid Multiply.
Parameters
---------
pt1 : int
min : 0
max : size-1
First point to multiply.
pt2 : int
min : -1
max : size-1
Last point to multiply.
'''
if disabled:
return None
process = process or getCurrentProcess()
op = Tm(pt1, pt2, inverse)
if vector != None:
op.eval(vector)
else:
process.add(op)
return op
@generic_operation
def TRI(pt1=0, lHeight=1.0, rHeight=0.0, inverse=False, disabled=False, vector=None, process=None):
'''Triangle Window
Parameters
---------
pt1 : int
min : 0
max : size-1
Middle point of the triangle.
lHeight : real
amin : 0.0
min : 0.0
max : 1.0
amax : 1.0
Height of the left side.
rHeight : real
amin : 0.0
min : 0.0
max : 1.0
amax : 1.0
Height of the right side.
'''
if disabled:
return None
op = Tri(pt1, lHeight, rHeight, inverse)
return op
def VECREF(size=8, sf=500.0, sw=5000.0,disabled=False, process=None, vector=None):
'''Sets size, spectrometer frequency and sweep width of vector. Used for simulated FIDs for testing and demonstration.
Parameters
---------
size : int
min : 3
max : 16
Size of vector specified as a power of 2.
sf : real
amin : 0.0
min : 0.0
max : 1200.0
amax : 1200.0
Spectrometer frequency (in MHz).
sw : real
amin : 0.0
min : 0.0
max : 10000.0
amax : 10000.0
Sweep width of spectrum (in Hz).
'''
if disabled:
return None
process = process or getCurrentProcess()
global dataInfo
curDim = dataInfo.curDim
size = pow(2,size)
op = VecRef(size,sf,sw)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if size != None:
if (dataInfo.resizeable):
setDataInfoSize(curDim, size)
return op
def ZEROS(disabled=False, process=None, vector=None):
'''Zeros a vector.'''
if disabled:
return None
process = process or getCurrentProcess()
global dataInfo
curDim = dataInfo.curDim
op = Zeros()
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
return op
def ZF(factor=1, size=-1, pad=-1, disabled=False, process=None, vector=None):
'''Zero Fill.
factor is the power of 2 that the vector size is increased to: a vector of 513 elements with factor=1 grows to 1024 (the next power of 2), while factor=2 grows it to 2048 (two powers of 2 greater).
A size can be specified instead of a factor; it gives the exact number of points the vector will have, and the added elements are all zero.
Parameters
---------
factor : int
min : -1
max : 4
Number of powers of 2 to zero fill to.
size : int
min : -1
max : 65536
Size after zero filling. If -1 (default), calculate from factor value.
pad : int
min : -1
max : 128
Increase size by this amount. If -1 (default) use size or factor value.
'''
if disabled:
return None
process = process or getCurrentProcess()
global dataInfo
curDim = dataInfo.curDim
op = Zf(factor,size,pad)
if (vector != None):
op.eval(vector)
else:
process.addOperation(op)
if (dataInfo.resizeable):
setDataInfoSize(curDim, getZfSize(dataInfo.size[curDim],factor,size))
return op
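# Illustrative usage (see the docstring for the factor/size semantics):
#   ZF()            # factor=1: round the size up to the next power of 2
#   ZF(factor=2)    # two powers of 2 above the current size
#   ZF(size=2048)   # zero fill to exactly 2048 points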
def makeDataNames(filePath,baseDir=None,outDir=None,iFile=None,baseName='data',multiMode=False):
(dirName,tail) = os.path.split(filePath)
(rootName,ext) = os.path.splitext(tail)
print 'fp',filePath
print 'ro',rootName
print 'ex',ext
if iFile:
rootName =rootName+'_'+str(iFile)
if baseName:
rootName = baseName+"_"+rootName
if multiMode:
dataName = 'multi.nv'
else:
dataName = rootName+'.nv'
print 'da',dataName
if (baseDir):
fullFidName = os.path.join(baseDir,filePath)
else:
fullFidName = filePath
if (outDir):
fullDataName = os.path.join(outDir,dataName)
else:
fullDataName = os.path.join(dirName,dataName)
return fullFidName,filePath,fullDataName,dataName
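# Example of the derived names (hypothetical inputs):
#   makeDataNames('hsqc/fid', baseName='data')
#   -> ('hsqc/fid', 'hsqc/fid', 'hsqc/data_fid.nv', 'data_fid.nv')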
def newvector(size=32):
return Vec(size)
def addvector(vector,process=None):
'''add the vector to the process'''
process = process or getCurrentProcess()
process.addVec(vector)
# Note: this one-argument copy() is shadowed by the two-argument copy() defined below.
def copy(process=None):
'''return a new ProcessOps instance that is a copy of process'''
process = process or getCurrentProcess()
temp = processor.createProcess()
return process.cloneProcess(temp)
def copy(processFrom, processTo):
'''Clone processFrom to processTo and return processTo. Will modify
operations in processTo, but will not modify vectors.'''
return processFrom.cloneProcess(processTo)
def create(name=None):
if (name == None):
return processor.createProcess()
else:
return processor.createProcess(name)
def defaultName():
'''return the name of the default process'''
return processor.getDefaultName()
def getDefault():
'''return the default process'''
return processor.getDefaultProcess()
def run(process=None):
'''Execute the series of operations that have been added to processor. Return true if it executed successfully.
The run command must be present at the end of processing operations or no processing will happen.'''
if (dataInfo.resizeable):
createDataset()
else:
setDataInfo(dataInfo.createdSize)
if (process == None):
processor.runProcesses()
else:
processor.run(process)
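# A processing script ends with run(); a minimal illustrative sequence:
#   FID(fidDir); CREATE(datasetName); DIM(1); EXPD(lb=0.5); ZF(); FT(); run()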
def list_ops(process=None):
process = process or getCurrentProcess()
return process.getOperationString()
def list_processes():
return processor.getListOfProcessNames()
def list_vectors(process=None):
process = process or getCurrentProcess()
return process.getVectorString()
def status(process=None):
'''Return status of a process'''
process = process or getCurrentProcess()
return process.getStatus()
def procOpts(nprocess=None,nvectors=None):
''' Set and get various options in the Processor
Parameters
---------
nprocess : int
The number of processes to run simultaneously. Defaults to the number of CPU cores (doubled when hyper-threading is enabled).
nvectors : int
The number of vectors each process should grab at one time.
'''
if (nprocess != None):
processor.setNumProcessors(nprocess)
if (nvectors != None):
processor.setVectorsPerProcess(nvectors)
return {'nprocess':processor.getNumProcessors(),'nvectors':processor.getVectorsPerProcess()}
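# Illustrative usage:
#   procOpts(nprocess=4, nvectors=16)  # 4 worker processes, 16 vectors per grab
#   opts = procOpts()                  # query current settings without changing them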
def writeVec(vector,fileName):
f = open(fileName,'w')
size = vector.getSize()
for i in range(size):
rx = vector.get(i,True)
output = str(rx)
if (vector.isComplex()):
ix = vector.get(i,False)
output += ' '+str(ix)
f.write(output+'\n')
f.close()
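# Output format: one line per point, "real" for real vectors or "real imag"
# for complex vectors, e.g. writeVec(vec, '/tmp/vec.txt').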
def execNESTA(vecMat,iterations):
global fidInfo
global nestaExecutable
filePath = fidInfo.fidObj.getFilePath()
nestDir = os.path.join(filePath,'nestaL1')
rootName = os.path.join(nestDir,'test')
vecFileName = vecMat.exportData(rootName,"nestain",True)
# do external processing, for example
nusListFile = fidInfo.fidObj.getSampleSchedule().getFile()
fileIndex = str(vecMat.getIndex()+1)
nestArgs = [nestaExecutable,"-q","-D","-b","-i",str(iterations),"-t","1","-n",nusListFile,"-d",nestDir,"-e",fileIndex,"-f",vecFileName]
try:
retcode = subprocess.call(nestArgs)
except OSError as e:
print "Execution failed:", e
raise(e)
except:
e = sys.exc_info()[0]
print "Execution failed:", e
raise(e)
if retcode < 0:
raise("Child was terminated by signal " + retcode)
vecMat.importData(rootName,"nestaout",True)
os.remove(vecFileName)
os.remove(vecFileName+'.par')
coefs3d = [1, 0, 1, 0, 0, 0, 0, 0,
0,-1, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0,-1, 0, 1]
def convertUnitStringToObject(unitString):
'''Return a Unit object (Fraction, Frequency, Index, PPM, Point, Time) from a unit string. Proper format is a number, with optional decimal point, followed by a token: f for Fraction, h for frequency (Hz), P for PPM, p for Point, s for seconds. With no token, a value containing a decimal point gives a Point and a plain integer gives an Index.'''
#token = unitString.strip(' \t')[-1]
if isinstance(unitString,(float,int)):
return unitString
token = filter(lambda x: x != '', re.findall('[a-zA-Z]*', unitString))
if len(token) > 1:
raise Exception("Poorly formatted Unit String. Cannot convert %s. Unit must be supplied as a number followed by f, h, p, or s." % (unitString))
num = filter(lambda x: x != '', re.findall('[\d.\-]*', unitString))
if len(num) != 1:
raise Exception("Poorly formatted Unit String. Cannot convert %s. Unit must be supplied as a number followed by f, h, p, or s." % unitString)
if (len(token)):
token = token[0]
else:
token = unitString[-1]
num = num[0]
unit = None
if token == 'f':
unit = Fraction(num)
elif token == 'h':
unit = Frequency(num)
elif token == 'P':
unit = PPM(num)
elif token == 'p':
unit = Point(num)
elif token == 's':
unit = Time(num)
elif token in '0123456789.':
if '.' in unitString:
unit = Point(num)
else:
unit = Index(num)
return unit
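# Examples of accepted unit strings (based on the parsing above):
#   convertUnitStringToObject('0.25f')  # -> Fraction('0.25'), fraction of sweep width
#   convertUnitStringToObject('150h')   # -> Frequency('150'), value in Hz
#   convertUnitStringToObject('4.5P')   # -> PPM('4.5')
#   convertUnitStringToObject('128')    # -> Index('128'); '12.5' would give Point('12.5')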
def genScript(arrayed=False):
global fidInfo
script = ''
sequence = fidInfo.fidObj.getSequence()
if fidInfo.nd < 2:
script += 'DIM(1)\n'
script += 'EXPD(lb=0.5)\n'
script += 'ZF()\n'
script += 'FT()\n'
trim = fidInfo.fidObj.getTrim()
if trim > 1.0e-3:
script += 'TRIM(ftrim=' + str(trim) +')\n'
script += 'AUTOPHASE(firstOrder=True)\n'
else:
script += psspecial.scriptMods(fidInfo, 0)
script += 'DIM(1)\n'
for iDim in range(2,fidInfo.nd+1):
if not fidInfo.fidObj.isFrequencyDim(iDim-1):
continue
if not fidInfo.isComplex(iDim-1):
continue
if fidInfo.mapToDatasetList[iDim-1] == -1:
continue
fCoef = fidInfo.getSymbolicCoefs(iDim-1)
if fCoef != None and fCoef != 'hyper' and fCoef != 'sep':
script += 'TDCOMB('
script += "dim="+str(iDim)
script += ",coef='"
script += fCoef
script += "')\n"
script += 'SB()\n'
script += 'ZF()\n'
script += 'FT()\n'
script += 'PHASE(ph0=0.0,ph1=0.0)\n'
fCoef = fidInfo.getSymbolicCoefs(1)
if fCoef != None and fCoef == 'sep' and not arrayed:
script += "COMB(coef='sep')\n"
if fidInfo.nd > 2 and fidInfo.fidObj.getSampleSchedule() != None:
multiDim = 'DIM(2'
for mDim in range(2,fidInfo.nd):
multiDim += ',' + str(mDim+1)
multiDim += ')'
script += multiDim + '\n'
script += 'NESTA()\n'
for iDim in range(2,fidInfo.nd+1):
if fidInfo.size[iDim-1] < 2:
continue
if fidInfo.mapToDatasetList[iDim-1] == -1:
continue
if not fidInfo.fidObj.isFrequencyDim(iDim-1):
continue
if (iDim >= fidInfo.nd) and arrayed:
continue
script += 'DIM('+str(iDim)+')\n'
if iDim == 2 and fidInfo.nd == 2 and fidInfo.fidObj.getSampleSchedule() != None:
script += 'NESTA()\n'
script += 'SB(c=0.5)\n'
script += 'ZF()\n'
script += 'FT('
negatePairs = fidInfo.negatePairsFT(iDim-1)
negateImag = fidInfo.negateImagFT(iDim-1)
if negatePairs:
script += 'negatePairs=True'
if negateImag:
if negatePairs:
script += ','
script += 'negateImag=True'
script += ')\n'
fCoef = fidInfo.getSymbolicCoefs(iDim-1)
if fCoef != None and fCoef == 'sep':
script += "MAG()\n"
else:
script += 'PHASE(ph0=0.0,ph1=0.0)\n'
script += 'run()'
return script
def ddoc(op,opList):
argspec = inspect.getargspec(op)
argNames = argspec[0]
keyWordArgs = argspec[2]
defaults = argspec[3]
nArgs = len(argNames)
if defaults == None:
nDefaults = 0
else:
nDefaults = len(defaults)
inPar = False
s=op.__doc__.split('\n')
iArg = -1
opDesc = ''
opMap = HashMap()
opList.add(op.__name__)
example = op.__name__+"("
nNoDefaults = len(argNames)
if defaults != None:
nNoDefaults -= len(defaults)
for i in range(nNoDefaults):
example += argNames[i]+','
if defaults != None :
for (argName,defaultValue) in zip(argNames[nNoDefaults:],defaults):
if argName == "disabled":
break
if isinstance(defaultValue,str):
example += argName+"="+"'" + defaultValue + "'"
else:
example += argName+"="+str(defaultValue)
example += ","
example = example.strip(',')+")"
opList.add(example)
parList = ArrayList()
opList.add(parList)
for line in s:
n4space = line.count(' ')
line = line.strip()
if line.startswith('----'):
continue
if line == '':
inPar = False
continue
if line.startswith('Parameters'):
inPar = True
else:
if not inPar:
opDesc = opDesc + ' ' +line
else:
if (n4space == 1):
parMap = HashMap()
parList.add(parMap)
parMap.clear()
iArg += 1
pars = line.split(' : ')
#ast.literal_eval
parName = pars[0].strip()
if ((parName != 'keywords') and (parName != argNames[iArg])):
print parName,' not equal to ',argNames[iArg]
exit()
iDefault = nArgs-iArg
hasDefault = True
#print nArgs,iArg,iDefault,nDefaults
default = None
if (iDefault > nDefaults):
hasDefault = False
else:
default = defaults[-iDefault]
# (parType,parOptional)= pars[1].split(',')
#print pars[1].strip()
parTypeList = ArrayList()
if (pars[1][0] == '{'):
parTypeString = pars[1].strip()
#parTypeString = "set([" + parTypeString[1:-1] + "])"
parTypeString = "(" + parTypeString[1:-1] + ")"
#print parTypeString
#parTypes = ast.literal_eval(parTypeString)
parTypes = eval(parTypeString)
if isinstance(parTypes,tuple):
for parType in parTypes:
parTypeList.add(parType)
elif (pars[1][0] == '['):
parTypeList.add('list')
lst = pars[1]
listTypes = eval(lst)
listTypeList = ArrayList()
for listType in listTypes:
listTypeList.add(listType)
parMap.put('listTypes', listTypeList)
else:
parTypeList.add(pars[1].strip())
#parOptional = parOptional.strip()=='optional'
parOptional = hasDefault;
#print 'parName ',parName,'type ',parType,'optional ', parOptional
parMap.put('name',parName)
parMap.put('type',parTypeList)
parMap.put('optional',parOptional)
else:
if line.find(' : ') == -1:
parMap.put('desc',line)
#print 'desc',line
if hasDefault:
parMap.put('default',default)
#print 'default ',default
else:
#print 'opts',line
opts = line.split(' : ')
optName = opts[0].strip()
optValue = opts[1].strip()
parMap.put(optName,optValue)
opList.add(opDesc.strip())
def getOperationList():
'''Get a list of all operations whose name is fully upper-case (name.isupper() is True), that are also Python functions, and whose signature includes a "vector" (or "dataset"/"inVec") parameter together with a "process" parameter'''
operation_list = []
for op in globals():
if op.isupper():
#print globals()[op]
try:
arg_spec = inspect.getargspec(globals()[op])[0]
if (('dataset' in arg_spec) or ('vector' in arg_spec) or ('inVec' in arg_spec)) and 'process' in arg_spec:
operation_list.append(op)
elif op == "ISTMATRIX":
operation_list.append(op)
except TypeError:
#argument is not a python function (like IO)
continue
#These functions don't have documentation, so they should be excluded
#(can check to see if the function has documentation and dynamically
#add to the list as well.)
exclude_operations = []
exclude_operations += ['ISTCL',] #currently broken
#return all operations that are not excluded
opList = filter(lambda op: op not in exclude_operations, operation_list)
opList.sort()
return opList
def getDocs():
opList = ArrayList()
for operation in getOperationList():
ddoc(globals()[operation], opList)
return opList
def getRefDocs(ops):
opList = ArrayList()
for op in ops:
ddoc(globals()[op], opList)
return opList
def parseFileArgs():
if (len(sys.argv) < 3):
sys.exit("usage: script FIDDIR datasetName")
fidDir,datasetName = sys.argv[1:3]
fidInfo = FID(fidDir)
datasetInfo = CREATE(datasetName)
return fidInfo,datasetInfo
dataInfo = DataInfo()
|
onemoonsci/nmrfxprocessor
|
src/main/resources/pyproc.py
|
Python
|
gpl-3.0
| 119,470
|
[
"Gaussian"
] |
19e58f7563be81d84e678b09eab7d1533dad021da601c34dca0505024334f6d8
|
# Copyright 2004-2015 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# This file contains classes that handle layout of displayables on
# the screen.
from renpy.display.render import render, Render
import renpy.display
import pygame_sdl2 as pygame
def scale(num, base):
"""
If num is a float, multiplies it by base and returns that. Otherwise,
returns num unchanged.
"""
if isinstance(num, float):
return num * base
else:
return num
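# For example, scale(0.5, 800) returns 400.0, while scale(200, 800) returns 200.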
class Null(renpy.display.core.Displayable):
"""
:doc: disp_imagelike
A displayable that creates an empty box on the screen. The size
of the box is controlled by `width` and `height`. This can be used
when a displayable requires a child, but no child is suitable, or
as a spacer inside a box.
::
image logo spaced = HBox("logo.png", Null(width=100), "logo.png")
"""
def __init__(self, width=0, height=0, **properties):
super(Null, self).__init__(**properties)
self.width = width
self.height = height
def render(self, width, height, st, at):
rv = renpy.display.render.Render(self.width, self.height)
if self.focusable:
rv.add_focus(self, None, None, None, None, None)
return rv
class Container(renpy.display.core.Displayable):
"""
This is the base class for containers that can have one or more
children.
@ivar children: A list giving the children that have been added to
this container, in the order that they were added in.
@ivar child: The last child added to this container. This is also
used to access the sole child in containers that can only hold
one child.
@ivar offsets: A list giving offsets for each of our children.
It's expected that render will set this up each time it is called.
@ivar sizes: A list giving sizes for each of our children. It's
also expected that render will set this each time it is called.
"""
# We indirect all list creation through this, so that we can
# use RevertableLists if we want.
_list_type = list
def __init__(self, *args, **properties):
self.children = self._list_type()
self.child = None
self.offsets = self._list_type()
for i in args:
self.add(i)
super(Container, self).__init__(**properties)
def set_style_prefix(self, prefix, root):
super(Container, self).set_style_prefix(prefix, root)
for i in self.children:
i.set_style_prefix(prefix, False)
def add(self, d):
"""
Adds a child to this container.
"""
child = renpy.easy.displayable(d)
self.children.append(child)
self.child = child
self.offsets = self._list_type()
def _clear(self):
self.child = None
self.children = self._list_type()
self.offsets = self._list_type()
renpy.display.render.redraw(self, 0)
def remove(self, d):
"""
Removes the first instance of child from this container. May
not work with all containers.
"""
for i, c in enumerate(self.children):
if c is d:
break
else:
return
self.children.pop(i) # W0631
self.offsets = self._list_type()
if self.children:
self.child = self.children[-1]
else:
self.child = None
def update(self):
"""
This should be called if a child is added to this
displayable outside of the render function.
"""
renpy.display.render.invalidate(self)
def render(self, width, height, st, at):
rv = Render(width, height)
self.offsets = self._list_type()
for c in self.children:
cr = render(c, width, height, st, at)
offset = c.place(rv, 0, 0, width, height, cr)
self.offsets.append(offset)
return rv
def event(self, ev, x, y, st):
children = self.children
offsets = self.offsets
for i in xrange(len(offsets) - 1, -1, -1):
d = children[i]
xo, yo = offsets[i]
rv = d.event(ev, x - xo, y - yo, st)
if rv is not None:
return rv
return None
def visit(self):
return self.children
# These interact with the ui functions to allow use as a context
# manager.
def __enter__(self):
renpy.ui.context_enter(self)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
renpy.ui.context_exit(self)
return False
def LiveComposite(size, *args, **properties):
"""
:doc: disp_imagelike
This creates a new displayable of `size`, by compositing other
displayables. `size` is a (width, height) tuple.
The remaining positional arguments are used to place images inside
the LiveComposite. The remaining positional arguments should come
in groups of two, with the first member of each group an (x, y)
tuple, and the second member of a group is a displayable that
is composited at that position.
Displayables are composited from back to front.
::
image eileen composite = LiveComposite(
(300, 600),
(0, 0), "body.png",
(0, 0), "clothes.png",
(50, 50), "expression.png")
"""
properties.setdefault('style', 'image_placement')
width, height = size
rv = Fixed(xmaximum=width, ymaximum=height, xminimum=width, yminimum=height, **properties)
if len(args) % 2 != 0:
raise Exception("LiveComposite requires an odd number of arguments.")
for pos, widget in zip(args[0::2], args[1::2]):
xpos, ypos = pos
rv.add(renpy.display.motion.Transform(widget, xpos=xpos, xanchor=0, ypos=ypos, yanchor=0))
return rv
class Position(Container):
"""
Controls the placement of a displayable on the screen, using
supplied position properties. This is the non-curried form of
Position, which should be used when the user has directly created
the displayable that will be shown on the screen.
"""
def __init__(self, child, style='image_placement', **properties):
"""
@param child: The child that is being laid out.
@param style: The base style of this position.
@param properties: Position properties that control where the
child of this widget is placed.
"""
super(Position, self).__init__(style=style, **properties)
self.add(child)
def render(self, width, height, st, at):
surf = render(self.child, width, height, st, at)
self.offsets = [ (0, 0) ]
rv = renpy.display.render.Render(surf.width, surf.height)
rv.blit(surf, (0, 0))
return rv
def get_placement(self):
xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel = self.child.get_placement()
v = self.style.xpos
if v is not None:
xpos = v
v = self.style.ypos
if v is not None:
ypos = v
v = self.style.xanchor
if v is not None:
xanchor = v
v = self.style.yanchor
if v is not None:
yanchor = v
v = self.style.xoffset
if v is not None:
xoffset = v
v = self.style.yoffset
if v is not None:
yoffset = v
v = self.style.subpixel
if v is not None:
subpixel = v
return xpos, ypos, xanchor, yanchor, xoffset, yoffset, subpixel
class Grid(Container):
"""
A grid is a widget that evenly allocates space to its children.
The child widgets should not be greedy, but should instead be
widgets that only use part of the space available to them.
"""
def __init__(self, cols, rows, padding=None,
transpose=False,
style='grid', **properties):
"""
@param cols: The number of columns in this widget.
@param rows: The number of rows in this widget.
@param transpose: True if the grid should be transposed.
"""
if padding is not None:
properties.setdefault('spacing', padding)
super(Grid, self).__init__(style=style, **properties)
cols = int(cols)
rows = int(rows)
self.cols = cols
self.rows = rows
self.transpose = transpose
def render(self, width, height, st, at):
# For convenience and speed.
padding = self.style.spacing
cols = self.cols
rows = self.rows
if len(self.children) != cols * rows:
if len(self.children) < cols * rows:
raise Exception("Grid not completely full.")
else:
raise Exception("Grid overfull.")
if self.transpose:
children = [ ]
for y in range(rows):
for x in range(cols):
children.append(self.children[y + x * rows])
else:
children = self.children
# Now, start the actual rendering.
renwidth = width
renheight = height
if self.style.xfill:
renwidth = (width - (cols - 1) * padding) / cols
if self.style.yfill:
renheight = (height - (rows - 1) * padding) / rows
renders = [ render(i, renwidth, renheight, st, at) for i in children ]
sizes = [ i.get_size() for i in renders ]
cwidth = 0
cheight = 0
for w, h in sizes:
cwidth = max(cwidth, w)
cheight = max(cheight, h)
if self.style.xfill:
cwidth = renwidth
if self.style.yfill:
cheight = renheight
width = cwidth * cols + padding * (cols - 1)
height = cheight * rows + padding * (rows - 1)
rv = renpy.display.render.Render(width, height)
offsets = [ ]
for y in range(0, rows):
for x in range(0, cols):
child = children[ x + y * cols ]
surf = renders[x + y * cols]
xpos = x * (cwidth + padding)
ypos = y * (cheight + padding)
offset = child.place(rv, xpos, ypos, cwidth, cheight, surf)
offsets.append(offset)
if self.transpose:
self.offsets = [ ]
for x in range(cols):
for y in range(rows):
self.offsets.append(offsets[y * cols + x])
else:
self.offsets = offsets
return rv
class IgnoreLayers(Exception):
"""
Raise this to have the event ignored by layers, but reach the
underlay.
"""
pass
class MultiBox(Container):
layer_name = None
first = True
order_reverse = False
def __init__(self, spacing=None, layout=None, style='default', **properties):
if spacing is not None:
properties['spacing'] = spacing
super(MultiBox, self).__init__(style=style, **properties)
self.default_layout = layout
# The start and animation times for children of this
# box.
self.start_times = [ ]
self.anim_times = [ ]
# A map from layer name to the widget corresponding to
# that layer.
self.layers = None
# The scene list for this widget.
self.scene_list = None
def parameterize(self, name, parameters):
if not type(self) is MultiBox:
return self
rv = MultiBox(layout=self.default_layout)
rv.style = self.style.copy()
rv.children = self._list_type(i.parameterize('displayable', [ ]) for i in self.children)
rv.offsets = self._list_type()
return rv
def _clear(self):
super(MultiBox, self)._clear()
self.start_times = [ ]
self.anim_times = [ ]
self.layers = None
self.scene_list = None
def _in_old_scene(self):
if self.layer_name is not None:
if self.scene_list is None:
return self
scene_list = [ ]
changed = False
for old_sle in self.scene_list:
new_sle = old_sle.copy()
d = new_sle.displayable._in_old_scene()
if d is not new_sle.displayable:
new_sle.displayable = d
changed = True
scene_list.append(new_sle)
if not changed:
return self
rv = MultiBox(layout=self.default_layout)
rv.layer_name = self.layer_name
rv.append_scene_list(scene_list)
elif self.layers:
rv = MultiBox(layout=self.default_layout)
rv.layers = { }
changed = False
for layer in renpy.config.layers:
old_d = self.layers[layer]
new_d = old_d._in_old_scene()
if new_d is not old_d:
changed = True
rv.add(new_d)
rv.layers[layer] = new_d
if not changed:
return self
else:
return self
if self.offsets:
rv.offsets = list(self.offsets)
if self.start_times:
rv.start_times = list(self.start_times)
if self.anim_times:
rv.anim_times = list(self.anim_times)
return rv
def __unicode__(self):
layout = self.style.box_layout
if layout is None:
layout = self.default_layout
if layout == "fixed":
return "Fixed"
elif layout == "horizontal":
return "HBox"
elif layout == "vertical":
return "VBox"
else:
return "MultiBox"
def add(self, widget, start_time=None, anim_time=None): # W0221
super(MultiBox, self).add(widget)
self.start_times.append(start_time)
self.anim_times.append(anim_time)
def append_scene_list(self, l):
for sle in l:
self.add(sle.displayable, sle.show_time, sle.animation_time)
if self.scene_list is None:
self.scene_list = [ ]
self.scene_list.extend(l)
def render(self, width, height, st, at):
# Do we need to adjust the child times due to our being a layer?
if self.layer_name or (self.layers is not None):
adjust_times = True
else:
adjust_times = False
xminimum = self.style.xminimum
if xminimum is not None:
width = max(width, scale(xminimum, width))
yminimum = self.style.yminimum
if yminimum is not None:
height = max(height, scale(yminimum, height))
if self.first:
self.first = False
if adjust_times:
it = renpy.game.interface.interact_time
self.start_times = [ i or it for i in self.start_times ]
self.anim_times = [ i or it for i in self.anim_times ]
layout = self.style.box_layout
if layout is None:
layout = self.default_layout
self.layout = layout # W0201
else:
layout = self.layout
# Handle time adjustment, store the results in csts and cats.
if adjust_times:
t = renpy.game.interface.frame_time
csts = [ t - start for start in self.start_times ]
cats = [ t - anim for anim in self.anim_times ]
else:
csts = [ st ] * len(self.children)
cats = [ at ] * len(self.children)
offsets = [ ]
if layout == "fixed":
rv = None
if self.style.order_reverse:
iterator = zip(reversed(self.children), reversed(csts), reversed(cats))
else:
iterator = zip(self.children, csts, cats)
for child, cst, cat in iterator:
surf = render(child, width, height, cst, cat)
if rv is None:
if self.style.fit_first:
width, height = surf.get_size()
rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
if surf:
offset = child.place(rv, 0, 0, width, height, surf)
offsets.append(offset)
else:
offsets.append((0, 0))
if rv is None:
rv = renpy.display.render.Render(width, height, layer_name=self.layer_name)
if self.style.order_reverse:
offsets.reverse()
self.offsets = offsets
return rv
# If we're here, we have a box, either horizontal or vertical. Which is good,
# as we can share some code between boxes.
spacing = self.style.spacing
first_spacing = self.style.first_spacing
if first_spacing is None:
first_spacing = spacing
spacings = [ first_spacing ] + [ spacing ] * (len(self.children) - 1)
box_wrap = self.style.box_wrap
xfill = self.style.xfill
yfill = self.style.yfill
# The shared height and width of the current line. The line_height must
# be 0 for a vertical box, and the line_width must be 0 for a horizontal
# box.
line_width = 0
line_height = 0
# The children to layout.
children = list(self.children)
if self.style.box_reverse:
children.reverse()
spacings.reverse()
# a list of (child, x, y, w, h, surf) tuples that are turned into
# calls to child.place().
placements = [ ]
# The maximum x and y.
maxx = 0
maxy = 0
def layout_line(line, xfill, yfill):
"""
Lays out a single line.
`line` a list of (child, x, y, surf) tuples.
`xfill` the amount of space to add in the x direction.
`yfill` the amount of space to add in the y direction.
"""
xfill = max(0, xfill)
yfill = max(0, yfill)
if line:
xperchild = xfill / len(line)
yperchild = yfill / len(line)
else:
xperchild = 0
yperchild = 0
maxxout = maxx
maxyout = maxy
for i, (child, x, y, surf) in enumerate(line):
sw, sh = surf.get_size()
sw = max(line_width, sw)
sh = max(line_height, sh)
x += i * xperchild
y += i * yperchild
sw += xperchild
sh += yperchild
placements.append((child, x, y, sw, sh, surf))
maxxout = max(maxxout, x + sw)
maxyout = max(maxyout, y + sh)
return maxxout, maxyout
x = 0
y = 0
full_width = False
full_height = False
if layout == "horizontal":
full_height = yfill
line_height = 0
line = [ ]
remwidth = width
for d, padding, cst, cat in zip(children, spacings, csts, cats):
if box_wrap:
rw = width
else:
rw = remwidth
surf = render(d, rw, height - y, cst, cat)
sw, sh = surf.get_size()
if box_wrap and remwidth - sw - padding <= 0 and line:
maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
y += line_height
x = 0
line_height = 0
remwidth = width
line = [ ]
line.append((d, x, y, surf))
line_height = max(line_height, sh)
x += sw + padding
remwidth -= (sw + padding)
maxx, maxy = layout_line(line, remwidth if xfill else 0, 0)
elif layout == "vertical":
full_width = xfill
line_width = 0
line = [ ]
remheight = height
for d, padding, cst, cat in zip(children, spacings, csts, cats):
if box_wrap:
rh = height
else:
rh = remheight
surf = render(d, width - x, rh, cst, cat)
sw, sh = surf.get_size()
if box_wrap and remheight - sh - padding <= 0:
maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
x += line_width
y = 0
line_width = 0
remheight = height
line = [ ]
line.append((d, x, y, surf))
line_width = max(line_width, sw)
y += sh + padding
remheight -= (sh + padding)
maxx, maxy = layout_line(line, 0, remheight if yfill else 0)
else:
raise Exception("Unknown box layout: %r" % layout)
# Back to the common for vertical and horizontal.
if not xfill:
width = maxx
if not yfill:
height = maxy
rv = renpy.display.render.Render(width, height)
if self.style.box_reverse ^ self.style.order_reverse:
placements.reverse()
for child, x, y, w, h, surf in placements:
if full_width:
w = width
if full_height:
h = height
offset = child.place(rv, x, y, w, h, surf)
offsets.append(offset)
if self.style.order_reverse:
offsets.reverse()
self.offsets = offsets
return rv
def event(self, ev, x, y, st):
children_offsets = zip(self.children, self.offsets, self.start_times)
if not self.style.order_reverse:
children_offsets.reverse()
try:
for i, (xo, yo), t in children_offsets:
if t is None:
cst = st
else:
cst = renpy.game.interface.event_time - t
rv = i.event(ev, x - xo, y - yo, cst)
if rv is not None:
return rv
except IgnoreLayers:
if self.layers:
return None
else:
raise
return None
def Fixed(**properties):
return MultiBox(layout='fixed', **properties)
class SizeGroup(renpy.object.Object):
def __init__(self):
super(SizeGroup, self).__init__()
self.members = [ ]
self._width = None
self.computing_width = False
def width(self, width, height, st, at):
if self._width is not None:
return self._width
if self.computing_width:
return 0
self.computing_width = True
maxwidth = 0
for i in self.members:
rend = i.render(width, height, st, at)
maxwidth = max(rend.width, maxwidth)
self._width = maxwidth
self.computing_width = False
return maxwidth
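# Note (added): all windows sharing a style size_group render at the width
# of the widest member. _width caches the measurement once computed, and
# computing_width breaks the recursion that would otherwise occur while the
# members are rendered in order to be measured.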
size_groups = dict()
class Window(Container):
"""
A window that has padding and margins, and can place a background
behind its child. `child` is the child added to this
displayable. All other properties are as for the :ref:`Window`
screen language statement.
"""
def __init__(self, child=None, style='window', **properties):
super(Window, self).__init__(style=style, **properties)
if child is not None:
self.add(child)
def visit(self):
return [ self.style.background ] + self.children
def get_child(self):
return self.style.child or self.child
def per_interact(self):
size_group = self.style.size_group
if size_group:
group = size_groups.get(size_group, None)
if group is None:
group = size_groups[size_group] = SizeGroup()
group.members.append(self)
def predict_one(self):
pd = renpy.display.predict.displayable
self.style._predict_window(pd)
def render(self, width, height, st, at):
# save some typing.
style = self.style
xminimum = scale(style.xminimum, width)
yminimum = scale(style.yminimum, height)
size_group = self.style.size_group
if size_group and size_group in size_groups:
xminimum = max(xminimum, size_groups[size_group].width(width, height, st, at))
left_margin = scale(style.left_margin, width)
left_padding = scale(style.left_padding, width)
right_margin = scale(style.right_margin, width)
right_padding = scale(style.right_padding, width)
top_margin = scale(style.top_margin, height)
top_padding = scale(style.top_padding, height)
bottom_margin = scale(style.bottom_margin, height)
bottom_padding = scale(style.bottom_padding, height)
# c for combined.
cxmargin = left_margin + right_margin
cymargin = top_margin + bottom_margin
cxpadding = left_padding + right_padding
cypadding = top_padding + bottom_padding
child = self.get_child()
# Render the child.
surf = render(child,
width - cxmargin - cxpadding,
height - cymargin - cypadding,
st, at)
sw, sh = surf.get_size()
# If we don't fill, shrink our size to fit.
if not style.xfill:
width = max(cxmargin + cxpadding + sw, xminimum)
if not style.yfill:
height = max(cymargin + cypadding + sh, yminimum)
rv = renpy.display.render.Render(width, height)
# Draw the background. The background should render at exactly the
# requested size. (That is, be a Frame or a Solid).
if style.background:
bw = width - cxmargin
bh = height - cymargin
back = render(style.background, bw, bh, st, at)
style.background.place(rv, left_margin, top_margin, bw, bh, back, main=False)
offsets = child.place(rv,
left_margin + left_padding,
top_margin + top_padding,
width - cxmargin - cxpadding,
height - cymargin - cypadding,
surf)
# Draw the foreground. The foreground should render at exactly the
# requested size. (That is, be a Frame or a Solid).
if style.foreground:
bw = width - cxmargin
bh = height - cymargin
back = render(style.foreground, bw, bh, st, at)
style.foreground.place(rv, left_margin, top_margin, bw, bh, back, main=False)
if self.child:
self.offsets = [ offsets ]
self.window_size = width, height # W0201
return rv
def dynamic_displayable_compat(st, at, expr):
child = renpy.python.py_eval(expr)
return child, None
class DynamicDisplayable(renpy.display.core.Displayable):
"""
:doc: disp_dynamic
A displayable that can change its child based on a Python
function, over the course of an interaction.
`function`
A function that is called with the arguments:
* The amount of time the displayable has been shown for.
* The amount of time any displayable with the same tag has been shown for.
* Any positional or keyword arguments supplied to DynamicDisplayable.
and should return a (d, redraw) tuple, where:
* `d` is a displayable to show.
* `redraw` is the amount of time to wait before calling the
function again, or None to not call the function again
before the start of the next interaction.
`function` is called at the start of every interaction.
As a special case, `function` may also be a python string that evaluates
to a displayable. In that case, function is run once per interaction.
::
# If tooltip is not empty, shows it in a text. Otherwise,
# show Null. Checks every tenth of a second to see if the
# tooltip has been updated.
init python:
def show_tooltip(st, at):
if tooltip:
return tooltip, .1
else:
return Null(), .1
image tooltipper = DynamicDisplayable(show_tooltip)
"""
nosave = [ 'child' ]
def after_setstate(self):
self.child = None
def __init__(self, function, *args, **kwargs):
super(DynamicDisplayable, self).__init__()
self.child = None
if isinstance(function, basestring):
args = ( function, )
kwargs = { }
function = dynamic_displayable_compat
self.predict_function = kwargs.pop("_predict_function", None)
self.function = function
self.args = args
self.kwargs = kwargs
def visit(self):
return [ ]
def update(self, st, at):
child, redraw = self.function(st, at, *self.args, **self.kwargs)
child = renpy.easy.displayable(child)
child.visit_all(lambda c : c.per_interact())
self.child = child
if redraw is not None:
renpy.display.render.redraw(self, redraw)
def per_interact(self):
renpy.display.render.redraw(self, 0)
def render(self, w, h, st, at):
self.update(st, at)
return renpy.display.render.render(self.child, w, h, st, at)
def predict_one(self):
try:
if self.predict_function:
child = self.predict_function(*self.args, **self.kwargs)
else:
child, _ = self.function(0, 0, *self.args, **self.kwargs)
if child is not None:
renpy.display.predict.displayable(child)
except:
pass
def get_placement(self):
if not self.child:
self.update(0, 0)
return self.child.get_placement()
def event(self, ev, x, y, st):
if self.child:
return self.child.event(ev, x, y, st)
# A cache of compiled conditions used by ConditionSwitch.
cond_cache = { }
# This chooses the first member of switch whose condition evaluates
# to true.
def condition_switch_pick(switch):
for cond, d in switch:
if cond is None:
return d
if cond in cond_cache:
code = cond_cache[cond]
else:
code = renpy.python.py_compile(cond, 'eval')
cond_cache[cond] = code
if renpy.python.py_eval_bytecode(code):
return d
raise Exception("Switch could not choose a displayable.")
def condition_switch_show(st, at, switch):
return condition_switch_pick(switch), None
def condition_switch_predict(switch):
if renpy.game.lint:
return [ d for _cond, d in switch ]
return [ condition_switch_pick(switch) ]
def ConditionSwitch(*args, **kwargs):
"""
:doc: disp_dynamic
This is a displayable that changes what it is showing based on
python conditions. The positional arguments should be given in
groups of two, where each group consists of:
* A string containing a python condition.
* A displayable to use if the condition is true.
The first true condition has its displayable shown; at least
one condition should always be true.
::
image jill = ConditionSwitch(
"jill_beers > 4", "jill_drunk.png",
"True", "jill_sober.png")
"""
kwargs.setdefault('style', 'default')
switch = [ ]
if len(args) % 2 != 0:
raise Exception('ConditionSwitch takes an even number of arguments')
for cond, d in zip(args[0::2], args[1::2]):
if cond not in cond_cache:
code = renpy.python.py_compile(cond, 'eval')
cond_cache[cond] = code
d = renpy.easy.displayable(d)
switch.append((cond, d))
rv = DynamicDisplayable(condition_switch_show,
switch,
_predict_function=condition_switch_predict)
return Position(rv, **kwargs)
def ShowingSwitch(*args, **kwargs):
"""
:doc: disp_dynamic
This is a displayable that changes what it is showing based on which
images are showing on the screen. The positional arguments should
be given in groups of two, where each group consists of:
* A string giving an image name, or None to indicate the default.
* A displayable to use if the condition is true.
A default image should be specified.
One use of ShowingSwitch is to have side images change depending on
the current emotion of a character. For example::
define e = Character("Eileen",
show_side_image=ShowingSwitch(
"eileen happy", Image("eileen_happy_side.png", xalign=1.0, yalign=1.0),
"eileen vhappy", Image("eileen_vhappy_side.png", xalign=1.0, yalign=1.0),
None, Image("eileen_happy_default.png", xalign=1.0, yalign=1.0),
)
)
"""
layer = kwargs.pop('layer', 'master')
if len(args) % 2 != 0:
raise Exception('ShowingSwitch takes an even number of positional arguments')
condargs = [ ]
for name, d in zip(args[0::2], args[1::2]):
if name is not None:
if not isinstance(name, tuple):
name = tuple(name.split())
cond = "renpy.showing(%r, layer=%r)" % (name, layer)
else:
cond = None
condargs.append(cond)
condargs.append(d)
return ConditionSwitch(*condargs, **kwargs)
class IgnoresEvents(Container):
def __init__(self, child, **properties):
super(IgnoresEvents, self).__init__(**properties)
self.add(child)
def render(self, w, h, st, at):
cr = renpy.display.render.render(self.child, w, h, st, at)
cw, ch = cr.get_size()
rv = renpy.display.render.Render(cw, ch)
rv.blit(cr, (0, 0), focus=False)
return rv
def get_placement(self):
return self.child.get_placement()
# Ignores events.
def event(self, ev, x, y, st):
return None
def edgescroll_proportional(n):
"""
An edgescroll function that causes the move speed to be proportional
to the distance from the edge.
"""
return n
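# Note (added): since edgescroll_proportional is the identity, scroll speed
# ramps linearly from 0 at the inner boundary of the edge region up to the
# configured edge speed at the screen border; a function returning 1.0
# unconditionally would give constant-speed scrolling instead.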
class Viewport(Container):
__version__ = 5
def after_upgrade(self, version):
if version < 1:
self.xadjustment = renpy.display.behavior.Adjustment(1, 0)
self.yadjustment = renpy.display.behavior.Adjustment(1, 0)
self.set_adjustments = False
self.mousewheel = False
self.draggable = False
self.width = 0
self.height = 0
if version < 2:
self.drag_position = None
if version < 3:
self.edge_size = False
self.edge_speed = False
self.edge_function = None
self.edge_xspeed = 0
self.edge_yspeed = 0
self.edge_last_st = None
if version < 4:
self.xadjustment_param = None
self.yadjustment_param = None
self.offsets_param = (None, None)
self.set_adjustments_param = True
self.xinitial_param = None
self.yinitial_param = None
if version < 5:
self.focusable = self.draggable
def __init__(self,
child=None,
child_size=(None, None),
offsets=(None, None),
xadjustment=None,
yadjustment=None,
set_adjustments=True,
mousewheel=False,
draggable=False,
edgescroll=None,
style='viewport',
xinitial=None,
yinitial=None,
replaces=None,
**properties):
super(Viewport, self).__init__(style=style, **properties)
if child is not None:
self.add(child)
self.xadjustment_param = xadjustment
self.yadjustment_param = yadjustment
self.offsets_param = offsets
self.set_adjustments_param = set_adjustments
self.xinitial_param = xinitial
self.yinitial_param = yinitial
self._show()
if isinstance(replaces, Viewport):
self.xadjustment.range = replaces.xadjustment.range
self.yadjustment.range = replaces.yadjustment.range
self.xadjustment.value = replaces.xadjustment.value
self.yadjustment.value = replaces.yadjustment.value
self.xoffset = replaces.xoffset
self.yoffset = replaces.yoffset
self.drag_position = replaces.drag_position
else:
self.drag_position = None
self.child_width, self.child_height = child_size
self.mousewheel = mousewheel
self.draggable = draggable
# Layout participates in the focus system so drags get migrated.
self.focusable = draggable
self.width = 0
self.height = 0
# The speed at which we scroll in the x and y directions, in pixels
# per second.
self.edge_xspeed = 0
self.edge_yspeed = 0
# The last time we edgescrolled.
self.edge_last_st = None
if edgescroll is not None:
# The size of the edges that trigger scrolling.
self.edge_size = edgescroll[0]
# The maximum speed at which we scroll, in pixels per second.
self.edge_speed = edgescroll[1]
if len(edgescroll) >= 3:
self.edge_function = edgescroll[2]
else:
self.edge_function = edgescroll_proportional
else:
self.edge_size = 0
self.edge_speed = 0
self.edge_function = edgescroll_proportional
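# Note (added; illustrative values): edgescroll=(64, 300) scrolls at up
# to 300 pixels per second once the pointer is within 64 pixels of a
# viewport edge, with the ramp shaped by the optional third element.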
def _show(self):
if self.xadjustment_param is None:
self.xadjustment = renpy.display.behavior.Adjustment(1, 0)
else:
self.xadjustment = self.xadjustment_param
if self.yadjustment_param is None:
self.yadjustment = renpy.display.behavior.Adjustment(1, 0)
else:
self.yadjustment = self.yadjustment_param
if self.xadjustment.adjustable is None:
self.xadjustment.adjustable = True
if self.yadjustment.adjustable is None:
self.yadjustment.adjustable = True
self.set_adjustments = self.set_adjustments_param
offsets = self.offsets_param
self.xoffset = offsets[0] if (offsets[0] is not None) else self.xinitial_param
self.yoffset = offsets[1] if (offsets[1] is not None) else self.yinitial_param
def per_interact(self):
self.xadjustment.register(self)
self.yadjustment.register(self)
def render(self, width, height, st, at):
self.width = width
self.height = height
child_width = self.child_width or width
child_height = self.child_height or height
surf = render(self.child, child_width, child_height, st, at)
cw, ch = surf.get_size()
if not self.style.xfill:
width = min(cw, width)
if not self.style.yfill:
height = min(ch, height)
width = max(width, self.style.xminimum)
height = max(height, self.style.yminimum)
if self.set_adjustments:
self.xadjustment.range = max(cw - width, 0)
self.xadjustment.page = width
self.yadjustment.range = max(ch - height, 0)
self.yadjustment.page = height
if self.xoffset is not None:
if isinstance(self.xoffset, int):
value = self.xoffset
else:
value = max(cw - width, 0) * self.xoffset
self.xadjustment.value = value
if self.yoffset is not None:
if isinstance(self.yoffset, int):
value = self.yoffset
else:
value = max(ch - height, 0) * self.yoffset
self.yadjustment.value = value
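# Note (added): an integer xoffset/yoffset is treated as an absolute
# pixel offset, while a float is a fraction of the scrollable range, so
# yoffset=0.5 starts the viewport scrolled halfway down.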
if self.edge_size and (self.edge_last_st is not None) and (self.edge_xspeed or self.edge_yspeed):
duration = max(st - self.edge_last_st, 0)
self.xadjustment.change(self.xadjustment.value + duration * self.edge_xspeed)
self.yadjustment.change(self.yadjustment.value + duration * self.edge_yspeed)
self.check_edge_redraw(st)
cxo = -int(self.xadjustment.value)
cyo = -int(self.yadjustment.value)
self.offsets = [ (cxo, cyo) ]
rv = renpy.display.render.Render(width, height)
rv.blit(surf, (cxo, cyo))
return rv
def check_edge_redraw(self, st):
redraw = False
if (self.edge_xspeed > 0) and (self.xadjustment.value < self.xadjustment.range):
redraw = True
if (self.edge_xspeed < 0) and (self.xadjustment.value > 0):
redraw = True
if (self.edge_yspeed > 0) and (self.yadjustment.value < self.yadjustment.range):
redraw = True
if (self.edge_yspeed < 0) and (self.yadjustment.value > 0):
redraw = True
if redraw:
renpy.display.render.redraw(self, 0)
self.edge_last_st = st
else:
self.edge_last_st = None
def event(self, ev, x, y, st):
self.xoffset = None
self.yoffset = None
rv = super(Viewport, self).event(ev, x, y, st)
if rv is not None:
return rv
if self.draggable and renpy.display.focus.get_grab() == self:
oldx, oldy = self.drag_position
dx = x - oldx
dy = y - oldy
self.xadjustment.change(self.xadjustment.value - dx)
self.yadjustment.change(self.yadjustment.value - dy)
self.drag_position = (x, y) # W0201
if renpy.display.behavior.map_event(ev, 'viewport_drag_end'):
renpy.display.focus.set_grab(None)
raise renpy.display.core.IgnoreEvent()
if not ((0 <= x < self.width) and (0 <= y < self.height)):
return
if self.mousewheel:
if renpy.display.behavior.map_event(ev, 'viewport_up'):
rv = self.yadjustment.change(self.yadjustment.value - self.yadjustment.step)
if rv is not None:
return rv
else:
raise renpy.display.core.IgnoreEvent()
if renpy.display.behavior.map_event(ev, 'viewport_down'):
rv = self.yadjustment.change(self.yadjustment.value + self.yadjustment.step)
if rv is not None:
return rv
else:
raise renpy.display.core.IgnoreEvent()
if self.draggable:
if renpy.display.behavior.map_event(ev, 'viewport_drag_start'):
self.drag_position = (x, y)
renpy.display.focus.set_grab(self)
raise renpy.display.core.IgnoreEvent()
if self.edge_size and ev.type in [ pygame.MOUSEMOTION, pygame.MOUSEBUTTONDOWN, pygame.MOUSEBUTTONUP ]:
def speed(n, zero, one):
"""
Given a position `n`, computes the speed. The speed is 0.0
when `n` == `zero`, 1.0 when `n` == `one`, and linearly
interpolated when between.
Returns 0.0 when outside the bounds - in either direction.
"""
n = 1.0 * (n - zero) / (one - zero)
if n < 0.0:
return 0.0
if n > 1.0:
return 0.0
return n
xspeed = speed(x, self.width - self.edge_size, self.width)
xspeed -= speed(x, self.edge_size, 0)
self.edge_xspeed = self.edge_speed * self.edge_function(xspeed)
yspeed = speed(y, self.height - self.edge_size, self.height)
yspeed -= speed(y, self.edge_size, 0)
self.edge_yspeed = self.edge_speed * self.edge_function(yspeed)
if xspeed or yspeed:
self.check_edge_redraw(st)
else:
self.edge_last_st = None
return None
def set_xoffset(self, offset):
self.xoffset = offset
renpy.display.render.redraw(self, 0)
def set_yoffset(self, offset):
self.yoffset = offset
renpy.display.render.redraw(self, 0)
def LiveCrop(rect, child, **properties):
"""
:doc: disp_imagelike
This creates a displayable by cropping `child` to `rect`, where
`rect` is an (x, y, width, height) tuple. ::
image eileen cropped = LiveCrop((0, 0, 300, 300), "eileen happy")
"""
return renpy.display.motion.Transform(child, crop=rect, **properties)
class Side(Container):
possible_positions = set([ 'tl', 't', 'tr', 'r', 'br', 'b', 'bl', 'l', 'c'])
def after_setstate(self):
self.sized = False
def __init__(self, positions, style='side', **properties):
super(Side, self).__init__(style=style, **properties)
if isinstance(positions, basestring):
positions = positions.split()
seen = set()
for i in positions:
if i not in Side.possible_positions:
raise Exception("Side used with impossible position '%s'." % (i,))
if i in seen:
raise Exception("Side used with duplicate position '%s'." % (i,))
seen.add(i)
self.positions = tuple(positions)
self.sized = False
def add(self, d):
if len(self.children) >= len(self.positions):
raise Exception("Side has been given too many arguments.")
super(Side, self).add(d)
def _clear(self):
super(Side, self)._clear()
self.sized = False
def render(self, width, height, st, at):
pos_d = { }
pos_i = { }
for i, (pos, d) in enumerate(zip(self.positions, self.children)):
pos_d[pos] = d
pos_i[pos] = i
# Figure out the size of each widget (and hence where the
# widget needs to be placed).
if not self.sized:
self.sized = True
# Deal with various spacings.
spacing = self.style.spacing
def spacer(a, b, c, axis):
if (a in pos_d) or (b in pos_d) or (c in pos_d):
return spacing, axis - spacing
else:
return 0, axis
self.left_space, width = spacer('tl', 'l', 'bl', width) # W0201
self.right_space, width = spacer('tr', 'r', 'br', width) # W0201
self.top_space, height = spacer('tl', 't', 'tr', height) # W0201
self.bottom_space, height = spacer('bl', 'b', 'br', height) # W0201
# The sizes of the various borders.
left = 0
right = 0
top = 0
bottom = 0
cwidth = 0
cheight = 0
def sizeit(pos, width, height, owidth, oheight):
if pos not in pos_d:
return owidth, oheight
rend = render(pos_d[pos], width, height, st, at)
rv = max(owidth, rend.width), max(oheight, rend.height)
rend.kill()
return rv
cwidth, cheight = sizeit('c', width, height, 0, 0)
cwidth, top = sizeit('t', cwidth, height, cwidth, top)
cwidth, bottom = sizeit('b', cwidth, height, cwidth, bottom)
left, cheight = sizeit('l', width, cheight, left, cheight)
right, cheight = sizeit('r', width, cheight, right, cheight)
left, top = sizeit('tl', left, top, left, top)
left, bottom = sizeit('bl', left, bottom, left, bottom)
right, top = sizeit('tr', right, top, right, top)
right, bottom = sizeit('br', right, bottom, right, bottom)
self.cwidth = cwidth # W0201
self.cheight = cheight # W0201
self.top = top # W0201
self.bottom = bottom # W0201
self.left = left # W0201
self.right = right # W0201
else:
cwidth = self.cwidth
cheight = self.cheight
top = self.top
bottom = self.bottom
left = self.left
right = self.right
# Now, place everything onto the render.
self.offsets = [ None ] * len(self.children)
lefts = self.left_space
rights = self.right_space
tops = self.top_space
bottoms = self.bottom_space
cwidth = min(cwidth, width - left - lefts - right - rights)
cheight = min(cheight, height - top - tops - bottom - bottoms)
rv = renpy.display.render.Render(left + lefts + cwidth + rights + right,
top + tops + cheight + bottoms + bottom)
def place(pos, x, y, w, h):
if pos not in pos_d:
return
d = pos_d[pos]
i = pos_i[pos]
rend = render(d, w, h, st, at)
self.offsets[i] = pos_d[pos].place(rv, x, y, w, h, rend)
col1 = 0
col2 = left + lefts
col3 = left + lefts + cwidth + rights
row1 = 0
row2 = top + tops
row3 = top + tops + cheight + bottoms
place('c', col2, row2, cwidth, cheight)
place('t', col2, row1, cwidth, top)
place('r', col3, row2, right, cheight)
place('b', col2, row3, cwidth, bottom)
place('l', col1, row2, left, cheight)
place('tl', col1, row1, left, top)
place('tr', col3, row1, right, top)
place('br', col3, row3, right, bottom)
place('bl', col1, row3, left, bottom)
return rv
class Alpha(renpy.display.core.Displayable):
def __init__(self, start, end, time, child=None, repeat=False, bounce=False,
anim_timebase=False, time_warp=None, **properties):
super(Alpha, self).__init__(**properties)
self.start = start
self.end = end
self.time = time
self.child = renpy.easy.displayable(child)
self.repeat = repeat
self.anim_timebase = anim_timebase
self.time_warp = time_warp
def visit(self):
return [ self.child ]
def render(self, width, height, st, at):
if self.anim_timebase:
t = at
else:
t = st
if self.time:
done = min(t / self.time, 1.0)
else:
done = 1.0
if renpy.game.less_updates:
done = 1.0
elif self.repeat:
done = done % 1.0
renpy.display.render.redraw(self, 0)
elif done != 1.0:
renpy.display.render.redraw(self, 0)
if self.time_warp:
done = self.time_warp(done)
alpha = self.start + done * (self.end - self.start)
rend = renpy.display.render.render(self.child, width, height, st, at)
w, h = rend.get_size()
rv = renpy.display.render.Render(w, h)
rv.blit(rend, (0, 0))
rv.alpha = alpha
return rv
class AdjustTimes(Container):
def __init__(self, child, start_time, anim_time, **properties):
super(AdjustTimes, self).__init__(**properties)
self.start_time = start_time
self.anim_time = anim_time
self.add(child)
def render(self, w, h, st, at):
if self.start_time is None:
self.start_time = renpy.game.interface.frame_time
if self.anim_time is None:
self.anim_time = renpy.game.interface.frame_time
st = renpy.game.interface.frame_time - self.start_time
at = renpy.game.interface.frame_time - self.anim_time
cr = renpy.display.render.render(self.child, w, h, st, at)
cw, ch = cr.get_size()
rv = renpy.display.render.Render(cw, ch)
rv.blit(cr, (0, 0))
self.offsets = [ (0, 0) ]
return rv
def get_placement(self):
return self.child.get_placement()
class LiveTile(Container):
"""
:doc: disp_imagelike
Tiles `child` until it fills the area allocated to this displayable.
::
image bg tile = LiveTile("bg.png")
"""
def __init__(self, child, style='tile', **properties):
super(LiveTile, self).__init__(style=style, **properties)
self.add(child)
def render(self, width, height, st, at):
cr = renpy.display.render.render(self.child, width, height, st, at)
cw, ch = cr.get_size()
rv = renpy.display.render.Render(width, height)
width = int(width)
height = int(height)
cw = int(cw)
ch = int(ch)
for y in range(0, height, ch):
for x in range(0, width, cw):
rv.blit(cr, (x, y), focus=False)
return rv
class Flatten(Container):
"""
:doc: disp_imagelike
This flattens `child`, which may be made up of multiple textures, into
a single texture.
Certain operations, like the alpha transform property, apply to every
texture making up a displayable, which can yield incorrect results
when the textures overlap on screen. Flatten creates a single texture
from multiple textures, which can prevent this problem.
Flatten is a relatively expensive operation, and so should only be used
when absolutely required.
"""
def __init__(self, child, **properties):
super(Flatten, self).__init__(**properties)
self.add(child)
def render(self, width, height, st, at):
cr = renpy.display.render.render(self.child, width, height, st, at)
cw, ch = cr.get_size()
tex = cr.render_to_texture(True)
rv = renpy.display.render.Render(cw, ch)
rv.blit(tex, (0, 0))
rv.depends_on(cr, focus=True)
return rv
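# Illustrative usage (added; hypothetical names): Flatten is typically
# applied before an alpha transform so overlapping textures fade as one:
#
#     image eileen_faded = Transform(Flatten(composite_eileen), alpha=0.5)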
|
joxer/Baka-No-Voltron
|
tmp/android.dist/private/renpy/display/layout.py
|
Python
|
gpl-2.0
| 55,212
|
[
"VisIt"
] |
c0455997ae265fddfdbe2a195b727d4a98a16e1a99dfa2cfc33c06b51e4f3aeb
|
# $Id$
#
# Copyright (C) 2001-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" This has all be re-implemented in the C++ code
"""
from __future__ import print_function
import math
class BitVect:
def __init__(self,nBits):
self.nBits = nBits
self.bits = [0]*nBits
def NumOnBits(self):
return len(self.GetOnBits())
def GetOnBits(self,sort=1,reverse=0):
l = [idx for idx in xrange(self.nBits) if self.bits[idx] == 1]
if reverse:
l.reverse()
return l
def TanimotoSimilarity(self,other):
if not isinstance(other,BitVect):
raise TypeError("Tanimoto similarities can only be calculated between two BitVects")
if len(self)!=len(other):
raise ValueError("BitVects must be the same length")
bc = len(self & other)
b1 = self.NumOnBits()
b2 = other.NumOnBits()
return float(bc) / float(b1 + b2 - bc)
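# Worked example (added), matching the demo in __main__ below: with
# self = {0, 3} and other = {0, 4}, bc == 1, b1 == 2 and b2 == 2, so
# the similarity is 1.0 / (2 + 2 - 1) == 1/3.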
TanimotoSimilarity = TanimotoSimilarity
def EuclideanDistance(self,other):
if not isinstance(other,BitVect):
raise TypeError("Tanimoto similarities can only be calculated between two BitVects")
bt = len(self)
bi = len(self ^ (~ other))
return math.sqrt(bt-bi)/bt
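# Worked example (added): with nBits == 10, self = {0, 3} and
# other = {0, 4}, the two vectors agree at 8 positions, so bi == 8
# and the distance is sqrt(10 - 8) / 10, roughly 0.141.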
def __getitem__(self,which):
if which >= self.nBits or which < 0:
raise ValueError('bad index')
return self.bits[which]
def __setitem__(self,which,val):
if which >= self.nBits or which < 0:
raise ValueError('bad index')
if val not in [0,1]:
raise ValueError('val must be 0 or 1')
self.bits[which] = val
def __len__(self):
return self.nBits
def __and__(self,other):
if not isinstance(other,BitVect):
raise TypeError("BitVects can only be &'ed with other BitVects")
if len(self) != len(other):
raise ValueError("BitVects must be of the same length")
l1 = self.GetOnBits()
l2 = other.GetOnBits()
r = [bit for bit in l1 if bit in l2]
return r
def __or__(self,other):
if not isinstance(other,BitVect):
raise TypeError("BitVects can only be |'ed with other BitVects")
if len(self) != len(other):
raise ValueError("BitVects must be of the same length")
l1 = self.GetOnBits()
l2 = other.GetOnBits()
r = l1 + [bit for bit in l2 if bit not in l1]
r.sort()
return r
def __xor__(self,other):
if not isinstance(other,BitVect):
raise TypeError("BitVects can only be ^'ed with other BitVects")
if len(self) != len(other):
raise ValueError("BitVects must be of the same length")
l1 = self.GetOnBits()
l2 = other.GetOnBits()
r = [bit for bit in l1 if bit not in l2] + [bit for bit in l2 if bit not in l1]
r.sort()
return r
def __invert__(self):
res = BitVect(len(self))
for i in xrange(len(self)):
res[i] = not self[i]
return res
class SparseBitVect(BitVect):
def __init__(self,nBits):
self.nBits = nBits
self.bits = []
def NumOnBits(self):
return len(self.bits)
def GetOnBits(self,sort=1,reverse=0):
l = self.bits[:]
if sort:
l.sort()
if reverse:
l.reverse()
return l
def __getitem__(self,which):
if which >= self.nBits or which < 0:
raise ValueError('bad index')
if which in self.bits:
return 1
else:
return 0
def __setitem__(self,which,val):
if which >= self.nBits or which < 0:
raise ValueError('bad index')
if val == 0:
if which in self.bits:
self.bits.remove(which)
else:
self.bits.append(which)
def __len__(self):
return self.nBits
if __name__ == '__main__':
b1 = BitVect(10)
b2 = SparseBitVect(10)
b1[0] = 1
b2[0] = 1
b1[3] = 1
b2[4] = 1
b2[5] = 1
b2[5] = 0
print('b1:',b1.GetOnBits())
print('b2:',b2.GetOnBits())
print('&:', b1 & b2)
print('|:', b1 | b2)
print('^:', b1 ^ b2)
print('b1.Tanimoto(b2):',b1.TanimotoSimilarity(b2))
print('b1.Tanimoto(b1):',b1.TanimotoSimilarity(b1))
print('b2.Tanimoto(b2):',b2.TanimotoSimilarity(b2))
print('b2.Tanimoto(b1):',b2.TanimotoSimilarity(b1))
|
soerendip42/rdkit
|
rdkit/DataStructs/BitVect.py
|
Python
|
bsd-3-clause
| 4,250
|
[
"RDKit"
] |
00a37da6799eb40a01e7656ee161c66e1cdb83e22c80c264ba33dd2cdfedc93e
|
"""Support the ElkM1 Gold and ElkM1 EZ8 alarm/integration panels."""
import asyncio
import logging
import re
import async_timeout
import elkm1_lib as elkm1
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_EXCLUDE,
CONF_HOST,
CONF_INCLUDE,
CONF_PASSWORD,
CONF_PREFIX,
CONF_TEMPERATURE_UNIT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady, HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as dt_util
from .const import (
ATTR_KEY,
ATTR_KEY_NAME,
ATTR_KEYPAD_ID,
BARE_TEMP_CELSIUS,
BARE_TEMP_FAHRENHEIT,
CONF_AREA,
CONF_AUTO_CONFIGURE,
CONF_COUNTER,
CONF_ENABLED,
CONF_KEYPAD,
CONF_OUTPUT,
CONF_PLC,
CONF_SETTING,
CONF_TASK,
CONF_THERMOSTAT,
CONF_ZONE,
DOMAIN,
ELK_ELEMENTS,
EVENT_ELKM1_KEYPAD_KEY_PRESSED,
)
SYNC_TIMEOUT = 120
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [
"alarm_control_panel",
"climate",
"light",
"scene",
"sensor",
"switch",
]
SPEAK_SERVICE_SCHEMA = vol.Schema(
{
vol.Required("number"): vol.All(vol.Coerce(int), vol.Range(min=0, max=999)),
vol.Optional("prefix", default=""): cv.string,
}
)
SET_TIME_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional("prefix", default=""): cv.string,
}
)
def _host_validator(config):
"""Validate that a host is properly configured."""
if config[CONF_HOST].startswith("elks://"):
if CONF_USERNAME not in config or CONF_PASSWORD not in config:
raise vol.Invalid("Specify username and password for elks://")
elif not config[CONF_HOST].startswith("elk://") and not config[
CONF_HOST
].startswith("serial://"):
raise vol.Invalid("Invalid host URL")
return config
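# Worked examples (added): "elk://1.2.3.4" and "serial:///dev/ttyS0" pass
# without credentials, while "elks://1.2.3.4" is rejected unless a
# username and password are configured.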
def _elk_range_validator(rng):
def _housecode_to_int(val):
match = re.search(r"^([a-p])(0[1-9]|1[0-6]|[1-9])$", val.lower())
if match:
return (ord(match.group(1)) - ord("a")) * 16 + int(match.group(2))
raise vol.Invalid("Invalid range")
def _elk_value(val):
return int(val) if val.isdigit() else _housecode_to_int(val)
vals = [s.strip() for s in str(rng).split("-")]
start = _elk_value(vals[0])
end = start if len(vals) == 1 else _elk_value(vals[1])
return (start, end)
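# Worked examples (added): "3-8" -> (3, 8); "a1-b16" -> (1, 32), since
# housecode "a1" is 1 and "b16" is 1 * 16 + 16 = 32; a bare "c5" yields
# the single-element range (37, 37).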
def _has_all_unique_prefixes(value):
"""Validate that each m1 configured has a unique prefix.
Uniqueness is determined case-independently.
"""
prefixes = [device[CONF_PREFIX] for device in value]
schema = vol.Schema(vol.Unique())
schema(prefixes)
return value
DEVICE_SCHEMA_SUBDOMAIN = vol.Schema(
{
vol.Optional(CONF_ENABLED, default=True): cv.boolean,
vol.Optional(CONF_INCLUDE, default=[]): [_elk_range_validator],
vol.Optional(CONF_EXCLUDE, default=[]): [_elk_range_validator],
}
)
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PREFIX, default=""): vol.All(cv.string, vol.Lower),
vol.Optional(CONF_USERNAME, default=""): cv.string,
vol.Optional(CONF_PASSWORD, default=""): cv.string,
vol.Optional(CONF_AUTO_CONFIGURE, default=False): cv.boolean,
# cv.temperature_unit will mutate 'C' -> '°C' and 'F' -> '°F'
vol.Optional(
CONF_TEMPERATURE_UNIT, default=BARE_TEMP_FAHRENHEIT
): cv.temperature_unit,
vol.Optional(CONF_AREA, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_COUNTER, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_KEYPAD, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_OUTPUT, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_PLC, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_SETTING, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_TASK, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_THERMOSTAT, default={}): DEVICE_SCHEMA_SUBDOMAIN,
vol.Optional(CONF_ZONE, default={}): DEVICE_SCHEMA_SUBDOMAIN,
},
_host_validator,
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [DEVICE_SCHEMA], _has_all_unique_prefixes)},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, hass_config: ConfigType) -> bool:
"""Set up the Elk M1 platform."""
hass.data.setdefault(DOMAIN, {})
_create_elk_services(hass)
if DOMAIN not in hass_config:
return True
for index, conf in enumerate(hass_config[DOMAIN]):
_LOGGER.debug("Importing elkm1 #%d - %s", index, conf[CONF_HOST])
# The update of the config entry is done in async_setup
# to ensure the entry is updated before async_setup_entry
# is called to avoid a situation where the user has to restart
# twice for the changes to take effect
current_config_entry = _async_find_matching_config_entry(
hass, conf[CONF_PREFIX]
)
if current_config_entry:
# If they alter the yaml config we import the changes
# since there currently is no practical way to do an options flow
# with the large amount of include/exclude/enabled options that elkm1 has.
hass.config_entries.async_update_entry(current_config_entry, data=conf)
continue
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=conf,
)
)
return True
@callback
def _async_find_matching_config_entry(hass, prefix):
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.unique_id == prefix:
return entry
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Elk-M1 Control from a config entry."""
conf = entry.data
_LOGGER.debug("Setting up elkm1 %s", conf["host"])
temperature_unit = TEMP_FAHRENHEIT
if conf[CONF_TEMPERATURE_UNIT] in (BARE_TEMP_CELSIUS, TEMP_CELSIUS):
temperature_unit = TEMP_CELSIUS
config = {"temperature_unit": temperature_unit}
if not conf[CONF_AUTO_CONFIGURE]:
# With elkm1-lib==0.7.16 and later auto configure is available
config["panel"] = {"enabled": True, "included": [True]}
for item, max_ in ELK_ELEMENTS.items():
config[item] = {
"enabled": conf[item][CONF_ENABLED],
"included": [not conf[item]["include"]] * max_,
}
try:
_included(conf[item]["include"], True, config[item]["included"])
_included(conf[item]["exclude"], False, config[item]["included"])
except (ValueError, vol.Invalid) as err:
_LOGGER.error("Config item: %s; %s", item, err)
return False
elk = elkm1.Elk(
{
"url": conf[CONF_HOST],
"userid": conf[CONF_USERNAME],
"password": conf[CONF_PASSWORD],
}
)
elk.connect()
def _element_changed(element, changeset):
keypress = changeset.get("last_keypress")
if keypress is None:
return
hass.bus.async_fire(
EVENT_ELKM1_KEYPAD_KEY_PRESSED,
{
ATTR_KEYPAD_ID: element.index + 1,
ATTR_KEY_NAME: keypress[0],
ATTR_KEY: keypress[1],
},
)
for keypad in elk.keypads: # pylint: disable=no-member
keypad.add_callback(_element_changed)
try:
if not await async_wait_for_elk_to_sync(elk, SYNC_TIMEOUT, conf[CONF_HOST]):
return False
except asyncio.TimeoutError as exc:
raise ConfigEntryNotReady from exc
hass.data[DOMAIN][entry.entry_id] = {
"elk": elk,
"prefix": conf[CONF_PREFIX],
"auto_configure": conf[CONF_AUTO_CONFIGURE],
"config": config,
"keypads": {},
}
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
def _included(ranges, set_to, values):
for rng in ranges:
if not rng[0] <= rng[1] <= len(values):
raise vol.Invalid(f"Invalid range {rng}")
values[rng[0] - 1 : rng[1]] = [set_to] * (rng[1] - rng[0] + 1)
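# Worked example (added): with values = [False] * 8,
# _included([(2, 4)], True, values) flips entries 2 through 4, giving
# [False, True, True, True, False, False, False, False].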
def _find_elk_by_prefix(hass, prefix):
"""Search all config entries for a given prefix."""
for entry_id in hass.data[DOMAIN]:
if hass.data[DOMAIN][entry_id]["prefix"] == prefix:
return hass.data[DOMAIN][entry_id]["elk"]
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
# disconnect cleanly
hass.data[DOMAIN][entry.entry_id]["elk"].disconnect()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def async_wait_for_elk_to_sync(elk, timeout, conf_host):
"""Wait until the elk has finished sync. Can fail login or timeout."""
def login_status(succeeded):
nonlocal success
success = succeeded
if succeeded:
_LOGGER.debug("ElkM1 login succeeded")
else:
elk.disconnect()
_LOGGER.error("ElkM1 login failed; invalid username or password")
event.set()
def sync_complete():
event.set()
success = True
event = asyncio.Event()
elk.add_handler("login", login_status)
elk.add_handler("sync_complete", sync_complete)
try:
with async_timeout.timeout(timeout):
await event.wait()
except asyncio.TimeoutError:
_LOGGER.error(
"Timed out after %d seconds while trying to sync with ElkM1 at %s",
timeout,
conf_host,
)
elk.disconnect()
raise
return success
def _create_elk_services(hass):
def _getelk(service):
prefix = service.data["prefix"]
elk = _find_elk_by_prefix(hass, prefix)
if elk is None:
raise HomeAssistantError(f"No ElkM1 with prefix '{prefix}' found")
return elk
def _speak_word_service(service):
_getelk(service).panel.speak_word(service.data["number"])
def _speak_phrase_service(service):
_getelk(service).panel.speak_phrase(service.data["number"])
def _set_time_service(service):
_getelk(service).panel.set_time(dt_util.now())
hass.services.async_register(
DOMAIN, "speak_word", _speak_word_service, SPEAK_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, "speak_phrase", _speak_phrase_service, SPEAK_SERVICE_SCHEMA
)
hass.services.async_register(
DOMAIN, "set_time", _set_time_service, SET_TIME_SERVICE_SCHEMA
)
def create_elk_entities(elk_data, elk_elements, element_type, class_, entities):
"""Create the ElkM1 devices of a particular class."""
auto_configure = elk_data["auto_configure"]
if not auto_configure and not elk_data["config"][element_type]["enabled"]:
return
elk = elk_data["elk"]
_LOGGER.debug("Creating elk entities for %s", elk)
for element in elk_elements:
if auto_configure:
if not element.configured:
continue
# Only check the included list if auto configure is not used.
elif not elk_data["config"][element_type]["included"][element.index]:
continue
entities.append(class_(element, elk, elk_data))
return entities
class ElkEntity(Entity):
"""Base class for all Elk entities."""
def __init__(self, element, elk, elk_data):
"""Initialize the base of all Elk devices."""
self._elk = elk
self._element = element
self._prefix = elk_data["prefix"]
self._temperature_unit = elk_data["config"]["temperature_unit"]
# unique_id starts with elkm1_ iff there is no prefix
# it starts with elkm1m_{prefix} iff there is a prefix
# this is to avoid a conflict between
# prefix=foo, name=bar (which would be elkm1_foo_bar)
# - and -
# prefix="", name="foo bar" (which would be elkm1_foo_bar also)
# we could have used elkm1__foo_bar for the latter, but that
# would have been a breaking change
if self._prefix != "":
uid_start = f"elkm1m_{self._prefix}"
else:
uid_start = "elkm1"
self._unique_id = f"{uid_start}_{self._element.default_name('_')}".lower()
@property
def name(self):
"""Name of the element."""
return f"{self._prefix}{self._element.name}"
@property
def unique_id(self):
"""Return unique id of the element."""
return self._unique_id
@property
def should_poll(self) -> bool:
"""Don't poll this device."""
return False
@property
def extra_state_attributes(self):
"""Return the default attributes of the element."""
return {**self._element.as_dict(), **self.initial_attrs()}
@property
def available(self):
"""Is the entity available to be updated."""
return self._elk.is_connected()
def initial_attrs(self):
"""Return the underlying element's attributes as a dict."""
attrs = {}
attrs["index"] = self._element.index + 1
return attrs
def _element_changed(self, element, changeset):
pass
@callback
def _element_callback(self, element, changeset):
"""Handle callback from an Elk element that has changed."""
self._element_changed(element, changeset)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callback for ElkM1 changes and update entity state."""
self._element.add_callback(self._element_callback)
self._element_callback(self._element, {})
@property
def device_info(self):
"""Device info connecting via the ElkM1 system."""
return {
"via_device": (DOMAIN, f"{self._prefix}_system"),
}
class ElkAttachedEntity(ElkEntity):
"""An elk entity that is attached to the elk system."""
@property
def device_info(self):
"""Device info for the underlying ElkM1 system."""
device_name = "ElkM1"
if self._prefix:
device_name += f" {self._prefix}"
return {
"name": device_name,
"identifiers": {(DOMAIN, f"{self._prefix}_system")},
"sw_version": self._elk.panel.elkm1_version,
"manufacturer": "ELK Products, Inc.",
"model": "M1",
}
|
sander76/home-assistant
|
homeassistant/components/elkm1/__init__.py
|
Python
|
apache-2.0
| 14,937
|
[
"Elk"
] |
df10e1c6c789bbce1b7b4baeaedd2730c8d8830fdc357ca6f72c4adc68113ea9
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PySCM -- Python Spike Counter Model
# Copyright (C) 2016 Christoph Jenzen, Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import sys
import matplotlib.pyplot as plt
import pyscm
import pynam.data as data
import pynam.network as netw
import pynnless as pynl
# check simulator
if len(sys.argv) != 2:
print("Usage: " + sys.argv[0] + " <SIMULATOR>")
sys.exit(1)
# Read in neuron data
with open("data/neuron_data.json", 'r') as outfile:
dict = json.load(outfile)
data_params = dict["data_params"]
params = dict["neuron_params"]
delay = dict["delay"]
terminating_neurons = dict["terminating_neurons"]
flag = dict["Simple_Network"]
# Read in neuron weights, some still have to be set manually
with open("data/optimised_weights.json", 'r') as outfile:
weights = json.load(outfile)
# Generate BiNAM
mat_in = data.generate(data_params["n_bits_in"], data_params["n_ones_in"],
data_params["n_samples"])
mat_out = data.generate(data_params["n_bits_out"], data_params["n_ones_out"],
data_params["n_samples"])
print "Data generated!"
# set up simulator
scm = pyscm.SpikeCounterModel(mat_in, mat_out)
sim = pynl.PyNNLess(sys.argv[1])
net, input_indices, _, input_times = scm.build(params=params, weights=weights,
delay=delay,
terminating_neurons=terminating_neurons,
flag=flag)
print "Preparations done"
# Simulation
res = sim.run(net)
print "Simulation done"
# Plot
for pIdx, pop in enumerate(res):
if (not "spikes" in pop):
continue
output_times = pop["spikes"]
fig = plt.figure()
ax = fig.gca()
for i, spikes in enumerate(output_times):
ax.plot(spikes, [i + 1] * len(spikes), '.', color=[0, 0, 0])
ax.set_xlabel("Spike time [ms]")
ax.set_ylabel("Neuron index")
ax.set_title("Population " + str(pIdx))
# plt.show()
# Analyse
output_times, output_indices = netw.NetworkInstance.match_static(input_times,
input_indices,
res[0][
"spikes"])
analysis = netw.NetworkAnalysis(input_times=input_times,
input_indices=input_indices,
output_times=output_times,
output_indices=output_indices,
data_params=data_params,
mat_in=mat_in, mat_out=mat_out)
I, I_norm, fp, fn, I_start, I_norm_start, fp_start, fn_start = pyscm.scm_analysis(
analysis, res[2]["spikes"], delay, flag)
|
hbp-sanncs/pyscm
|
run.py
|
Python
|
gpl-3.0
| 3,469
|
[
"NEURON"
] |
de9521acb7eb19d115530c24c72b7e9da4fd5c72cf1d6c818fe10e54e30c203c
|
from keys import *
from simulation_params import *
import nest
import numpy.random as random
# Neuron parameters
hh_neuronparams = {'E_L': -70., # Resting membrane potential in mV
'V_T': -63., # Voltage offset that controls dynamics.
# -63mV results in a threshold around -50mV.
'C_m': 2., # Capacity of the membrane in pF
't_ref': 2., # Duration of refractory period (V_m = V_reset) in ms
'tau_syn_ex': 5., # Time constant of postsynaptic excitatory currents in ms
'tau_syn_in': 10. # Time constant of postsynaptic inhibitory currents in ms
}
# Synapse common parameters
STDP_synapseparams = {
'alpha': random.normal(0.5, 5.0), # Asymmetry parameter (scales depressing increments as alpha*lambda)
'lambda': 0.5 # Step size
}
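# Note (added): random.normal here (and random.uniform in the delay
# entries below) is evaluated once at import time, so every connection
# built from these dicts shares the same sampled value; per-connection
# randomness would need NEST's own randomised parameters instead.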
# Glutamate synapse
STDP_synparams_Glu = dict({'delay': random.uniform(low=1.0, high=1.3), # Distribution of delay values for connections
'weight': w_Glu, # Weight (power) of synapse
'Wmax': 20.}, **STDP_synapseparams) # Maximum allowed weight
# GABA synapse
STDP_synparams_GABA = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_GABA,
'Wmax': -20.}, **STDP_synapseparams)
# Acetylcholine synapse
STDP_synparams_ACh = dict({'delay': random.uniform(low=1.0, high=1.3),
'weight': w_ACh,
'Wmax': 20.}, **STDP_synapseparams)
# Noradrenaline excitatory synapse
NORA_synparams_ex = dict({'delay': 1.,
'weight': w_NA_ex,
'Wmax': 100.})
# Noradrenaline inhibitory synapse
NORA_synparams_in = dict({'delay': 1.,
'weight': w_NA_in,
'Wmax': -100.})
# Dopamine excitatory synapse
DOPA_synparams_ex = dict({'delay': 1.,
'weight': w_DA_ex,
'Wmax': 100.})
# Dopamine inhibitory synapse
DOPA_synparams_in = dict({'delay': 1.,
'weight': w_DA_in,
'Wmax': -100.})
# Dictionary of synapses with keys and their parameters
synapses = {GABA: (gaba_synapse, w_GABA ),
Glu: (glu_synapse, w_Glu ),
ACh: (ach_synapse, w_ACh ),
NA_ex: (nora_synapse_ex, w_NA_ex),
NA_in: (nora_synapse_in, w_NA_in),
DA_ex: (dopa_synapse_ex, w_DA_ex),
DA_in: (dopa_synapse_in, w_DA_in)
}
# Parameters for generator
static_syn = {
'weight': w_Glu * 5,
'delay': pg_delay
}
# Device parameters
multimeter_param = {'to_memory': True,
'to_file': False,
'withtime': True,
'interval': 0.1,
'record_from': ['V_m'],
'withgid': True}
detector_param = {'label': 'spikes',
'withtime': True,
'withgid': True,
'to_file': False,
'to_memory': True,
'scientific': True}
|
research-team/NEUCOGAR
|
NEST/cube/noradrenaline/scripts-2/synapses.py
|
Python
|
gpl-2.0
| 3,267
|
[
"NEURON"
] |
d00e7e349223e5a9f5e19c1e918f1023db7b184610381db1fdff043bed35eeb2
|
from __future__ import annotations
import argparse
import sys
from typing import Iterable
from typing import Sequence
from tokenize_rt import src_to_tokens
from tokenize_rt import Token
from tokenize_rt import tokens_to_src
from add_trailing_comma._ast_helpers import ast_parse
from add_trailing_comma._data import FUNCS
from add_trailing_comma._data import visit
from add_trailing_comma._token_helpers import find_simple
from add_trailing_comma._token_helpers import fix_brace
from add_trailing_comma._token_helpers import START_BRACES
def _changing_list(lst: list[Token]) -> Iterable[tuple[int, Token]]:
i = 0
while i < len(lst):
yield i, lst[i]
i += 1
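# Note (added): unlike enumerate(), _changing_list re-reads len(lst) on
# every step, so tokens that callbacks insert during iteration are
# visited as well.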
def _fix_src(contents_text: str, min_version: tuple[int, ...]) -> str:
try:
ast_obj = ast_parse(contents_text)
except SyntaxError:
return contents_text
callbacks = visit(FUNCS, ast_obj, min_version)
tokens = src_to_tokens(contents_text)
for i, token in _changing_list(tokens):
# DEDENT is a zero length token
if not token.src:
continue
# though this is a defaultdict, by using `.get()` this function's
# self time is almost 50% faster
for callback in callbacks.get(token.offset, ()):
callback(i, tokens)
if token.src in START_BRACES:
fix_brace(
tokens, find_simple(i, tokens),
add_comma=False,
remove_comma=False,
)
return tokens_to_src(tokens)
def fix_file(filename: str, args: argparse.Namespace) -> int:
if filename == '-':
contents_bytes = sys.stdin.buffer.read()
else:
with open(filename, 'rb') as fb:
contents_bytes = fb.read()
try:
contents_text_orig = contents_text = contents_bytes.decode()
except UnicodeDecodeError:
msg = f'{filename} is non-utf-8 (not supported)'
print(msg, file=sys.stderr)
return 1
contents_text = _fix_src(contents_text, args.min_version)
if filename == '-':
print(contents_text, end='')
elif contents_text != contents_text_orig:
print(f'Rewriting {filename}', file=sys.stderr)
with open(filename, 'wb') as f:
f.write(contents_text.encode())
if args.exit_zero_even_if_changed:
return 0
else:
return contents_text != contents_text_orig
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
parser.add_argument('--exit-zero-even-if-changed', action='store_true')
parser.add_argument(
'--py35-plus',
action='store_const', dest='min_version', const=(3, 5), default=(2, 7),
)
parser.add_argument(
'--py36-plus',
action='store_const', dest='min_version', const=(3, 6),
)
args = parser.parse_args(argv)
ret = 0
for filename in args.filenames:
ret |= fix_file(filename, args)
return ret
if __name__ == '__main__':
raise SystemExit(main())
|
asottile/add-trailing-comma
|
add_trailing_comma/_main.py
|
Python
|
mit
| 3,062
|
[
"VisIt"
] |
fde25e78fe623625d25936a3d1fc3c2d3be3cd58ee9fb718eb5dae9e7bf43f77
|
# Copyright 1999 by Jeffrey Chang. All rights reserved.
# Copyright 2000 by Jeffrey Chang. All rights reserved.
# Revisions Copyright 2007 by Peter Cock. All rights reserved.
# Revisions Copyright 2009 by Michiel de Hoon. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""
This module provides code to work with the prosite.dat file from
Prosite.
http://www.expasy.ch/prosite/
Tested with:
Release 20.43, 10-Feb-2009
Functions:
- read Reads a Prosite file containing one Prosite record
- parse Iterates over records in a Prosite file.
Classes:
- Record Holds Prosite data.
"""
__docformat__ = "restructuredtext en"
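# Editor's illustration (not part of the original module): a hedged usage
# sketch; "prosite.dat" stands in for a local copy of the PROSITE data file.
#
#     from Bio.ExPASy import Prosite
#     with open("prosite.dat") as handle:
#         for record in Prosite.parse(handle):
#             print record.accession, record.name, record.type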
def parse(handle):
"""Parse Prosite records.
This function is for parsing Prosite files containing multiple
records.
handle - handle to the file."""
while True:
record = __read(handle)
if not record:
break
yield record
def read(handle):
"""Read one Prosite record.
This function is for parsing Prosite files containing
exactly one record.
handle - handle to the file."""
record = __read(handle)
# We should have reached the end of the record by now
remainder = handle.read()
if remainder:
raise ValueError("More than one Prosite record found")
return record
class Record(object):
"""Holds information from a Prosite record.
Members:
- name ID of the record. e.g. ADH_ZINC
- type Type of entry. e.g. PATTERN, MATRIX, or RULE
- accession e.g. PS00387
- created Date the entry was created. (MMM-YYYY)
- data_update Date the 'primary' data was last updated.
- info_update Date data other than 'primary' data was last updated.
- pdoc ID of the PROSITE DOCumentation.
- description Free-format description.
- pattern The PROSITE pattern. See docs.
- matrix List of strings that describes a matrix entry.
- rules List of rule definitions (from RU lines). (strings)
- prorules List of prorules (from PR lines). (strings)
NUMERICAL RESULTS
- nr_sp_release SwissProt release.
- nr_sp_seqs Number of seqs in that release of Swiss-Prot. (int)
- nr_total Number of hits in Swiss-Prot. tuple of (hits, seqs)
- nr_positive True positives. tuple of (hits, seqs)
- nr_unknown Could be positives. tuple of (hits, seqs)
- nr_false_pos False positives. tuple of (hits, seqs)
- nr_false_neg False negatives. (int)
- nr_partial False negatives, because they are fragments. (int)
COMMENTS
- cc_taxo_range Taxonomic range. See docs for format
- cc_max_repeat Maximum number of repetitions in a protein
- cc_site Interesting site. list of tuples (pattern pos, desc.)
- cc_skip_flag Can this entry be ignored?
- cc_matrix_type
- cc_scaling_db
- cc_author
- cc_ft_key
- cc_ft_desc
- cc_version version number (introduced in release 19.0)
The following are all lists of tuples (swiss-prot accession, swiss-prot name).
DATA BANK REFERENCES
- dr_positive
- dr_false_neg
- dr_false_pos
- dr_potential Potential hits, but fingerprint region not yet available.
- dr_unknown Could possibly belong
- pdb_structs List of PDB entries.
"""
def __init__(self):
self.name = ''
self.type = ''
self.accession = ''
self.created = ''
self.data_update = ''
self.info_update = ''
self.pdoc = ''
self.description = ''
self.pattern = ''
self.matrix = []
self.rules = []
self.prorules = []
self.postprocessing = []
self.nr_sp_release = ''
self.nr_sp_seqs = ''
self.nr_total = (None, None)
self.nr_positive = (None, None)
self.nr_unknown = (None, None)
self.nr_false_pos = (None, None)
self.nr_false_neg = None
self.nr_partial = None
self.cc_taxo_range = ''
self.cc_max_repeat = ''
self.cc_site = []
self.cc_skip_flag = ''
self.dr_positive = []
self.dr_false_neg = []
self.dr_false_pos = []
self.dr_potential = []
self.dr_unknown = []
self.pdb_structs = []
# Everything below are private functions
def __read(handle):
import re
record = None
for line in handle:
keyword, value = line[:2], line[5:].rstrip()
if keyword == 'ID':
record = Record()
cols = value.split("; ")
if len(cols) != 2:
raise ValueError("I don't understand identification line\n%s"
% line)
record.name = cols[0]
record.type = cols[1].rstrip('.') # don't want '.'
elif keyword == 'AC':
record.accession = value.rstrip(';')
elif keyword == 'DT':
dates = value.rstrip('.').split("; ")
if (not dates[0].endswith('(CREATED)')) or \
(not dates[1].endswith('(DATA UPDATE)')) or \
(not dates[2].endswith('(INFO UPDATE)')):
raise ValueError("I don't understand date line\n%s" % line)
record.created = dates[0].rstrip(' (CREATED)')
record.data_update = dates[1].rstrip(' (DATA UPDATE)')
record.info_update = dates[2].rstrip(' (INFO UPDATE)')
elif keyword == 'DE':
record.description = value
elif keyword == 'PA':
record.pattern += value
elif keyword == 'MA':
record.matrix.append(value)
elif keyword == 'PP':
record.postprocessing.extend(value.split(";"))
elif keyword == 'RU':
record.rules.append(value)
elif keyword == 'NR':
cols = value.split(";")
for col in cols:
if not col:
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/RELEASE':
release, seqs = data.split(",")
record.nr_sp_release = release
record.nr_sp_seqs = int(seqs)
elif qual == '/FALSE_NEG':
record.nr_false_neg = int(data)
elif qual == '/PARTIAL':
record.nr_partial = int(data)
elif qual in ['/TOTAL', '/POSITIVE', '/UNKNOWN', '/FALSE_POS']:
m = re.match(r'(\d+)\((\d+)\)', data)
if not m:
raise Exception("Broken data %s in comment line\n%s"
% (repr(data), line))
hits = tuple(map(int, m.groups()))
if(qual == "/TOTAL"):
record.nr_total = hits
elif(qual == "/POSITIVE"):
record.nr_positive = hits
elif(qual == "/UNKNOWN"):
record.nr_unknown = hits
elif(qual == "/FALSE_POS"):
record.nr_false_pos = hits
else:
raise ValueError("Unknown qual %s in comment line\n%s"
% (repr(qual), line))
elif keyword == 'CC':
# Expect CC lines like this:
# CC /TAXO-RANGE=??EPV; /MAX-REPEAT=2;
# Can (normally) split on ";" and then on "="
cols = value.split(";")
for col in cols:
if not col or col[:17] == 'Automatic scaling':
# DNAJ_2 in Release 15 has a non-standard comment line:
# CC Automatic scaling using reversed database
# Throw it away. (Should I keep it?)
continue
if col.count("=") == 0:
# Missing qualifier! Can we recover gracefully?
# For example, from Bug 2403, in PS50293 have:
# CC /AUTHOR=K_Hofmann; N_Hulo
continue
qual, data = [word.lstrip() for word in col.split("=")]
if qual == '/TAXO-RANGE':
record.cc_taxo_range = data
elif qual == '/MAX-REPEAT':
record.cc_max_repeat = data
elif qual == '/SITE':
pos, desc = data.split(",")
record.cc_site.append((int(pos), desc))
elif qual == '/SKIP-FLAG':
record.cc_skip_flag = data
elif qual == '/MATRIX_TYPE':
record.cc_matrix_type = data
elif qual == '/SCALING_DB':
record.cc_scaling_db = data
elif qual == '/AUTHOR':
record.cc_author = data
elif qual == '/FT_KEY':
record.cc_ft_key = data
elif qual == '/FT_DESC':
record.cc_ft_desc = data
elif qual == '/VERSION':
record.cc_version = data
else:
raise ValueError("Unknown qual %s in comment line\n%s"
% (repr(qual), line))
elif keyword == 'DR':
refs = value.split(";")
for ref in refs:
if not ref:
continue
acc, name, type = [word.strip() for word in ref.split(",")]
if type == 'T':
record.dr_positive.append((acc, name))
elif type == 'F':
record.dr_false_pos.append((acc, name))
elif type == 'N':
record.dr_false_neg.append((acc, name))
elif type == 'P':
record.dr_potential.append((acc, name))
elif type == '?':
record.dr_unknown.append((acc, name))
else:
raise ValueError("I don't understand type flag %s" % type)
elif keyword == '3D':
cols = value.split()
for id in cols:
record.pdb_structs.append(id.rstrip(';'))
elif keyword == 'PR':
rules = value.split(";")
record.prorules.extend(rules)
elif keyword == 'DO':
record.pdoc = value.rstrip(';')
        elif keyword == 'CC':
            # Unreachable in practice: 'CC' lines are already consumed by
            # the branch above.
            continue
elif keyword == '//':
if not record:
# Then this was the copyright statement
continue
break
else:
raise ValueError("Unknown keyword %s found" % keyword)
else:
return
if not record:
raise ValueError("Unexpected end of stream.")
return record
|
poojavade/Genomics_Docker
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/ExPASy/Prosite.py
|
Python
|
apache-2.0
| 11,153
|
[
"Biopython"
] |
9ca026de31634278b42386d4682ef6bce229147c61b664dab5dddc149ad96e67
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import glob
import os.path
from spack import *
class Espresso(Package):
"""Quantum-ESPRESSO is an integrated suite of Open-Source computer codes
for electronic-structure calculations and materials modeling at the
nanoscale. It is based on density-functional theory, plane waves, and
pseudopotentials.
"""
homepage = 'http://quantum-espresso.org'
url = 'http://www.qe-forge.org/gf/download/frsrelease/204/912/espresso-5.3.0.tar.gz'
version(
'6.1.0',
'db398edcad76e085f8c8a3f6ecb7aaab',
url='http://www.qe-forge.org/gf/download/frsrelease/240/1075/qe-6.1.tar.gz'
)
version(
'5.4.0',
'8bb78181b39bd084ae5cb7a512c1cfe7',
url='http://www.qe-forge.org/gf/download/frsrelease/211/968/espresso-5.4.0.tar.gz'
)
version('5.3.0', '6848fcfaeb118587d6be36bd10b7f2c3')
variant('mpi', default=True, description='Builds with mpi support')
variant('openmp', default=False, description='Enables openMP support')
variant('scalapack', default=True, description='Enables scalapack support')
variant('elpa', default=True, description='Uses elpa as an eigenvalue solver')
    # Support for HDF5 was added in version 6.1.0 and is still
    # experimental, so the variant defaults to False
    variant('hdf5', default=False, description='Builds with HDF5 support')
depends_on('blas')
depends_on('lapack')
depends_on('mpi', when='+mpi')
depends_on('scalapack', when='+scalapack+mpi')
depends_on('fftw+mpi', when='+mpi')
depends_on('fftw~mpi', when='~mpi')
depends_on('elpa+openmp', when='+elpa+openmp')
depends_on('elpa~openmp', when='+elpa~openmp')
depends_on('hdf5', when='+hdf5')
patch('dspev_drv_elpa.patch', when='@6.1 ^elpa@2016.05.004')
patch('dspev_drv_elpa.patch', when='@6.1 ^elpa@2016.05.003')
# We can't ask for scalapack or elpa if we don't want MPI
conflicts(
'+scalapack',
when='~mpi',
msg='scalapack is a parallel library and needs MPI support'
)
conflicts(
'+elpa',
when='~mpi',
msg='elpa is a parallel library and needs MPI support'
)
# Elpa is formally supported by @:5.4.0, but QE configure searches
# for it in the wrong folders (or tries to download it within
# the build directory). Instead of patching Elpa to provide the
# folder QE expects as a link, we issue a conflict here.
conflicts('+elpa', when='@:5.4.0')
conflicts('+hdf5', when='@:5.4.0')
    # Running the Makefile generated by configure in parallel causes
    # spurious failures, so force a serial build
parallel = False
def install(self, spec, prefix):
prefix_path = prefix.bin if '@:5.4.0' in spec else prefix
options = ['-prefix={0}'.format(prefix_path)]
if '+mpi' in spec:
options.append('--enable-parallel=yes')
else:
options.append('--enable-parallel=no')
if '+openmp' in spec:
options.append('--enable-openmp')
if '+scalapack' in spec:
scalapack_option = 'intel' if '^intel-mkl' in spec else 'yes'
options.append('--with-scalapack={0}'.format(scalapack_option))
if '+elpa' in spec:
# Spec for elpa
elpa = spec['elpa']
# Find where the Fortran module resides
elpa_module = find(elpa.prefix, 'elpa.mod')
            # Compute the include directory from there: versions
            # of espresso prior to 6.1 require -I in front of the directory
elpa_include = '' if '@6.1:' in spec else '-I'
elpa_include += os.path.dirname(elpa_module[0])
options.extend([
'--with-elpa-include={0}'.format(elpa_include),
'--with-elpa-lib={0}'.format(elpa.libs[0])
])
if '+hdf5' in spec:
options.append('--with-hdf5={0}'.format(spec['hdf5'].prefix))
# Add a list of directories to search
search_list = []
for dependency_spec in spec.dependencies():
search_list.extend([
dependency_spec.prefix.lib,
dependency_spec.prefix.lib64
])
search_list = " ".join(search_list)
options.extend([
'LIBDIRS={0}'.format(search_list),
'F90={0}'.format(env['SPACK_FC']),
'CC={0}'.format(env['SPACK_CC'])
])
configure(*options)
make('all')
if 'platform=darwin' in spec:
mkdirp(prefix.bin)
for filename in glob.glob("bin/*.x"):
install(filename, prefix.bin)
else:
make('install')
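# Editor's illustration (not part of the upstream recipe): hedged examples of
# spec strings this package is meant to satisfy from the command line:
#
#   spack install espresso +mpi +scalapack           # MPI build with ScaLAPACK
#   spack install espresso@6.1.0 +hdf5 ^intel-mkl    # HDF5 build against MKL
#   spack install espresso ~mpi ~scalapack ~elpa     # serial build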
|
wscullin/spack
|
var/spack/repos/builtin/packages/espresso/package.py
|
Python
|
lgpl-2.1
| 5,919
|
[
"ESPResSo"
] |
31173d3e3983a93ef1b63633bd5087568594b366722ceeee0ac6eee9ec4430a8
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: pycodegen.py
import imp
import os
import marshal
import struct
import sys
from cStringIO import StringIO
from compiler import ast, parse, walk, syntax
from compiler import pyassem, misc, future, symbols
from compiler.consts import SC_LOCAL, SC_GLOBAL_IMPLICIT, SC_GLOBAL_EXPLICT, SC_FREE, SC_CELL
from compiler.consts import CO_VARARGS, CO_VARKEYWORDS, CO_NEWLOCALS, CO_NESTED, CO_GENERATOR, CO_FUTURE_DIVISION, CO_FUTURE_ABSIMPORT, CO_FUTURE_WITH_STATEMENT, CO_FUTURE_PRINT_FUNCTION
from compiler.pyassem import TupleArg
try:
VERSION = sys.version_info[0]
except AttributeError:
VERSION = 1
callfunc_opcode_info = {(0, 0): 'CALL_FUNCTION',
(1, 0): 'CALL_FUNCTION_VAR',
(0, 1): 'CALL_FUNCTION_KW',
(1, 1): 'CALL_FUNCTION_VAR_KW'
}
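# Editor's note (added): the table above is keyed by the pair
# (has_star_args, has_dstar_args), so a call like f(a, b=1, *rest, **kw)
# selects callfunc_opcode_info[(1, 1)] == 'CALL_FUNCTION_VAR_KW'
# (see visitCallFunc below).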
LOOP = 1
EXCEPT = 2
TRY_FINALLY = 3
END_FINALLY = 4
def compileFile(filename, display=0):
f = open(filename, 'U')
buf = f.read()
f.close()
mod = Module(buf, filename)
try:
mod.compile(display)
except SyntaxError:
raise
else:
f = open(filename + 'c', 'wb')
mod.dump(f)
f.close()
def compile(source, filename, mode, flags=None, dont_inherit=None):
"""Replacement for builtin compile() function"""
if flags is not None or dont_inherit is not None:
raise RuntimeError, 'not implemented yet'
if mode == 'single':
gen = Interactive(source, filename)
elif mode == 'exec':
gen = Module(source, filename)
elif mode == 'eval':
gen = Expression(source, filename)
else:
raise ValueError("compile() 3rd arg must be 'exec' or 'eval' or 'single'")
gen.compile()
return gen.code
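# Editor's illustration (not part of the original module): the replacement
# compile() above mirrors the builtin's interface, e.g. (hedged sketch):
#
#     code = compile('x + 1', '<demo>', 'eval')
#     print eval(code, {'x': 41})     # -> 42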
class AbstractCompileMode():
mode = None
def __init__(self, source, filename):
self.source = source
self.filename = filename
self.code = None
return
def _get_tree(self):
tree = parse(self.source, self.mode)
misc.set_filename(self.filename, tree)
syntax.check(tree)
return tree
def compile(self):
pass
def getCode(self):
return self.code
class Expression(AbstractCompileMode):
mode = 'eval'
def compile(self):
tree = self._get_tree()
gen = ExpressionCodeGenerator(tree)
self.code = gen.getCode()
class Interactive(AbstractCompileMode):
mode = 'single'
def compile(self):
tree = self._get_tree()
gen = InteractiveCodeGenerator(tree)
self.code = gen.getCode()
class Module(AbstractCompileMode):
mode = 'exec'
def compile(self, display=0):
tree = self._get_tree()
gen = ModuleCodeGenerator(tree)
        if display:
            import pprint
            pprint.pprint(tree)  # pprint prints itself; a bare `print` here would add a stray None
self.code = gen.getCode()
def dump(self, f):
f.write(self.getPycHeader())
marshal.dump(self.code, f)
MAGIC = imp.get_magic()
def getPycHeader(self):
mtime = os.path.getmtime(self.filename)
mtime = struct.pack('<i', mtime)
return self.MAGIC + mtime
class LocalNameFinder():
"""Find local names in scope"""
def __init__(self, names=()):
self.names = misc.Set()
self.globals = misc.Set()
for name in names:
self.names.add(name)
def getLocals(self):
for elt in self.globals.elements():
if self.names.has_elt(elt):
self.names.remove(elt)
return self.names
def visitDict(self, node):
pass
def visitGlobal(self, node):
for name in node.names:
self.globals.add(name)
def visitFunction(self, node):
self.names.add(node.name)
def visitLambda(self, node):
pass
def visitImport(self, node):
for name, alias in node.names:
self.names.add(alias or name)
def visitFrom(self, node):
for name, alias in node.names:
self.names.add(alias or name)
def visitClass(self, node):
self.names.add(node.name)
def visitAssName(self, node):
self.names.add(node.name)
def is_constant_false(node):
if isinstance(node, ast.Const):
if not node.value:
return 1
return 0
class CodeGenerator():
"""Defines basic code generator for Python bytecode
This class is an abstract base class. Concrete subclasses must
define an __init__() that defines self.graph and then calls the
__init__() defined in this class.
The concrete class must also define the class attributes
NameFinder, FunctionGen, and ClassGen. These attributes can be
defined in the initClass() method, which is a hook for
initializing these methods after all the classes have been
defined.
"""
optimized = 0
__initialized = None
class_name = None
def __init__(self):
if self.__initialized is None:
self.initClass()
self.__class__.__initialized = 1
self.checkClass()
self.locals = misc.Stack()
self.setups = misc.Stack()
self.last_lineno = None
self._setupGraphDelegation()
self._div_op = 'BINARY_DIVIDE'
futures = self.get_module().futures
for feature in futures:
if feature == 'division':
self.graph.setFlag(CO_FUTURE_DIVISION)
self._div_op = 'BINARY_TRUE_DIVIDE'
elif feature == 'absolute_import':
self.graph.setFlag(CO_FUTURE_ABSIMPORT)
elif feature == 'with_statement':
self.graph.setFlag(CO_FUTURE_WITH_STATEMENT)
elif feature == 'print_function':
self.graph.setFlag(CO_FUTURE_PRINT_FUNCTION)
return
def initClass(self):
"""This method is called once for each class"""
pass
def checkClass(self):
"""Verify that class is constructed correctly"""
        try:
            # The decompiler reduced the original assert block to `pass`
            # (asserts vanish under -O); restored here from the CPython
            # compiler package source.
            assert hasattr(self, 'graph')
            assert getattr(self, 'NameFinder')
            assert getattr(self, 'FunctionGen')
            assert getattr(self, 'ClassGen')
        except AssertionError as msg:
            intro = 'Bad class construction for %s' % self.__class__.__name__
            raise AssertionError, intro
def _setupGraphDelegation(self):
self.emit = self.graph.emit
self.newBlock = self.graph.newBlock
self.startBlock = self.graph.startBlock
self.nextBlock = self.graph.nextBlock
self.setDocstring = self.graph.setDocstring
def getCode(self):
"""Return a code object"""
return self.graph.getCode()
def mangle(self, name):
if self.class_name is not None:
return misc.mangle(name, self.class_name)
else:
return name
return
def parseSymbols(self, tree):
s = symbols.SymbolVisitor()
walk(tree, s)
return s.scopes
def get_module(self):
raise RuntimeError, 'should be implemented by subclasses'
def isLocalName(self, name):
return self.locals.top().has_elt(name)
def storeName(self, name):
self._nameOp('STORE', name)
def loadName(self, name):
self._nameOp('LOAD', name)
def delName(self, name):
self._nameOp('DELETE', name)
def _nameOp(self, prefix, name):
name = self.mangle(name)
scope = self.scope.check_name(name)
if scope == SC_LOCAL:
if not self.optimized:
self.emit(prefix + '_NAME', name)
else:
self.emit(prefix + '_FAST', name)
elif scope == SC_GLOBAL_EXPLICT:
self.emit(prefix + '_GLOBAL', name)
elif scope == SC_GLOBAL_IMPLICIT:
if not self.optimized:
self.emit(prefix + '_NAME', name)
else:
self.emit(prefix + '_GLOBAL', name)
elif scope == SC_FREE or scope == SC_CELL:
self.emit(prefix + '_DEREF', name)
else:
raise RuntimeError, 'unsupported scope for var %s: %d' % (
name, scope)
def _implicitNameOp(self, prefix, name):
"""Emit name ops for names generated implicitly by for loops
The interpreter generates names that start with a period or
dollar sign. The symbol table ignores these names because
they aren't present in the program text.
"""
if self.optimized:
self.emit(prefix + '_FAST', name)
else:
self.emit(prefix + '_NAME', name)
def set_lineno(self, node, force=False):
"""Emit SET_LINENO if necessary.
The instruction is considered necessary if the node has a
lineno attribute and it is different than the last lineno
emitted.
Returns true if SET_LINENO was emitted.
There are no rules for when an AST node should have a lineno
attribute. The transformer and AST code need to be reviewed
and a consistent policy implemented and documented. Until
then, this method works around missing line numbers.
"""
lineno = getattr(node, 'lineno', None)
if lineno is not None and (lineno != self.last_lineno or force):
self.emit('SET_LINENO', lineno)
self.last_lineno = lineno
return True
else:
return False
NameFinder = LocalNameFinder
FunctionGen = None
ClassGen = None
def visitModule(self, node):
self.scopes = self.parseSymbols(node)
self.scope = self.scopes[node]
self.emit('SET_LINENO', 0)
if node.doc:
self.emit('LOAD_CONST', node.doc)
self.storeName('__doc__')
lnf = walk(node.node, self.NameFinder(), verbose=0)
self.locals.push(lnf.getLocals())
self.visit(node.node)
self.emit('LOAD_CONST', None)
self.emit('RETURN_VALUE')
return
def visitExpression(self, node):
self.set_lineno(node)
self.scopes = self.parseSymbols(node)
self.scope = self.scopes[node]
self.visit(node.node)
self.emit('RETURN_VALUE')
def visitFunction(self, node):
self._visitFuncOrLambda(node, isLambda=0)
if node.doc:
self.setDocstring(node.doc)
self.storeName(node.name)
def visitLambda(self, node):
self._visitFuncOrLambda(node, isLambda=1)
def _visitFuncOrLambda(self, node, isLambda=0):
if not isLambda and node.decorators:
for decorator in node.decorators.nodes:
self.visit(decorator)
ndecorators = len(node.decorators.nodes)
else:
ndecorators = 0
gen = self.FunctionGen(node, self.scopes, isLambda, self.class_name, self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
for default in node.defaults:
self.visit(default)
self._makeClosure(gen, len(node.defaults))
for i in range(ndecorators):
self.emit('CALL_FUNCTION', 1)
def visitClass(self, node):
gen = self.ClassGen(node, self.scopes, self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
self.emit('LOAD_CONST', node.name)
for base in node.bases:
self.visit(base)
self.emit('BUILD_TUPLE', len(node.bases))
self._makeClosure(gen, 0)
self.emit('CALL_FUNCTION', 0)
self.emit('BUILD_CLASS')
self.storeName(node.name)
def visitIf(self, node):
end = self.newBlock()
numtests = len(node.tests)
for i in range(numtests):
test, suite = node.tests[i]
if is_constant_false(test):
continue
self.set_lineno(test)
self.visit(test)
nextTest = self.newBlock()
self.emit('POP_JUMP_IF_FALSE', nextTest)
self.nextBlock()
self.visit(suite)
self.emit('JUMP_FORWARD', end)
self.startBlock(nextTest)
if node.else_:
self.visit(node.else_)
self.nextBlock(end)
def visitWhile(self, node):
self.set_lineno(node)
loop = self.newBlock()
else_ = self.newBlock()
after = self.newBlock()
self.emit('SETUP_LOOP', after)
self.nextBlock(loop)
self.setups.push((LOOP, loop))
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('POP_JUMP_IF_FALSE', else_ or after)
self.nextBlock()
self.visit(node.body)
self.emit('JUMP_ABSOLUTE', loop)
self.startBlock(else_)
self.emit('POP_BLOCK')
self.setups.pop()
if node.else_:
self.visit(node.else_)
self.nextBlock(after)
def visitFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
after = self.newBlock()
self.setups.push((LOOP, start))
self.set_lineno(node)
self.emit('SETUP_LOOP', after)
self.visit(node.list)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=1)
self.emit('FOR_ITER', anchor)
self.visit(node.assign)
self.visit(node.body)
self.emit('JUMP_ABSOLUTE', start)
self.nextBlock(anchor)
self.emit('POP_BLOCK')
self.setups.pop()
if node.else_:
self.visit(node.else_)
self.nextBlock(after)
def visitBreak(self, node):
if not self.setups:
raise SyntaxError, "'break' outside loop (%s, %d)" % (
node.filename, node.lineno)
self.set_lineno(node)
self.emit('BREAK_LOOP')
def visitContinue(self, node):
if not self.setups:
raise SyntaxError, "'continue' outside loop (%s, %d)" % (
node.filename, node.lineno)
kind, block = self.setups.top()
if kind == LOOP:
self.set_lineno(node)
self.emit('JUMP_ABSOLUTE', block)
self.nextBlock()
elif kind == EXCEPT or kind == TRY_FINALLY:
self.set_lineno(node)
top = len(self.setups)
while top > 0:
top = top - 1
kind, loop_block = self.setups[top]
if kind == LOOP:
break
if kind != LOOP:
raise SyntaxError, "'continue' outside loop (%s, %d)" % (
node.filename, node.lineno)
self.emit('CONTINUE_LOOP', loop_block)
self.nextBlock()
elif kind == END_FINALLY:
msg = "'continue' not allowed inside 'finally' clause (%s, %d)"
raise SyntaxError, msg % (node.filename, node.lineno)
def visitTest(self, node, jump):
end = self.newBlock()
for child in node.nodes[:-1]:
self.visit(child)
self.emit(jump, end)
self.nextBlock()
self.visit(node.nodes[-1])
self.nextBlock(end)
def visitAnd(self, node):
self.visitTest(node, 'JUMP_IF_FALSE_OR_POP')
def visitOr(self, node):
self.visitTest(node, 'JUMP_IF_TRUE_OR_POP')
def visitIfExp(self, node):
endblock = self.newBlock()
elseblock = self.newBlock()
self.visit(node.test)
self.emit('POP_JUMP_IF_FALSE', elseblock)
self.visit(node.then)
self.emit('JUMP_FORWARD', endblock)
self.nextBlock(elseblock)
self.visit(node.else_)
self.nextBlock(endblock)
def visitCompare(self, node):
self.visit(node.expr)
cleanup = self.newBlock()
for op, code in node.ops[:-1]:
self.visit(code)
self.emit('DUP_TOP')
self.emit('ROT_THREE')
self.emit('COMPARE_OP', op)
self.emit('JUMP_IF_FALSE_OR_POP', cleanup)
self.nextBlock()
if node.ops:
op, code = node.ops[-1]
self.visit(code)
self.emit('COMPARE_OP', op)
if len(node.ops) > 1:
end = self.newBlock()
self.emit('JUMP_FORWARD', end)
self.startBlock(cleanup)
self.emit('ROT_TWO')
self.emit('POP_TOP')
self.nextBlock(end)
def visitListComp(self, node):
self.set_lineno(node)
self.emit('BUILD_LIST', 0)
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor))
self.visit(node.expr)
self.emit('LIST_APPEND', len(node.quals) + 1)
for start, cont, anchor in stack:
if cont:
self.nextBlock(cont)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
return
def visitSetComp(self, node):
self.set_lineno(node)
self.emit('BUILD_SET', 0)
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor))
self.visit(node.expr)
self.emit('SET_ADD', len(node.quals) + 1)
for start, cont, anchor in stack:
if cont:
self.nextBlock(cont)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
return
def visitDictComp(self, node):
self.set_lineno(node)
self.emit('BUILD_MAP', 0)
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor))
self.visit(node.value)
self.visit(node.key)
self.emit('MAP_ADD', len(node.quals) + 1)
for start, cont, anchor in stack:
if cont:
self.nextBlock(cont)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
return
def visitListCompFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
self.visit(node.list)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=True)
self.emit('FOR_ITER', anchor)
self.nextBlock()
self.visit(node.assign)
return (
start, anchor)
def visitListCompIf(self, node, branch):
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('POP_JUMP_IF_FALSE', branch)
self.newBlock()
def _makeClosure(self, gen, args):
frees = gen.scope.get_free_vars()
if frees:
for name in frees:
self.emit('LOAD_CLOSURE', name)
self.emit('BUILD_TUPLE', len(frees))
self.emit('LOAD_CONST', gen)
self.emit('MAKE_CLOSURE', args)
else:
self.emit('LOAD_CONST', gen)
self.emit('MAKE_FUNCTION', args)
def visitGenExpr(self, node):
gen = GenExprCodeGenerator(node, self.scopes, self.class_name, self.get_module())
walk(node.code, gen)
gen.finish()
self.set_lineno(node)
self._makeClosure(gen, 0)
self.visit(node.code.quals[0].iter)
self.emit('GET_ITER')
self.emit('CALL_FUNCTION', 1)
def visitGenExprInner(self, node):
self.set_lineno(node)
stack = []
for i, for_ in zip(range(len(node.quals)), node.quals):
start, anchor, end = self.visit(for_)
cont = None
for if_ in for_.ifs:
if cont is None:
cont = self.newBlock()
self.visit(if_, cont)
stack.insert(0, (start, cont, anchor, end))
self.visit(node.expr)
self.emit('YIELD_VALUE')
self.emit('POP_TOP')
for start, cont, anchor, end in stack:
if cont:
self.nextBlock(cont)
self.emit('JUMP_ABSOLUTE', start)
self.startBlock(anchor)
self.emit('POP_BLOCK')
self.setups.pop()
self.nextBlock(end)
self.emit('LOAD_CONST', None)
return
def visitGenExprFor(self, node):
start = self.newBlock()
anchor = self.newBlock()
end = self.newBlock()
self.setups.push((LOOP, start))
self.emit('SETUP_LOOP', end)
if node.is_outmost:
self.loadName('.0')
else:
self.visit(node.iter)
self.emit('GET_ITER')
self.nextBlock(start)
self.set_lineno(node, force=True)
self.emit('FOR_ITER', anchor)
self.nextBlock()
self.visit(node.assign)
return (
start, anchor, end)
def visitGenExprIf(self, node, branch):
self.set_lineno(node, force=True)
self.visit(node.test)
self.emit('POP_JUMP_IF_FALSE', branch)
self.newBlock()
    def visitAssert(self, node):
        # Empty by construction: assert bodies are compiled out under -O, so
        # the bytecode this module was decompiled from has nothing here.
        pass
def visitRaise(self, node):
self.set_lineno(node)
n = 0
if node.expr1:
self.visit(node.expr1)
n = n + 1
if node.expr2:
self.visit(node.expr2)
n = n + 1
if node.expr3:
self.visit(node.expr3)
n = n + 1
self.emit('RAISE_VARARGS', n)
def visitTryExcept(self, node):
body = self.newBlock()
handlers = self.newBlock()
end = self.newBlock()
if node.else_:
lElse = self.newBlock()
else:
lElse = end
self.set_lineno(node)
self.emit('SETUP_EXCEPT', handlers)
self.nextBlock(body)
self.setups.push((EXCEPT, body))
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('JUMP_FORWARD', lElse)
self.startBlock(handlers)
last = len(node.handlers) - 1
for i in range(len(node.handlers)):
expr, target, body = node.handlers[i]
self.set_lineno(expr)
if expr:
self.emit('DUP_TOP')
self.visit(expr)
self.emit('COMPARE_OP', 'exception match')
next = self.newBlock()
self.emit('POP_JUMP_IF_FALSE', next)
self.nextBlock()
self.emit('POP_TOP')
if target:
self.visit(target)
else:
self.emit('POP_TOP')
self.emit('POP_TOP')
self.visit(body)
self.emit('JUMP_FORWARD', end)
if expr:
self.nextBlock(next)
else:
self.nextBlock()
self.emit('END_FINALLY')
if node.else_:
self.nextBlock(lElse)
self.visit(node.else_)
self.nextBlock(end)
def visitTryFinally(self, node):
body = self.newBlock()
final = self.newBlock()
self.set_lineno(node)
self.emit('SETUP_FINALLY', final)
self.nextBlock(body)
self.setups.push((TRY_FINALLY, body))
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('LOAD_CONST', None)
self.nextBlock(final)
self.setups.push((END_FINALLY, final))
self.visit(node.final)
self.emit('END_FINALLY')
self.setups.pop()
return
__with_count = 0
def visitWith(self, node):
body = self.newBlock()
final = self.newBlock()
self.__with_count += 1
valuevar = '_[%d]' % self.__with_count
self.set_lineno(node)
self.visit(node.expr)
self.emit('DUP_TOP')
self.emit('LOAD_ATTR', '__exit__')
self.emit('ROT_TWO')
self.emit('LOAD_ATTR', '__enter__')
self.emit('CALL_FUNCTION', 0)
if node.vars is None:
self.emit('POP_TOP')
else:
self._implicitNameOp('STORE', valuevar)
self.emit('SETUP_FINALLY', final)
self.nextBlock(body)
self.setups.push((TRY_FINALLY, body))
if node.vars is not None:
self._implicitNameOp('LOAD', valuevar)
self._implicitNameOp('DELETE', valuevar)
self.visit(node.vars)
self.visit(node.body)
self.emit('POP_BLOCK')
self.setups.pop()
self.emit('LOAD_CONST', None)
self.nextBlock(final)
self.setups.push((END_FINALLY, final))
self.emit('WITH_CLEANUP')
self.emit('END_FINALLY')
self.setups.pop()
self.__with_count -= 1
return
def visitDiscard(self, node):
self.set_lineno(node)
self.visit(node.expr)
self.emit('POP_TOP')
def visitConst(self, node):
self.emit('LOAD_CONST', node.value)
def visitKeyword(self, node):
self.emit('LOAD_CONST', node.name)
self.visit(node.expr)
def visitGlobal(self, node):
pass
def visitName(self, node):
self.set_lineno(node)
self.loadName(node.name)
def visitPass(self, node):
self.set_lineno(node)
def visitImport(self, node):
self.set_lineno(node)
level = 0 if self.graph.checkFlag(CO_FUTURE_ABSIMPORT) else -1
for name, alias in node.names:
if VERSION > 1:
self.emit('LOAD_CONST', level)
self.emit('LOAD_CONST', None)
self.emit('IMPORT_NAME', name)
mod = name.split('.')[0]
if alias:
self._resolveDots(name)
self.storeName(alias)
else:
self.storeName(mod)
return
def visitFrom(self, node):
self.set_lineno(node)
level = node.level
if level == 0 and not self.graph.checkFlag(CO_FUTURE_ABSIMPORT):
level = -1
fromlist = tuple((name for name, alias in node.names))
if VERSION > 1:
self.emit('LOAD_CONST', level)
self.emit('LOAD_CONST', fromlist)
self.emit('IMPORT_NAME', node.modname)
for name, alias in node.names:
if VERSION > 1:
if name == '*':
self.namespace = 0
self.emit('IMPORT_STAR')
return
self.emit('IMPORT_FROM', name)
self._resolveDots(name)
self.storeName(alias or name)
else:
self.emit('IMPORT_FROM', name)
self.emit('POP_TOP')
def _resolveDots(self, name):
elts = name.split('.')
if len(elts) == 1:
return
for elt in elts[1:]:
self.emit('LOAD_ATTR', elt)
def visitGetattr(self, node):
self.visit(node.expr)
self.emit('LOAD_ATTR', self.mangle(node.attrname))
def visitAssign(self, node):
self.set_lineno(node)
self.visit(node.expr)
dups = len(node.nodes) - 1
for i in range(len(node.nodes)):
elt = node.nodes[i]
if i < dups:
self.emit('DUP_TOP')
if isinstance(elt, ast.Node):
self.visit(elt)
def visitAssName(self, node):
if node.flags == 'OP_ASSIGN':
self.storeName(node.name)
elif node.flags == 'OP_DELETE':
self.set_lineno(node)
self.delName(node.name)
else:
print 'oops', node.flags
def visitAssAttr(self, node):
self.visit(node.expr)
if node.flags == 'OP_ASSIGN':
self.emit('STORE_ATTR', self.mangle(node.attrname))
elif node.flags == 'OP_DELETE':
self.emit('DELETE_ATTR', self.mangle(node.attrname))
else:
print 'warning: unexpected flags:', node.flags
print node
def _visitAssSequence(self, node, op='UNPACK_SEQUENCE'):
if findOp(node) != 'OP_DELETE':
self.emit(op, len(node.nodes))
for child in node.nodes:
self.visit(child)
if VERSION > 1:
visitAssTuple = _visitAssSequence
visitAssList = _visitAssSequence
else:
def visitAssTuple(self, node):
self._visitAssSequence(node, 'UNPACK_TUPLE')
def visitAssList(self, node):
self._visitAssSequence(node, 'UNPACK_LIST')
def visitAugAssign(self, node):
self.set_lineno(node)
aug_node = wrap_aug(node.node)
self.visit(aug_node, 'load')
self.visit(node.expr)
self.emit(self._augmented_opcode[node.op])
self.visit(aug_node, 'store')
_augmented_opcode = {'+=': 'INPLACE_ADD',
'-=': 'INPLACE_SUBTRACT',
'*=': 'INPLACE_MULTIPLY',
'/=': 'INPLACE_DIVIDE',
'//=': 'INPLACE_FLOOR_DIVIDE',
'%=': 'INPLACE_MODULO',
'**=': 'INPLACE_POWER',
'>>=': 'INPLACE_RSHIFT',
'<<=': 'INPLACE_LSHIFT',
'&=': 'INPLACE_AND',
'^=': 'INPLACE_XOR',
'|=': 'INPLACE_OR'
}
def visitAugName(self, node, mode):
if mode == 'load':
self.loadName(node.name)
elif mode == 'store':
self.storeName(node.name)
def visitAugGetattr(self, node, mode):
if mode == 'load':
self.visit(node.expr)
self.emit('DUP_TOP')
self.emit('LOAD_ATTR', self.mangle(node.attrname))
elif mode == 'store':
self.emit('ROT_TWO')
self.emit('STORE_ATTR', self.mangle(node.attrname))
def visitAugSlice(self, node, mode):
if mode == 'load':
self.visitSlice(node, 1)
elif mode == 'store':
slice = 0
if node.lower:
slice = slice | 1
if node.upper:
slice = slice | 2
if slice == 0:
self.emit('ROT_TWO')
elif slice == 3:
self.emit('ROT_FOUR')
else:
self.emit('ROT_THREE')
self.emit('STORE_SLICE+%d' % slice)
def visitAugSubscript(self, node, mode):
if mode == 'load':
self.visitSubscript(node, 1)
elif mode == 'store':
self.emit('ROT_THREE')
self.emit('STORE_SUBSCR')
def visitExec(self, node):
self.visit(node.expr)
if node.locals is None:
self.emit('LOAD_CONST', None)
else:
self.visit(node.locals)
if node.globals is None:
self.emit('DUP_TOP')
else:
self.visit(node.globals)
self.emit('EXEC_STMT')
return
def visitCallFunc(self, node):
pos = 0
kw = 0
self.set_lineno(node)
self.visit(node.node)
for arg in node.args:
self.visit(arg)
if isinstance(arg, ast.Keyword):
kw = kw + 1
else:
pos = pos + 1
if node.star_args is not None:
self.visit(node.star_args)
if node.dstar_args is not None:
self.visit(node.dstar_args)
have_star = node.star_args is not None
have_dstar = node.dstar_args is not None
opcode = callfunc_opcode_info[have_star, have_dstar]
self.emit(opcode, kw << 8 | pos)
return
def visitPrint(self, node, newline=0):
self.set_lineno(node)
if node.dest:
self.visit(node.dest)
for child in node.nodes:
if node.dest:
self.emit('DUP_TOP')
self.visit(child)
if node.dest:
self.emit('ROT_TWO')
self.emit('PRINT_ITEM_TO')
else:
self.emit('PRINT_ITEM')
if node.dest and not newline:
self.emit('POP_TOP')
def visitPrintnl(self, node):
self.visitPrint(node, newline=1)
if node.dest:
self.emit('PRINT_NEWLINE_TO')
else:
self.emit('PRINT_NEWLINE')
def visitReturn(self, node):
self.set_lineno(node)
self.visit(node.value)
self.emit('RETURN_VALUE')
def visitYield(self, node):
self.set_lineno(node)
self.visit(node.value)
self.emit('YIELD_VALUE')
def visitSlice(self, node, aug_flag=None):
self.visit(node.expr)
slice = 0
if node.lower:
self.visit(node.lower)
slice = slice | 1
if node.upper:
self.visit(node.upper)
slice = slice | 2
if aug_flag:
if slice == 0:
self.emit('DUP_TOP')
elif slice == 3:
self.emit('DUP_TOPX', 3)
else:
self.emit('DUP_TOPX', 2)
if node.flags == 'OP_APPLY':
self.emit('SLICE+%d' % slice)
elif node.flags == 'OP_ASSIGN':
self.emit('STORE_SLICE+%d' % slice)
elif node.flags == 'OP_DELETE':
self.emit('DELETE_SLICE+%d' % slice)
else:
print 'weird slice', node.flags
raise
def visitSubscript(self, node, aug_flag=None):
self.visit(node.expr)
for sub in node.subs:
self.visit(sub)
if len(node.subs) > 1:
self.emit('BUILD_TUPLE', len(node.subs))
if aug_flag:
self.emit('DUP_TOPX', 2)
if node.flags == 'OP_APPLY':
self.emit('BINARY_SUBSCR')
elif node.flags == 'OP_ASSIGN':
self.emit('STORE_SUBSCR')
elif node.flags == 'OP_DELETE':
self.emit('DELETE_SUBSCR')
def binaryOp(self, node, op):
self.visit(node.left)
self.visit(node.right)
self.emit(op)
def visitAdd(self, node):
return self.binaryOp(node, 'BINARY_ADD')
def visitSub(self, node):
return self.binaryOp(node, 'BINARY_SUBTRACT')
def visitMul(self, node):
return self.binaryOp(node, 'BINARY_MULTIPLY')
def visitDiv(self, node):
return self.binaryOp(node, self._div_op)
def visitFloorDiv(self, node):
return self.binaryOp(node, 'BINARY_FLOOR_DIVIDE')
def visitMod(self, node):
return self.binaryOp(node, 'BINARY_MODULO')
def visitPower(self, node):
return self.binaryOp(node, 'BINARY_POWER')
def visitLeftShift(self, node):
return self.binaryOp(node, 'BINARY_LSHIFT')
def visitRightShift(self, node):
return self.binaryOp(node, 'BINARY_RSHIFT')
def unaryOp(self, node, op):
self.visit(node.expr)
self.emit(op)
def visitInvert(self, node):
return self.unaryOp(node, 'UNARY_INVERT')
def visitUnarySub(self, node):
return self.unaryOp(node, 'UNARY_NEGATIVE')
def visitUnaryAdd(self, node):
return self.unaryOp(node, 'UNARY_POSITIVE')
def visitUnaryInvert(self, node):
return self.unaryOp(node, 'UNARY_INVERT')
def visitNot(self, node):
return self.unaryOp(node, 'UNARY_NOT')
def visitBackquote(self, node):
return self.unaryOp(node, 'UNARY_CONVERT')
def bitOp(self, nodes, op):
self.visit(nodes[0])
for node in nodes[1:]:
self.visit(node)
self.emit(op)
def visitBitand(self, node):
return self.bitOp(node.nodes, 'BINARY_AND')
def visitBitor(self, node):
return self.bitOp(node.nodes, 'BINARY_OR')
def visitBitxor(self, node):
return self.bitOp(node.nodes, 'BINARY_XOR')
def visitEllipsis(self, node):
self.emit('LOAD_CONST', Ellipsis)
def visitTuple(self, node):
self.set_lineno(node)
for elt in node.nodes:
self.visit(elt)
self.emit('BUILD_TUPLE', len(node.nodes))
def visitList(self, node):
self.set_lineno(node)
for elt in node.nodes:
self.visit(elt)
self.emit('BUILD_LIST', len(node.nodes))
def visitSet(self, node):
self.set_lineno(node)
for elt in node.nodes:
self.visit(elt)
self.emit('BUILD_SET', len(node.nodes))
def visitSliceobj(self, node):
for child in node.nodes:
self.visit(child)
self.emit('BUILD_SLICE', len(node.nodes))
def visitDict(self, node):
self.set_lineno(node)
self.emit('BUILD_MAP', 0)
for k, v in node.items:
self.emit('DUP_TOP')
self.visit(k)
self.visit(v)
self.emit('ROT_THREE')
self.emit('STORE_SUBSCR')
class NestedScopeMixin():
"""Defines initClass() for nested scoping (Python 2.2-compatible)"""
def initClass(self):
self.__class__.NameFinder = LocalNameFinder
self.__class__.FunctionGen = FunctionCodeGenerator
self.__class__.ClassGen = ClassCodeGenerator
class ModuleCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph('<module>', tree.filename)
self.futures = future.find_futures(tree)
self.__super_init()
walk(tree, self)
def get_module(self):
return self
class ExpressionCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
futures = ()
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph('<expression>', tree.filename)
self.__super_init()
walk(tree, self)
def get_module(self):
return self
class InteractiveCodeGenerator(NestedScopeMixin, CodeGenerator):
__super_init = CodeGenerator.__init__
scopes = None
futures = ()
def __init__(self, tree):
self.graph = pyassem.PyFlowGraph('<interactive>', tree.filename)
self.__super_init()
self.set_lineno(tree)
walk(tree, self)
self.emit('RETURN_VALUE')
def get_module(self):
return self
def visitDiscard(self, node):
self.visit(node.expr)
self.emit('PRINT_EXPR')
class AbstractFunctionCode():
optimized = 1
lambdaCount = 0
def __init__(self, func, scopes, isLambda, class_name, mod):
self.class_name = class_name
self.module = mod
if isLambda:
klass = FunctionCodeGenerator
name = '<lambda.%d>' % klass.lambdaCount
klass.lambdaCount = klass.lambdaCount + 1
else:
name = func.name
args, hasTupleArg = generateArgList(func.argnames)
self.graph = pyassem.PyFlowGraph(name, func.filename, args, optimized=1)
self.isLambda = isLambda
self.super_init()
if not isLambda and func.doc:
self.setDocstring(func.doc)
lnf = walk(func.code, self.NameFinder(args), verbose=0)
self.locals.push(lnf.getLocals())
if func.varargs:
self.graph.setFlag(CO_VARARGS)
if func.kwargs:
self.graph.setFlag(CO_VARKEYWORDS)
self.set_lineno(func)
if hasTupleArg:
self.generateArgUnpack(func.argnames)
def get_module(self):
return self.module
def finish(self):
self.graph.startExitBlock()
if not self.isLambda:
self.emit('LOAD_CONST', None)
self.emit('RETURN_VALUE')
return
def generateArgUnpack(self, args):
for i in range(len(args)):
arg = args[i]
if isinstance(arg, tuple):
self.emit('LOAD_FAST', '.%d' % (i * 2))
self.unpackSequence(arg)
def unpackSequence(self, tup):
if VERSION > 1:
self.emit('UNPACK_SEQUENCE', len(tup))
else:
self.emit('UNPACK_TUPLE', len(tup))
for elt in tup:
if isinstance(elt, tuple):
self.unpackSequence(elt)
else:
self._nameOp('STORE', elt)
unpackTuple = unpackSequence
class FunctionCodeGenerator(NestedScopeMixin, AbstractFunctionCode, CodeGenerator):
super_init = CodeGenerator.__init__
scopes = None
__super_init = AbstractFunctionCode.__init__
def __init__(self, func, scopes, isLambda, class_name, mod):
self.scopes = scopes
self.scope = scopes[func]
self.__super_init(func, scopes, isLambda, class_name, mod)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
if self.scope.generator is not None:
self.graph.setFlag(CO_GENERATOR)
return
class GenExprCodeGenerator(NestedScopeMixin, AbstractFunctionCode, CodeGenerator):
super_init = CodeGenerator.__init__
scopes = None
__super_init = AbstractFunctionCode.__init__
def __init__(self, gexp, scopes, class_name, mod):
self.scopes = scopes
self.scope = scopes[gexp]
self.__super_init(gexp, scopes, 1, class_name, mod)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
self.graph.setFlag(CO_GENERATOR)
class AbstractClassCode():
def __init__(self, klass, scopes, module):
self.class_name = klass.name
self.module = module
self.graph = pyassem.PyFlowGraph(klass.name, klass.filename, optimized=0, klass=1)
self.super_init()
lnf = walk(klass.code, self.NameFinder(), verbose=0)
self.locals.push(lnf.getLocals())
self.graph.setFlag(CO_NEWLOCALS)
if klass.doc:
self.setDocstring(klass.doc)
def get_module(self):
return self.module
def finish(self):
self.graph.startExitBlock()
self.emit('LOAD_LOCALS')
self.emit('RETURN_VALUE')
class ClassCodeGenerator(NestedScopeMixin, AbstractClassCode, CodeGenerator):
super_init = CodeGenerator.__init__
scopes = None
__super_init = AbstractClassCode.__init__
def __init__(self, klass, scopes, module):
self.scopes = scopes
self.scope = scopes[klass]
self.__super_init(klass, scopes, module)
self.graph.setFreeVars(self.scope.get_free_vars())
self.graph.setCellVars(self.scope.get_cell_vars())
self.set_lineno(klass)
self.emit('LOAD_GLOBAL', '__name__')
self.storeName('__module__')
if klass.doc:
self.emit('LOAD_CONST', klass.doc)
self.storeName('__doc__')
def generateArgList(arglist):
"""Generate an arg list marking TupleArgs"""
args = []
extra = []
count = 0
for i in range(len(arglist)):
elt = arglist[i]
if isinstance(elt, str):
args.append(elt)
elif isinstance(elt, tuple):
args.append(TupleArg(i * 2, elt))
extra.extend(misc.flatten(elt))
count = count + 1
else:
            raise ValueError, 'unexpected argument type: %r' % (elt,)
return (
args + extra, count)
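# Editor's illustration (added): for example,
#     generateArgList(['a', ('b', 'c')])
# returns (['a', TupleArg(2, ('b', 'c')), 'b', 'c'], 1): the tuple argument
# is replaced by a TupleArg marker, its flattened names are appended, and
# the count of tuple arguments is returned alongside.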
def findOp(node):
"""Find the op (DELETE, LOAD, STORE) in an AssTuple tree"""
v = OpFinder()
walk(node, v, verbose=0)
return v.op
class OpFinder():
def __init__(self):
self.op = None
return
def visitAssName(self, node):
if self.op is None:
self.op = node.flags
elif self.op != node.flags:
raise ValueError, 'mixed ops in stmt'
return
visitAssAttr = visitAssName
visitSubscript = visitAssName
class Delegator():
"""Base class to support delegation for augmented assignment nodes
    To generate code for augmented assignments, we use the following
    wrapper classes. In visitAugAssign, the left-hand expression node
    is visited twice. The first time the visit uses the normal method
    for that node. The second time the visit uses a different method
that generates the appropriate code to perform the assignment.
These delegator classes wrap the original AST nodes in order to
support the variant visit methods.
"""
def __init__(self, obj):
self.obj = obj
def __getattr__(self, attr):
return getattr(self.obj, attr)
class AugGetattr(Delegator):
pass
class AugName(Delegator):
pass
class AugSlice(Delegator):
pass
class AugSubscript(Delegator):
pass
wrapper = {ast.Getattr: AugGetattr,
ast.Name: AugName,
ast.Slice: AugSlice,
ast.Subscript: AugSubscript
}
def wrap_aug(node):
return wrapper[node.__class__](node)
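# Editor's note (added): wrap_aug picks the Delegator subclass matching the
# node's class, so e.g. wrap_aug(ast.Name('x')) yields an AugName wrapper
# that forwards attribute access to the original node while visitAugAssign
# dispatches to the visitAugName method above.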
if __name__ == '__main__':
for file in sys.argv[1:]:
compileFile(file)
|
DarthMaulware/EquationGroupLeaks
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/compiler/pycodegen.py
|
Python
|
unlicense
| 44,909
|
[
"VisIt"
] |
6622da9584f02142d739b00281cb52488f44033bf441d14c1d7fe0e44a8a04e7
|
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
#
# Authors:
# Brian Paul
"""
Tile layout functions
This is a collection of functions for computing tile layouts for
tilesort configs, specifically image reassembly configs.
There are lots of ways this can be done - only a few are presented here.
These functions are used by the graphical config tool and you may use
them in your own tilesort configuration file.
The basic task is to divide a mural into tiles while assigning each
tile to a "server". Ideally, one wants the mural's tiles to be
uniformly distributed among the servers so that we get good load
balancing.
The user might request a particular number of rows and columns of
tiles. Then we'll have to compute the tile sizes to cover the mural.
Or, the user might request a particular tile size. Then we'll have to
compute how many rows and columns of tiles are needed to cover the mural.
These functions all take the same parameters:
muralWidth, muralHeight = the mural size, in pixels
numServers = number of servers available
tileWidth, tileHeight = the desired tile size (may be zero)
tileRows, tileCols = desired number of rows and cols of tiles (may be zero)
Note: only the tileWidth/Height _OR_ tileRows/Cols values can be zero,
not both.
The functions return a list of tuples of the form
(server, x, y, width, height) which describes the position and size of
each tile as well as its assigned server.
"""
def __CheckArgs(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols):
"""Helper function for layout functions."""
# error checking
assert muralWidth > 0
assert muralHeight > 0
assert numServers > 0
# XXX Note Python integer division below (use // someday?)
if tileRows > 0 and tileCols > 0:
# we're told how many rows and columns to use
assert tileWidth >= 0
assert tileHeight >= 0
# if tile size is zero, compute it now
if tileWidth == 0:
tileWidth = muralWidth / tileCols
if tileHeight == 0:
tileHeight = muralHeight / tileRows
fixedTileSize = 0
else:
# we're told the tile size, compute rows and columns
assert tileWidth > 0
assert tileHeight > 0
tileCols = (muralWidth + tileWidth - 1) / tileWidth
tileRows = (muralHeight + tileHeight - 1) / tileHeight
fixedTileSize = 1
return (tileWidth, tileHeight, tileRows, tileCols, fixedTileSize)
def LayoutRaster(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols):
"""Layout tiles in a simple top-to-bottom, left-to-right raster order."""
# check args, and compute missing values
(tileWidth, tileHeight, tileRows, tileCols, fixedTileSize) = __CheckArgs(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols)
# layout tiles now
tiles = []
for i in range(tileRows):
for j in range(tileCols):
server = (i * tileCols + j) % numServers
x = j * tileWidth
y = i * tileHeight
if i < tileRows - 1 or fixedTileSize:
h = tileHeight
else:
h = muralHeight - (tileRows - 1) * tileHeight
if j < tileCols - 1 or fixedTileSize:
w = tileWidth
else:
w = muralWidth - (tileCols - 1) * tileWidth
tiles.append( (server, x, y, w, h) )
return tiles
def LayoutZigZag(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols):
"""Layout tiles in a simple top-to-bottom order, alternating between
left-to-right and right-to-left with each row."""
# check args, and compute missing values
(tileWidth, tileHeight, tileRows, tileCols, fixedTileSize) = __CheckArgs(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols)
# layout tiles now
tiles = []
for i in range(tileRows):
for j in range(tileCols):
if i % 2 == 1:
# odd row
jj = tileCols - j - 1
server = (i * tileCols + jj) % numServers
else:
# even row
server = (i * tileCols + j) % numServers
x = j * tileWidth
y = i * tileHeight
if i < tileRows - 1 or fixedTileSize:
h = tileHeight
else:
h = muralHeight - (tileRows - 1) * tileHeight
if j < tileCols - 1 or fixedTileSize:
w = tileWidth
else:
w = muralWidth - (tileCols - 1) * tileWidth
tiles.append( (server, x, y, w, h) )
return tiles
def LayoutSpiral(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols):
"""Layout tiles starting in the center of the mural and winding outward
in a spiral."""
# check args, and compute missing values
(tileWidth, tileHeight, tileRows, tileCols, fixedTileSize) = __CheckArgs(muralWidth, muralHeight, numServers, tileWidth, tileHeight, tileRows, tileCols)
curRow = (tileRows - 1) / 2
curCol = (tileCols - 1) / 2
radius = 0
march = 0
colStep = 0
rowStep = -1
serv = 0
tiles = []
while 1:
assert ((rowStep == 0 and colStep != 0) or
(rowStep != 0 and colStep == 0))
if (curRow >= 0 and curRow < tileRows and
curCol >= 0 and curCol < tileCols):
server = serv % numServers
# compute tile position and size
x = curCol * tileWidth
y = curRow * tileHeight
if curCol < tileCols - 1 or fixedTileSize:
w = tileWidth
else:
w = muralWidth - (tileCols - 1) * tileWidth
if curRow < tileRows - 1 or fixedTileSize:
h = tileHeight
else:
h = muralHeight - (tileRows - 1) * tileHeight
# save this tile
tiles.append( (server, x, y, w, h) )
# check if we're done
if (len(tiles) >= tileRows * tileCols):
# all done
break
serv += 1
# advance to next space
march += 1
if march < radius:
# step in current direction
curRow += rowStep
curCol += colStep
pass
else:
# change direction
if colStep == 1 and rowStep == 0:
# transition right -> down
colStep = 0
rowStep = 1
elif colStep == 0 and rowStep == 1:
# transition down -> left
colStep = -1
rowStep = 0
radius += 1
elif colStep == -1 and rowStep == 0:
# transition left -> up
colStep = 0
rowStep = -1
else:
# transition up -> right
assert colStep == 0
assert rowStep == -1
colStep = 1
rowStep = 0
radius += 1
#endif
march = 0
curRow += rowStep
curCol += colStep
#endif
#endwhile
return tiles
|
alown/chromium
|
mothership/server/tilelayout.py
|
Python
|
bsd-3-clause
| 6,243
|
[
"Brian"
] |
a5362247dc7f37d42e5c968bf017061d05b4260bbd2ada77389efa483419f932
|
#!/usr/bin/env python
# Copyright (c) 2014 UCLA
#
# This software is distributable under the terms of the GNU General
# Public License (GPL) v2, the text of which can be found at
# http://www.gnu.org/copyleft/gpl.html. Installing, importing or
# otherwise using this module constitutes acceptance of the terms of
# this License.
#
# Disclaimer
#
# This software is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Comments and/or additions are welcome (send e-mail to:
# hdchen@ucla.edu).
import os, sys
import pysam
from optparse import OptionParser
from utils import *
def main():
parser = OptionParser()
parser.add_option("-i", dest="inbam", type='string', help="the folder of input bam files")
(options, args) = parser.parse_args()
if not options.inbam:
options.inbam = "."
open_log(os.path.join(options.inbam, 'MyBamPostProcess.py_log'))
logm('Program starts!')
file_list_t = [os.path.join(options.inbam, x) for x in os.listdir(options.inbam) if x.endswith(".bam")]
file_list = []
# Check log file to see if alignment is successful.
for f in file_list_t:
try:
log_file = open(".".join(f.split(".")[:-1])+".bs_seeker2_log")
            # Spin to the last line of the alignment log
            for line in log_file:
                pass
            if "END" in line:
file_list.append(f)
logm("File %s is included."%f)
else:
logm("File %s is excluded."%f)
except:
logm("File %s has no alignment log file."%f)
if len(file_list) == 0:
print >> sys.stderr, 'ERROR: no bam files available for post process.'
exit(1)
sorted_list = []
# Sort
for inputsam in file_list:
sortedsam = inputsam + "_sorted"
pysam.sort(inputsam, sortedsam)
sorted_list.append(sortedsam+".bam")
logm('Individual bam file sorting finished.')
# Merge
mergedsam = file_list[0].split(".")
mergedsam[0] = mergedsam[0].split("_")
mergedsam[0][-1] = "merged"
mergedsam[0] = "_".join(mergedsam[0])
mergedsam = ".".join(mergedsam)
merge_params = [mergedsam] + sorted_list
pysam.merge(*merge_params)
logm('Merging finished.')
# Remove sortedsams
for f in sorted_list:
os.remove(f)
close_log()
if __name__ == '__main__':
main()
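# Editor's note (not part of the original script): the positional
# pysam.sort(in_bam, out_prefix) and pysam.merge(out_bam, *in_bams) calls
# above follow the old (pre-0.9) samtools-style pysam wrappers. A hedged
# sketch of the modern equivalents, should this script be ported:
#
#   pysam.sort('-o', 'sample_sorted.bam', 'sample.bam')
#   pysam.merge('merged.bam', 'a_sorted.bam', 'b_sorted.bam')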
|
haodongchen/BSseeker2_FOO
|
MyBamPostProcess.py
|
Python
|
gpl-2.0
| 2,517
|
[
"pysam"
] |
14fc0a927321cd23d7146ca7d2168d2eb8d4d45e2a7f8b0647d653fed5a56c84
|
# NOTE: parts of this file were taken from scipy's doc/source/conf.py. See
# scikit-bio/licenses/scipy.txt for scipy's license.
import glob
import sys
import os
# Check that dependencies are installed and are the correct version where necessary
sphinx_version = '1.2.2'
import sphinx
if sphinx.__version__ != sphinx_version:
raise RuntimeError("Sphinx %s required" % sphinx_version)
import sphinx_bootstrap_theme
# We currently rely on the latest version of numpydoc available on GitHub:
# git+git://github.com/numpy/numpydoc.git
#
# There isn't a way to specify this in setup.py as a dependency since this
# feature is being removed from pip. We also can't check the version of
# numpydoc installed because there isn't a numpydoc.__version__ defined.
try:
import numpydoc
except ImportError:
raise RuntimeError(
"numpydoc v0.6 or later required. Install it with:\n"
" pip install git+git://github.com/numpy/numpydoc.git")
import skbio
from skbio.util import classproperty
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here:
#
# sys.path.insert(0, os.path.abspath('../sphinxext/foo'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# Using `sphinx_version` doesn't work, likely because Sphinx is expecting a
# version string of the form X.Y, not X.Y.Z.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx'
]
# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'scikit-bio'
copyright = u'2014--, scikit-bio development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = skbio.__version__
# The full version, including alpha/beta/rc tags.
release = skbio.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Exclude this file since it is only used by autosummary to generate other RST
# files during the build process, and it will generate sphinx errors and
# warnings otherwise.
exclude_patterns = ['_templates/autosummary/*.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': 'scikit-bio docs',
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': 'united',
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': False
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static/']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-biodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'scikit-bio.tex', u'scikit-bio Documentation',
u'scikit-bio development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'scikit-bio', u'scikit-bio Documentation',
[u'scikit-bio development team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'scikit-bio', u'scikit-bio Documentation',
u'scikit-bio development team', 'scikit-bio',
'Data structures, algorithms, and educational resources for working with '
'biological data in Python.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for autosummary ----------------------------------------------
autosummary_generate = glob.glob('*.rst')
# -- Options for numpydoc -------------------------------------------------
# Generate plots for example sections
numpydoc_use_plots = True
# If we don't turn numpydoc's toctree generation off, Sphinx will warn about
# the toctree referencing missing document(s). This appears to be related to
# generating docs for classes with a __call__ method.
numpydoc_class_members_toctree = False
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
import scipy as sp
np.random.seed(123)
"""
plot_include_source = True
#plot_formats = [('png', 96), 'pdf']
#plot_html_show_formats = False
import math
phi = (math.sqrt(5) + 1)/2
font_size = 13*72/96.0 # 13 px
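# phi is the golden ratio (~1.618); figure.figsize below is 3*phi x 3 inches.
# 13 px at 96 dpi converts to 13 * 72 / 96 = 9.75 pt (matplotlib sizes in pt).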
plot_rcparams = {
'font.size': font_size,
'axes.titlesize': font_size,
'axes.labelsize': font_size,
'xtick.labelsize': font_size,
'ytick.labelsize': font_size,
'legend.fontsize': font_size,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
# Some of our figures have legends outside the axes area. When they're
# rendered in an interactive context, nothing gets cut off, but when
# rendered in a static context (e.g., with savefig, which the plot
# directive uses), the legend can get cut off. Specifying 'tight' instead
# of 'standard' fixes the issue. See http://stackoverflow.com/a/10154763
'savefig.bbox': 'tight'
}
if not use_matplotlib_plot_directive:
import matplotlib
matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {
'http://docs.python.org/dev': None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://matplotlib.org': None,
'http://pandas.pydata.org': None,
    'http://www.biom-format.org': None
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print "NOTE: linkcode extension not found -- no links to source generated"
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
try:
fn = inspect.getsourcefile(sys.modules[obj.__module__])
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.findsource(obj)
except:
lineno = None
if lineno:
linespec = "#L%d" % (lineno + 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(skbio.__file__))
if 'dev' in skbio.__version__:
return "http://github.com/biocore/scikit-bio/blob/master/skbio/%s%s" % (
fn, linespec)
else:
return "http://github.com/biocore/scikit-bio/blob/%s/skbio/%s%s" % (
skbio.__version__, fn, linespec)
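# Example (hypothetical object): for a released skbio.__version__ of '0.2.0',
# the resolved link has the form
# http://github.com/biocore/scikit-bio/blob/0.2.0/skbio/<file>#L<line>.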
#------------------------------------------------------------------------------
# linkcheck
#------------------------------------------------------------------------------
# Link-checking on Travis sometimes times out.
linkcheck_timeout = 30
# Patch classproperty.__get__ to return the descriptor itself so that
# Sphinx/autodoc can introspect classes using classproperty; without this
# patch our docs fail to build.
def _closure():
def __get__(self, cls, owner):
return self
classproperty.__get__ = __get__
_closure()
def autodoc_skip_member(app, what, name, obj, skip, options):
if what == "method":
if isinstance(obj, classproperty):
return True
return skip
# Add the 'copybutton' javascript, to hide/show the prompt in code
# examples, originally taken from scikit-learn's doc/conf.py
def setup(app):
app.add_javascript('copybutton.js')
app.add_stylesheet('style.css')
app.connect('autodoc-skip-member', autodoc_skip_member)
|
jensreeder/scikit-bio
|
doc/source/conf.py
|
Python
|
bsd-3-clause
| 15,372
|
[
"scikit-bio"
] |
0eb7d0081b92ac0662963c9d1e7b2a9129003b772d527bf0843f6ad068aa7fd7
|
"""
Module responsible for translating read data into GA4GH native
objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import os
import pysam
import ga4gh.protocol as protocol
import ga4gh.datamodel as datamodel
import ga4gh.exceptions as exceptions
class SamCigar(object):
"""
Utility class for working with SAM CIGAR strings
"""
# see http://pysam.readthedocs.org/en/latest/api.html
# #pysam.AlignedSegment.cigartuples
cigarStrings = [
protocol.CigarOperation.ALIGNMENT_MATCH,
protocol.CigarOperation.INSERT,
protocol.CigarOperation.DELETE,
protocol.CigarOperation.SKIP,
protocol.CigarOperation.CLIP_SOFT,
protocol.CigarOperation.CLIP_HARD,
protocol.CigarOperation.PAD,
protocol.CigarOperation.SEQUENCE_MATCH,
protocol.CigarOperation.SEQUENCE_MISMATCH,
]
@classmethod
def ga2int(cls, value):
for i, cigarString in enumerate(cls.cigarStrings):
if value == cigarString:
return i
@classmethod
def int2ga(cls, value):
return cls.cigarStrings[value]
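    # Illustrative mapping (pysam's cigartuples encoding): a CIGAR of "3M1I2D"
    # arrives as [(0, 3), (1, 1), (2, 2)], and SamCigar.int2ga(0) returns
    # protocol.CigarOperation.ALIGNMENT_MATCH.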
class SamFlags(object):
"""
Utility class for working with SAM flags
"""
NUMBER_READS = 0x1
PROPER_PLACEMENT = 0x2
READ_NUMBER_ONE = 0x40
READ_NUMBER_TWO = 0x80
SECONDARY_ALIGNMENT = 0x100
FAILED_VENDOR_QUALITY_CHECKS = 0x200
DUPLICATE_FRAGMENT = 0x400
SUPPLEMENTARY_ALIGNMENT = 0x800
@staticmethod
def isFlagSet(flagAttr, flag):
return flagAttr & flag == flag
    @staticmethod
    def setFlag(flagAttr, flag):
        # ints are immutable, so the updated flag value must be returned;
        # an in-place "flagAttr |= flag" would be lost to the caller
        return flagAttr | flag
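    # Example: flag 0x43 (= 0x1 | 0x2 | 0x40) has READ_NUMBER_ONE (0x40) set,
    # so isFlagSet(0x43, SamFlags.READ_NUMBER_ONE) is True, and
    # setFlag(0x0, SamFlags.PROPER_PLACEMENT) returns 0x2.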
class AbstractReadGroupSet(datamodel.DatamodelObject):
"""
The base class of a read group set
"""
def __init__(self, id_):
self._id = id_
self._readGroups = []
def getId(self):
# TODO move into the superclass
return self._id
def getReadGroups(self):
"""
Returns the read groups in this read group set
"""
return self._readGroups
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroupSet.
"""
readGroupSet = protocol.ReadGroupSet()
readGroupSet.id = self._id
readGroupSet.readGroups = [
readGroup.toProtocolElement() for readGroup in self._readGroups]
readGroupSet.name = None
readGroupSet.datasetId = None
return readGroupSet
class SimulatedReadGroupSet(AbstractReadGroupSet):
"""
A simulated read group set
"""
def __init__(self, id_):
super(SimulatedReadGroupSet, self).__init__(id_)
readGroupId = "{}:one".format(id_)
readGroup = SimulatedReadGroup(readGroupId)
self._readGroups.append(readGroup)
class HtslibReadGroupSet(datamodel.PysamDatamodelMixin, AbstractReadGroupSet):
"""
    Class representing a logical collection of ReadGroups.
"""
def __init__(self, id_, dataDir):
super(HtslibReadGroupSet, self).__init__(id_)
self._dataDir = dataDir
self._readGroups = []
self._setAccessTimes(dataDir)
self._scanDataFiles(dataDir, ["*.bam"])
def _addDataFile(self, path):
filename = os.path.split(path)[1]
localId = os.path.splitext(filename)[0]
readGroupId = "{}:{}".format(self._id, localId)
readGroup = HtslibReadGroup(readGroupId, path)
self._readGroups.append(readGroup)
class AbstractReadGroup(object):
"""
Class representing a ReadGroup. A ReadGroup is all the data that's
processed the same way by the sequencer. There are typically 1-10
ReadGroups in a ReadGroupSet.
"""
def __init__(self, id_):
self._id = id_
now = protocol.convertDatetime(datetime.datetime.now())
self._creationTime = now
self._updateTime = now
def getId(self):
"""
Returns the id of the read group
"""
return self._id
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroup.
"""
# TODO this is very incomplete, but we don't have the
# implementation to fill out the rest of the fields currently
readGroup = protocol.ReadGroup()
readGroup.id = self._id
readGroup.created = self._creationTime
readGroup.updated = self._updateTime
readGroup.datasetId = None
readGroup.description = None
readGroup.experiment = None
readGroup.info = {}
readGroup.name = readGroup.id
readGroup.predictedInsertSize = None
readGroup.programs = []
readGroup.referenceSetId = None
readGroup.sampleId = None
return readGroup
class SimulatedReadGroup(AbstractReadGroup):
"""
A simulated readgroup
"""
def __init__(self, id_):
super(SimulatedReadGroup, self).__init__(id_)
def getReadAlignments(self, referenceId=None, start=None, end=None):
for i in range(2):
alignment = self._createReadAlignment(i)
yield alignment
def _createReadAlignment(self, i):
# TODO fill out a bit more
id_ = "{}:simulated{}".format(self._id, i)
alignment = protocol.ReadAlignment()
alignment.alignedQuality = [1, 2, 3]
alignment.alignedSequence = "ACT"
gaPosition = protocol.Position()
gaPosition.position = 0
gaPosition.referenceName = "whatevs"
gaPosition.strand = protocol.Strand.POS_STRAND
gaLinearAlignment = protocol.LinearAlignment()
gaLinearAlignment.position = gaPosition
alignment.alignment = gaLinearAlignment
alignment.duplicateFragment = False
alignment.failedVendorQualityChecks = False
alignment.fragmentLength = 3
alignment.fragmentName = id_
alignment.id = id_
alignment.info = {}
alignment.nextMatePosition = None
alignment.numberReads = None
alignment.properPlacement = False
alignment.readGroupId = self._id
alignment.readNumber = None
alignment.secondaryAlignment = False
alignment.supplementaryAlignment = False
return alignment
class HtslibReadGroup(datamodel.PysamDatamodelMixin, AbstractReadGroup):
"""
A readgroup based on htslib's reading of a given file
"""
def __init__(self, id_, dataFile):
super(HtslibReadGroup, self).__init__(id_)
self._samFilePath = dataFile
try:
self._samFile = pysam.AlignmentFile(dataFile)
except ValueError:
raise exceptions.FileOpenFailedException(dataFile)
def getSamFilePath(self):
"""
Returns the file path of the sam file
"""
return self._samFilePath
def getReadAlignments(self, referenceId=None, start=None, end=None):
"""
Returns an iterator over the specified reads
"""
# TODO If referenceId is None, return against all references,
# including unmapped reads.
referenceName = ""
if referenceId is not None:
referenceName = self._samFile.getrname(referenceId)
referenceName, start, end = self.sanitizeAlignmentFileFetch(
referenceName, start, end)
# TODO deal with errors from htslib
readAlignments = self._samFile.fetch(referenceName, start, end)
for readAlignment in readAlignments:
yield self.convertReadAlignment(readAlignment)
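    # Usage sketch (hypothetical id and path):
    #   rg = HtslibReadGroup('rgs1:sample', '/data/sample.bam')
    #   for ga in rg.getReadAlignments(referenceId=0, start=1000, end=2000):
    #       ...  # each ga is a protocol.ReadAlignment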
def convertReadAlignment(self, read):
"""
Convert a pysam ReadAlignment to a GA4GH ReadAlignment
"""
# TODO fill out remaining fields
# TODO refine in tandem with code in converters module
ret = protocol.ReadAlignment()
ret.alignedQuality = list(read.query_qualities)
ret.alignedSequence = read.query_sequence
ret.alignment = protocol.LinearAlignment()
ret.alignment.mappingQuality = read.mapping_quality
ret.alignment.position = protocol.Position()
ret.alignment.position.referenceName = self._samFile.getrname(
read.reference_id)
ret.alignment.position.position = read.reference_start
ret.alignment.position.strand = \
protocol.Strand.POS_STRAND # TODO fix this!
ret.alignment.cigar = []
for operation, length in read.cigar:
gaCigarUnit = protocol.CigarUnit()
gaCigarUnit.operation = SamCigar.int2ga(operation)
gaCigarUnit.operationLength = length
gaCigarUnit.referenceSequence = None # TODO fix this!
ret.alignment.cigar.append(gaCigarUnit)
ret.duplicateFragment = SamFlags.isFlagSet(
read.flag, SamFlags.DUPLICATE_FRAGMENT)
ret.failedVendorQualityChecks = SamFlags.isFlagSet(
read.flag, SamFlags.FAILED_VENDOR_QUALITY_CHECKS)
ret.fragmentLength = read.template_length
ret.fragmentName = read.query_name
ret.id = "{}:{}".format(self._id, read.query_name)
ret.info = {key: [str(value)] for key, value in read.tags}
ret.nextMatePosition = None
if read.next_reference_id != -1:
ret.nextMatePosition = protocol.Position()
ret.nextMatePosition.referenceName = self._samFile.getrname(
read.next_reference_id)
ret.nextMatePosition.position = read.next_reference_start
ret.nextMatePosition.strand = \
protocol.Strand.POS_STRAND # TODO fix this!
# TODO Is this the correct mapping between numberReads and
# sam flag 0x1? What about the mapping between numberReads
# and 0x40 and 0x80?
ret.numberReads = None
ret.readNumber = None
if SamFlags.isFlagSet(read.flag, SamFlags.NUMBER_READS):
ret.numberReads = 2
if SamFlags.isFlagSet(read.flag, SamFlags.READ_NUMBER_ONE):
ret.readNumber = 0
elif SamFlags.isFlagSet(read.flag, SamFlags.READ_NUMBER_TWO):
ret.readNumber = 1
ret.properPlacement = SamFlags.isFlagSet(
read.flag, SamFlags.PROPER_PLACEMENT)
ret.readGroupId = self._id
ret.secondaryAlignment = SamFlags.isFlagSet(
read.flag, SamFlags.SECONDARY_ALIGNMENT)
ret.supplementaryAlignment = SamFlags.isFlagSet(
read.flag, SamFlags.SUPPLEMENTARY_ALIGNMENT)
return ret
|
shajoezhu/server
|
ga4gh/datamodel/reads.py
|
Python
|
apache-2.0
| 10,535
|
[
"pysam"
] |
f85980c0c9d6fd32599e3cd142395a5dc831065bce8a402e44abf392e9ca950f
|
import hashlib
import hmac
from twisted import logger
import urllib
from matrix_gitter.gitter_oauth import setup_gitter_oauth
from matrix_gitter.utils import assert_http_200, Errback, JsonProducer, \
read_json_response, http_request
log = logger.Logger()
class GitterAPI(object):
"""Gitter interface.
This communicates with Gitter using their API, authenticating via OAuth2 as
specific users.
"""
def __init__(self, bridge, port, url, oauth_key, oauth_secret,
debug=False):
self.bridge = bridge
self.oauth_key = oauth_key
self.oauth_secret = oauth_secret
self.url = url
setup_gitter_oauth(self, port, debug=debug)
@property
def bot_fullname(self):
return self.bridge.bot_fullname
def secret_hmac(self, msg):
"""HMAC a message with the secret in the config.
"""
return hmac.new(self.bridge.secret_key, msg, hashlib.sha1).hexdigest()
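    # Example: secret_hmac('@alice:example.org') returns the 40-character hex
    # SHA-1 HMAC digest of the message, keyed with the bridge's secret_key.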
def gitter_request(self, method, uri, content, *args, **kwargs):
"""Gitter API request.
"""
if 'access_token' in kwargs:
access_token = kwargs.pop('access_token')
else:
access_token = kwargs.pop('user').gitter_access_token
if args:
uri = uri % tuple(urllib.quote(a) for a in args)
if isinstance(uri, unicode):
uri = uri.encode('ascii')
headers = {'accept': 'application/json',
'authorization': 'Bearer %s' % access_token}
if content is not None:
headers['content-type'] = 'application/json'
log.debug("gitter_request {method} {uri} {content!r}",
method=method, uri=uri, content=content)
return http_request(
method,
'https://api.gitter.im/%s' % uri,
headers,
JsonProducer(content) if content is not None else None)
def gitter_stream(self, method, uri, *args, **kwargs):
"""Request to Gitter's streaming API.
"""
if 'access_token' in kwargs:
access_token = kwargs.pop('access_token')
else:
access_token = kwargs.pop('user').gitter_access_token
if args:
uri = uri % tuple(urllib.quote(a) for a in args)
if isinstance(uri, unicode):
uri = uri.encode('ascii')
headers = {'accept': 'application/json',
'authorization': 'Bearer %s' % access_token}
log.debug("gitter_stream {method} {uri} {content!r}",
method=method, uri=uri)
return http_request(
method,
'https://stream.gitter.im/%s' % uri,
headers,
timeout=None)
def set_access_token(self, matrix_user, access_token):
"""Set the access token for a user who completed OAuth.
"""
log.info("Getting GitHub username for Matrix user {matrix}",
matrix=matrix_user)
d = self.gitter_request('GET', 'v1/user', None,
access_token=access_token)
d.addCallback(assert_http_200)
d.addCallback(read_json_response)
d.addCallback(self._set_user_access_token, matrix_user, access_token)
d.addErrback(Errback(log,
"Error getting username for Matrix user {matrix}",
matrix=matrix_user))
def _set_user_access_token(self, (response, content),
matrix_user, access_token):
github_user = content[0]['username']
gitter_id = content[0]['id']
log.info("Storing Gitter access token for user {matrix}/{github}",
matrix=matrix_user, github=github_user)
self.bridge.set_gitter_info(matrix_user, github_user, gitter_id,
access_token)
def get_gitter_user_rooms(self, user_obj):
"""List the Gitter rooms a user is in.
"""
d = self.gitter_request('GET', 'v1/rooms', None,
user=user_obj)
d.addCallback(assert_http_200)
d.addCallback(read_json_response)
d.addCallback(self._read_gitter_rooms)
return d
def _read_gitter_rooms(self, (response, content)):
return [(room['id'], room['url'][1:])
for room in content]
def get_room(self, gitter_room, **kwargs):
"""Get a Gitter room without joining it.
"""
d = self.gitter_request(
'POST',
'v1/rooms',
{'uri': gitter_room},
**kwargs)
d.addCallback(assert_http_200)
d.addCallback(read_json_response)
d.addCallback(lambda (r, c): c)
return d
def join_room(self, user_obj, gitter_room_id):
"""Join a Gitter room.
"""
d = self.gitter_request(
'POST',
'v1/user/%s/rooms',
{'id': gitter_room_id},
user_obj.gitter_id,
user=user_obj)
d.addCallback(assert_http_200)
d.addCallback(read_json_response)
d.addCallback(lambda (r, c): c)
return d
def leave_room(self, user_obj, gitter_room):
"""Leave a Gitter room.
"""
d = self.get_room(gitter_room, user=user_obj)
d.addCallback(self._leave_room, user_obj)
return d
def _leave_room(self, room, user_obj):
log.info("Resolved {name} into {id}, leaving...",
name=room['url'][1:], id=room['id'])
user_id = user_obj.gitter_id
return self.gitter_request(
'DELETE',
'v1/rooms/%s/users/%s' % (room['id'], user_id),
None,
user=user_obj)
def auth_link(self, matrix_user):
"""Get the link a user should visit to authenticate.
"""
state = '%s|%s' % (matrix_user, self.secret_hmac(matrix_user))
return '%sauth_gitter/%s' % (self.url, urllib.quote(state))
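    # Example: for matrix_user '@alice:example.org' the state is
    # '@alice:example.org|<40-char hmac>', which is URL-quoted and appended
    # to '<self.url>auth_gitter/' so the OAuth handler can verify it later.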
|
remram44/matrix-appservice-gitter-twisted
|
matrix_gitter/gitter.py
|
Python
|
bsd-3-clause
| 5,961
|
[
"VisIt"
] |
03f0cc415b044208bf1426b0fa9fb2f31ea884978f6beb2b3335420728d68557
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
The :mod:`~openlp.core.ui.media.vlcplayer` module contains our VLC component wrapper
"""
from datetime import datetime
from distutils.version import LooseVersion
import logging
import os
import sys
from PyQt4 import QtGui
from openlp.core.lib import Settings, translate
from openlp.core.ui.media import MediaState
from openlp.core.ui.media.mediaplayer import MediaPlayer
log = logging.getLogger(__name__)
VLC_AVAILABLE = False
try:
from openlp.core.ui.media.vendor import vlc
VLC_AVAILABLE = bool(vlc.get_default_instance())
except (ImportError, NameError, NotImplementedError):
pass
except OSError, e:
if sys.platform.startswith('win'):
        # Only swallow the Windows "module could not be found" error
        # (winerror 126) seen when the VLC library is missing; re-raise
        # anything else.
        if not (isinstance(e, WindowsError) and e.winerror == 126):
            raise
else:
raise
if VLC_AVAILABLE:
try:
version = vlc.libvlc_get_version()
except:
version = u'0.0.0'
if LooseVersion(version) < LooseVersion('1.1.0'):
VLC_AVAILABLE = False
log.debug(u'VLC could not be loaded: %s' % version)
AUDIO_EXT = [u'*.mp3', u'*.wav', u'*.wma', u'*.ogg']
VIDEO_EXT = [
u'*.3gp',
u'*.asf', u'*.wmv',
u'*.au',
u'*.avi',
u'*.flv',
u'*.mov',
u'*.mp4', u'*.m4v',
u'*.ogm', u'*.ogv',
u'*.mkv', u'*.mka',
    u'*.ts', u'*.mpg',
    u'*.mp2',
u'*.nsc',
u'*.nsv',
u'*.nut',
u'*.ra', u'*.ram', u'*.rm', u'*.rv', u'*.rmbv',
u'*.a52', u'*.dts', u'*.aac', u'*.flac', u'*.dv', u'*.vid',
u'*.tta', u'*.tac',
u'*.ty',
u'*.xa',
u'*.iso',
u'*.vob',
u'*.webm'
]
class VlcPlayer(MediaPlayer):
"""
A specialised version of the MediaPlayer class, which provides a VLC
display.
"""
def __init__(self, parent):
"""
Constructor
"""
MediaPlayer.__init__(self, parent, u'vlc')
self.original_name = u'VLC'
self.display_name = u'&VLC'
self.parent = parent
self.canFolder = True
self.audio_extensions_list = AUDIO_EXT
self.video_extensions_list = VIDEO_EXT
def setup(self, display):
"""
Set up the media player
"""
display.vlcWidget = QtGui.QFrame(display)
display.vlcWidget.setFrameStyle(QtGui.QFrame.NoFrame)
# creating a basic vlc instance
command_line_options = u'--no-video-title-show'
if not display.hasAudio:
            command_line_options += u' --no-audio'
if Settings().value(u'advanced/hide mouse') and display.controller.isLive:
command_line_options += u' --mouse-hide-timeout=0'
display.vlcInstance = vlc.Instance(command_line_options)
display.vlcInstance.set_log_verbosity(2)
# creating an empty vlc media player
display.vlcMediaPlayer = display.vlcInstance.media_player_new()
display.vlcWidget.resize(display.size())
display.vlcWidget.raise_()
display.vlcWidget.hide()
# The media player has to be 'connected' to the QFrame.
        # (otherwise a video would be displayed in its own window)
# This is platform specific!
# You have to give the id of the QFrame (or similar object)
# to vlc, different platforms have different functions for this.
win_id = int(display.vlcWidget.winId())
if sys.platform == "win32":
display.vlcMediaPlayer.set_hwnd(win_id)
elif sys.platform == "darwin":
            # We have to use 'set_nsobject' since Qt4 on OSX uses the Cocoa
# framework and not the old Carbon.
display.vlcMediaPlayer.set_nsobject(win_id)
else:
# for Linux using the X Server
display.vlcMediaPlayer.set_xwindow(win_id)
self.hasOwnWidget = True
def check_available(self):
"""
Return the availability of VLC
"""
return VLC_AVAILABLE
def load(self, display):
"""
Load a video into VLC
"""
log.debug(u'load vid in Vlc Controller')
controller = display.controller
volume = controller.media_info.volume
file_path = str(controller.media_info.file_info.absoluteFilePath())
path = os.path.normcase(file_path)
# create the media
display.vlcMedia = display.vlcInstance.media_new_path(path)
# put the media in the media player
display.vlcMediaPlayer.set_media(display.vlcMedia)
# parse the metadata of the file
display.vlcMedia.parse()
self.volume(display, volume)
        # We need to set media_info.length during load because we want to
        # avoid starting and stopping the video twice: once for real playback
        # and once just to get the media length.
#
# Media plugin depends on knowing media length before playback.
controller.media_info.length = int(display.vlcMediaPlayer.get_media().get_duration() / 1000)
return True
def media_state_wait(self, display, mediaState):
"""
        Wait for the video to change its state, but no longer than 60 seconds
        (loading an ISO file can take a long time).
"""
start = datetime.now()
while not mediaState == display.vlcMedia.get_state():
if display.vlcMedia.get_state() == vlc.State.Error:
return False
self.application.process_events()
if (datetime.now() - start).seconds > 60:
return False
return True
def resize(self, display):
"""
Resize the player
"""
display.vlcWidget.resize(display.size())
def play(self, display):
"""
Play the current item
"""
controller = display.controller
start_time = 0
if self.state != MediaState.Paused and controller.media_info.start_time > 0:
start_time = controller.media_info.start_time
display.vlcMediaPlayer.play()
if not self.media_state_wait(display, vlc.State.Playing):
return False
self.volume(display, controller.media_info.volume)
if start_time > 0:
self.seek(display, controller.media_info.start_time * 1000)
controller.media_info.length = int(display.vlcMediaPlayer.get_media().get_duration() / 1000)
controller.seekSlider.setMaximum(controller.media_info.length * 1000)
self.state = MediaState.Playing
display.vlcWidget.raise_()
return True
def pause(self, display):
"""
Pause the current item
"""
if display.vlcMedia.get_state() != vlc.State.Playing:
return
display.vlcMediaPlayer.pause()
if self.media_state_wait(display, vlc.State.Paused):
self.state = MediaState.Paused
def stop(self, display):
"""
Stop the current item
"""
display.vlcMediaPlayer.stop()
self.state = MediaState.Stopped
def volume(self, display, vol):
"""
Set the volume
"""
if display.hasAudio:
display.vlcMediaPlayer.audio_set_volume(vol)
def seek(self, display, seekVal):
"""
Go to a particular position
"""
if display.vlcMediaPlayer.is_seekable():
display.vlcMediaPlayer.set_time(seekVal)
def reset(self, display):
"""
Reset the player
"""
display.vlcMediaPlayer.stop()
display.vlcWidget.setVisible(False)
self.state = MediaState.Off
def set_visible(self, display, status):
"""
Set the visibility
"""
if self.hasOwnWidget:
display.vlcWidget.setVisible(status)
def update_ui(self, display):
"""
Update the UI
"""
# Stop video if playback is finished.
if display.vlcMedia.get_state() == vlc.State.Ended:
self.stop(display)
controller = display.controller
if controller.media_info.end_time > 0:
if display.vlcMediaPlayer.get_time() > controller.media_info.end_time * 1000:
self.stop(display)
self.set_visible(display, False)
if not controller.seekSlider.isSliderDown():
controller.seekSlider.blockSignals(True)
controller.seekSlider.setSliderPosition(display.vlcMediaPlayer.get_time())
controller.seekSlider.blockSignals(False)
def get_info(self):
"""
Return some information about this player
"""
return(translate('Media.player', 'VLC is an external player which '
'supports a number of different formats.') +
u'<br/> <strong>' + translate('Media.player', 'Audio') +
u'</strong><br/>' + unicode(AUDIO_EXT) + u'<br/><strong>' +
translate('Media.player', 'Video') + u'</strong><br/>' +
unicode(VIDEO_EXT) + u'<br/>')
|
marmyshev/transitions
|
openlp/core/ui/media/vlcplayer.py
|
Python
|
gpl-2.0
| 10,978
|
[
"Brian"
] |
68cdce8c856c5caa10c6fc9ca86776135a4f95d38738a14bc23d9db15b5d45ec
|
'''
Functions and classes for infilling missing observations in an
incomplete station time series using probabilistic principal component analysis (PPCA):
Stacklies W, Redestig H, Scholz M, Walther D, Selbig J. 2007.
pcaMethods-a bioconductor package providing PCA methods for incomplete data.
Bioinformatics 23: 1164-1167. DOI: 10.1093/bioinformatics/btm069.
Copyright 2014,2015, Jared Oyler.
This file is part of TopoWx.
TopoWx is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TopoWx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TopoWx. If not, see <http://www.gnu.org/licenses/>.
'''
__all__ = ['InfillMatrixPPCA', 'infill_daily_obs']
import numpy as np
from twx.db import STN_ID, LON, LAT, UTC_OFFSET
from twx.utils import pca_svd, grt_circle_dist
import os
from scipy import stats
from twx.utils.perf_metrics import calc_ioa_d1
# rpy2
robjects = None
numpy2ri = None
r = None
ri = None
R_LOADED = False
MIN_POR_OVERLAP = 2.0 / 3.0
MAX_DISTANCE = 75 # in km
MAX_NNR_VAR = 0.99
MIN_NNR_VAR = 0.90
MIN_DAILY_NGHBRS = 3
NNGH_NNR = 4
NONOPTIM_LOW_PERF = 'low infill performance'
NONOPTIM_IMPOSS_VAL = 'impossible infill values'
NONOPTIM_VARI_CHGPT = 'variance change point'
class InfillMatrixPPCA(object):
'''
A class for building a data matrix of surrounding neighbor station observations for a
target station to run PPCA missing value infilling.
'''
def __init__(self, stn_id, stn_da, tair_var, nnr_ds, vname_mean, vname_vari, min_dist=-1, max_dist=MAX_DISTANCE, tair_mask=None, day_mask=None, add_bestngh=True):
'''
Parameters
----------
stn_id : str
The station id of the target station
stn_da : twx.db.StationDataDb
The station database from which all target and neighboring
station observations should be loaded
tair_var : str
The temperature variable ('tmin' or 'tmax') of focus.
nnr_ds : twx.db.NNRNghData
A NNRNghData object for loading reanalysis data to help supplement
the neighboring station data.
min_dist : int, optional
The minimum distance (exclusive) for which to search for neighboring stations.
Pass -1 if there should be no minimum distance
max_dist : int, optional
The maximum distance (inclusive) for which to search for neighboring stations.
Defaults to MAX_DISTANCE
tair_mask : ndarray, optional
A boolean mask specifying which observations at the target should
artificially be set to nan. This can be used for cross-validation.
Mask size must equal the time series length specified by the passed
StationDataDb.
        add_bestngh : boolean, optional
Add the best correlated neighbor to the data matrix even if the time
series period-of-record of the neighbor is less than the
MIN_POR_OVERLAP threshold for the entire period over which
the target station is being infilled.
'''
_load_R()
idx_target = np.nonzero(stn_da.stn_ids == stn_id)[0][0]
stn = stn_da.stns[idx_target]
target_tair = stn_da.load_all_stn_obs_var(np.array([stn_id]), tair_var)[0]
target_tair = target_tair.astype(np.float64)
target_norm = stn[vname_mean]
target_std = np.sqrt(stn[vname_vari])
if tair_mask is not None:
target_tair[tair_mask] = np.nan
if day_mask is None:
day_mask = np.ones(target_tair.size, dtype=np.bool)
day_idx = np.nonzero(day_mask)[0]
target_tair = np.take(target_tair, day_idx)
# Number of observations threshold for entire period that is being infilled
nthres_all = np.round(MIN_POR_OVERLAP * target_tair.size)
# Number of observations threshold just for the target's period of record
valid_tair_mask = np.isfinite(target_tair)
ntair_valid = np.nonzero(valid_tair_mask)[0].size
nthres_target_por = np.round(MIN_POR_OVERLAP * ntair_valid)
# Make sure to not include the target station itself as a neighbor station
# and stations that do not have a mean or variance
stns_mask = np.logical_and(stn_da.stns[STN_ID] != stn_id,
np.logical_and(np.isfinite(stn_da.stns[vname_mean]),
np.isfinite(stn_da.stns[vname_vari])))
all_stns = stn_da.stns[stns_mask]
dists = grt_circle_dist(stn[LON], stn[LAT], all_stns[LON], all_stns[LAT])
mask_dists = np.logical_and(dists <= max_dist, dists > min_dist)
while np.nonzero(mask_dists)[0].size == 0:
max_dist += MAX_DISTANCE / 2.0
mask_dists = np.logical_and(dists <= max_dist, dists > min_dist)
ngh_stns = all_stns[mask_dists]
ngh_dists = dists[mask_dists]
ngh_ids = ngh_stns[STN_ID]
nghid_mask = np.in1d(stn_da.stn_ids, ngh_ids, assume_unique=True)
ngh_norms = stn_da.stns[vname_mean][nghid_mask]
ngh_std = np.sqrt(stn_da.stns[vname_vari][nghid_mask])
ngh_tair = stn_da.load_all_stn_obs_var(ngh_ids, tair_var, set_flagged_nan=True)[0]
ngh_tair = ngh_tair.astype(np.float64)
if len(ngh_tair.shape) == 1:
ngh_tair.shape = (ngh_tair.size, 1)
ngh_tair = np.take(ngh_tair, day_idx, axis=0)
dist_sort = np.argsort(ngh_dists)
ngh_stns = ngh_stns[dist_sort]
ngh_dists = ngh_dists[dist_sort]
ngh_norms = ngh_norms[dist_sort]
ngh_std = ngh_std[dist_sort]
ngh_tair = ngh_tair[:, dist_sort]
overlap_mask_tair = np.zeros(ngh_stns.size, dtype=np.bool)
ioa = np.zeros(ngh_stns.size)
best_ioa = 0
i = None
for x in np.arange(ngh_stns.size):
valid_ngh_mask = np.isfinite(ngh_tair[:, x])
nlap = np.nonzero(valid_ngh_mask)[0].size
overlap_mask = np.logical_and(valid_tair_mask, valid_ngh_mask)
nlap_stn = np.nonzero(overlap_mask)[0].size
if nlap >= nthres_all and nlap_stn >= nthres_target_por:
ioa[x] = calc_ioa_d1(target_tair[overlap_mask], ngh_tair[:, x][overlap_mask])
overlap_mask_tair[x] = True
elif nlap_stn >= nthres_target_por and add_bestngh:
aioa = calc_ioa_d1(target_tair[overlap_mask], ngh_tair[:, x][overlap_mask])
if aioa > best_ioa:
ioa[x] = aioa
overlap_mask_tair[x] = True
                    if i is not None:
overlap_mask_tair[i] = False
i = x
best_ioa = aioa
if add_bestngh and i is not None:
if ioa[i] != np.max(ioa) or ioa[i] < 0.7:
overlap_mask_tair[i] = False
ioa = ioa[overlap_mask_tair]
ngh_dists = ngh_dists[overlap_mask_tair]
ngh_tair = ngh_tair[:, overlap_mask_tair]
ngh_norms = ngh_norms[overlap_mask_tair]
ngh_std = ngh_std[overlap_mask_tair]
if ioa.size > 0:
ioa_sort = np.argsort(ioa)[::-1]
ioa = ioa[ioa_sort]
ngh_dists = ngh_dists[ioa_sort]
ngh_tair = ngh_tair[:, ioa_sort]
ngh_norms = ngh_norms[ioa_sort]
ngh_std = ngh_std[ioa_sort]
target_tair.shape = (target_tair.size, 1)
pca_tair = np.hstack((target_tair, ngh_tair))
ngh_dists = np.concatenate((np.zeros(1), ngh_dists))
ioa = np.concatenate((np.ones(1), ioa))
ngh_norms = np.concatenate((np.array([target_norm]), ngh_norms))
ngh_std = np.concatenate((np.array([target_std]), ngh_std))
valid_pca_mask = np.isfinite(pca_tair)
nnghs_per_day = np.sum(valid_pca_mask , axis=1)
else:
target_tair.shape = (target_tair.size, 1)
pca_tair = target_tair
valid_tair_mask.shape = (valid_tair_mask.size, 1)
valid_pca_mask = valid_tair_mask
ioa = np.ones(1)
ngh_dists = np.zeros(1)
ngh_norms = np.array([target_norm])
ngh_std = np.array([target_std])
nnghs_per_day = np.zeros(target_tair.shape[0])
#############################################################
self.pca_tair = np.array(pca_tair, dtype=np.float64)
self.valid_pca_mask = valid_pca_mask
self.ngh_ioa = ioa
self.ngh_dists = ngh_dists
self.ngh_norms = ngh_norms
self.ngh_std = ngh_std
self.max_dist = max_dist
self.stn_id = stn_id
self.stn_da = stn_da
self.tair_var = tair_var
self.tair_mask = tair_mask
self.nnghs_per_day = nnghs_per_day
self.nnr_ds = nnr_ds
self.stn = stn
self.day_idx = day_idx
self.day_mask = day_mask
self.vname_mean = vname_mean
self.vname_vari = vname_vari
def __extend_ngh_radius(self, extend_by):
'''
Extend the search radius for neighboring stations.
The minimum of the search radius is the previous max distance.
Parameters
----------
extend_by: int
The amount (km) by which to extend the radius.
'''
min_dist = self.max_dist
max_dist = self.max_dist + extend_by
pca_matrix2 = InfillMatrixPPCA(self.stn_id, self.stn_da, self.tair_var, self.nnr_ds,
self.vname_mean, self.vname_vari, min_dist,
max_dist, self.tair_mask, self.day_mask, add_bestngh=False)
self.__merge(pca_matrix2)
self.max_dist = max_dist
def __merge(self, matrix2):
'''
Merge this InfillMatrixPPCA with another InfillMatrixPPCA
Parameters
----------
matrix2: InfillMatrixPPCA
The other InfillMatrixPPCA with which to merge
'''
self.pca_tair = np.hstack((self.pca_tair, matrix2.pca_tair[:, 1:]))
self.valid_pca_mask = np.hstack((self.valid_pca_mask, matrix2.valid_pca_mask[:, 1:]))
self.ngh_ioa = np.concatenate((self.ngh_ioa, matrix2.ngh_ioa[1:]))
self.ngh_dists = np.concatenate((self.ngh_dists, matrix2.ngh_dists[1:]))
self.ngh_norms = np.concatenate((self.ngh_norms, matrix2.ngh_norms[1:]))
self.ngh_std = np.concatenate((self.ngh_std, matrix2.ngh_std[1:]))
if self.ngh_ioa.size > 0:
ioa_sort = np.argsort(self.ngh_ioa[1:])[::-1]
ioa_sort = np.concatenate([np.zeros(1, dtype=np.int), ioa_sort + 1])
self.pca_tair = self.pca_tair[:, ioa_sort]
self.valid_pca_mask = self.valid_pca_mask[:, ioa_sort]
self.ngh_ioa = self.ngh_ioa[ioa_sort]
self.ngh_dists = self.ngh_dists[ioa_sort]
self.ngh_norms = self.ngh_norms[ioa_sort]
self.ngh_std = self.ngh_std[ioa_sort]
self.nnghs_per_day = np.sum(self.valid_pca_mask[:, 1:], axis=1)
else:
self.nnghs_per_day = np.zeros(self.pca_tair.shape[1])
def __has_min_daily_nghs(self, nnghs, min_daily_nghs):
'''
Check to see if there is a minimum number of
neighbor observations each day
'''
trim_valid_mask = self.valid_pca_mask[:, 0:1 + nnghs]
nnghs_per_day = np.sum(trim_valid_mask[:, 1:], axis=1)
return np.min(nnghs_per_day) >= min_daily_nghs
def infill(self, min_daily_nnghs=MIN_DAILY_NGHBRS, nnghs_nnr=NNGH_NNR, max_nnr_var=MAX_NNR_VAR, chk_perf=True, npcs=0, frac_obs_initnpcs=0.5, ppca_varyexplain=0.99, ppcaConThres=1e-5, verbose=False):
'''
Infill missing values for an incomplete station time series.
Parameters
----------
min_daily_nnghs : int, optional
The minimum neighbors required for each day.
nnghs_nnr : int, optional
The number of neighboring NCEP/NCAR Reanalysis grid cells
max_nnr_var : float, optional
The required variance explained by principal components of
a S-Mode PCA of the reanalysis data.
chk_perf : boolean, optional
            If true, check the performance of the infilled output and look
            for bad infilled values. If bad values are found, PPCA is rerun
            with different configurations to try to eliminate them.
npcs : int, optional
Use a specific set number of PCs. If npcs = 0, the number of PCs is determined
dynamically based on ppca_varyexplain.
frac_obs_initnpcs : float, optional
The fraction of the total number of columns that should be used as the
initial number of PCs. Example: if frac_obs is 0.5 and the number of columns
is 10, the initial number of PCs will be 5.
ppca_varyexplain : float, optional
The required variance to be explained by the PCs. Example: if 0.99, the first
n PCs that account for 99% of the variance in pca_matrix will be used
ppcaConThres : float, optional
The convergence threshold for the PPCA algorithm.
verbose : boolean, optional
If true, output PPCA algorithm progress.
Returns
----------
fnl_tair : ndarray
Time series of station observations with missing values infilled
mask_infill : ndarray
Boolean array specifying which values were infilled in fnl_tair
infill_tair : ndarray
Time series of station observations with all observations replaced with
values from the infill model regardless of whether or not an
observation was originally missing.
'''
nnghs = min_daily_nnghs
trim_pca_tair = self.pca_tair[:, 0:1 + nnghs]
trim_ngh_norms = self.ngh_norms[0:1 + nnghs]
trim_ngh_std = self.ngh_std[0:1 + nnghs]
engh_dly_nghs = self.__has_min_daily_nghs(nnghs, min_daily_nnghs)
actual_nnghs = trim_pca_tair.shape[1] - 1
while actual_nnghs < nnghs or not engh_dly_nghs:
if actual_nnghs == nnghs and not engh_dly_nghs:
nnghs += 1
else:
self.__extend_ngh_radius(MAX_DISTANCE / 2.0)
trim_pca_tair = self.pca_tair[:, 0:1 + nnghs]
trim_ngh_norms = self.ngh_norms[0:1 + nnghs]
trim_ngh_std = self.ngh_std[0:1 + nnghs]
engh_dly_nghs = self.__has_min_daily_nghs(nnghs, min_daily_nnghs)
actual_nnghs = trim_pca_tair.shape[1] - 1
#############################################################
nnr_tair = self.nnr_ds.get_nngh_matrix(self.stn[LON], self.stn[LAT], self.tair_var, utc_offset=self.stn[UTC_OFFSET], nngh=nnghs_nnr)
nnr_tair = np.take(nnr_tair, self.day_idx, axis=0)
pc_loads, pc_scores, var_explain = pca_svd(nnr_tair, True, True)
cusum_var = np.cumsum(var_explain)
i = np.nonzero(cusum_var >= max_nnr_var)[0][0]
nnr_tair = pc_scores[:, 0:i + 1]
trim_pca_tair, trim_ngh_norms, trim_ngh_std = _shrink_matrix(trim_pca_tair, trim_ngh_norms, trim_ngh_std, min_daily_nnghs)
if nnr_tair.size > 0:
nnr_norms = np.mean(nnr_tair, dtype=np.float, axis=0)
nnr_std = np.std(nnr_tair, dtype=np.float, axis=0, ddof=1)
trim_pca_tair = np.hstack((trim_pca_tair, nnr_tair))
trim_ngh_norms = np.concatenate((trim_ngh_norms, nnr_norms))
trim_ngh_std = np.concatenate((trim_ngh_std, nnr_std))
############################################################
ppca_rslt = r.ppca_tair(robjects.Matrix(trim_pca_tair),
robjects.FloatVector(trim_ngh_norms),
robjects.FloatVector(trim_ngh_std),
frac_obs=frac_obs_initnpcs,
max_r2cum=ppca_varyexplain,
npcs=npcs,
convThres=ppcaConThres,
verbose=verbose)
infill_tair = np.array(ppca_rslt.rx('ppca_fit'))
infill_tair.shape = (infill_tair.shape[1],)
# npcsr = ppca_rslt.rx('npcs')[0][0]
#############################################################
obs_tair = trim_pca_tair[:, 0]
if chk_perf:
non_optimal, reasons, mae, r2 = _is_nonoptimal_infill(infill_tair, self)
if non_optimal:
infill_tairs = []
nonoptim_reasons = []
maes = []
r2s = []
infill_tairs.append(infill_tair)
nonoptim_reasons.append(reasons)
maes.append(mae)
r2s.append(r2)
print "".join(["WARNING|", self.stn_id, " had nonoptimal infill for ",
self.tair_var, " using ", self.vname_mean,
" as the mean. Reasons: ", "|".join(reasons), ". MAE:%.2f, R2:%.2f. Retrying..." % (mae, r2)])
if MIN_NNR_VAR < max_nnr_var:
infill_tair = self.infill(min_daily_nnghs, nnghs_nnr, MIN_NNR_VAR, False, npcs, frac_obs_initnpcs, ppca_varyexplain, ppcaConThres, verbose)[2]
non_optimal, reasons, mae, r2 = _is_nonoptimal_infill(infill_tair, self)
infill_tairs.append(infill_tair)
nonoptim_reasons.append(reasons)
maes.append(mae)
r2s.append(r2)
if non_optimal:
newThres = [1e-6, 1e-7]
for aThres in newThres:
infill_tair = self.infill(min_daily_nnghs, nnghs_nnr, max_nnr_var, False, npcs, frac_obs_initnpcs, ppca_varyexplain, aThres, verbose)[2]
non_optimal, reasons, mae, r2 = _is_nonoptimal_infill(infill_tair, self)
infill_tairs.append(infill_tair)
nonoptim_reasons.append(reasons)
maes.append(mae)
r2s.append(r2)
if not non_optimal:
break
if non_optimal:
nreasons = np.array([len(a_reasons) for a_reasons in nonoptim_reasons])
first_reason = np.array([a_reasons[0] for a_reasons in nonoptim_reasons])
infill_tairs = np.array(infill_tairs)
nonoptim_reasons = np.array(nonoptim_reasons, dtype=np.object)
maes = np.array(maes)
r2s = np.array(r2s)
mask_nreasons = np.logical_and(nreasons == 1, first_reason == NONOPTIM_LOW_PERF)
if np.sum(mask_nreasons) >= 1:
infill_tairs = infill_tairs[mask_nreasons]
nonoptim_reasons = nonoptim_reasons[mask_nreasons]
maes = maes[mask_nreasons]
r2s = r2s[mask_nreasons]
i = np.argmin(maes)
infill_tair = infill_tairs[i]
reasons = nonoptim_reasons[i]
mae = maes[i]
r2 = r2s[i]
print "".join(["ERROR|", self.stn_id, " had nonoptimal infill for ",
self.tair_var, " using ", self.vname_mean,
" as the mean even after retries. Reasons: ",
"|".join(reasons), ". MAE:%.2f, R2:%.2f" % (mae, r2)])
else:
print "".join(["SUCCESS INFILL RETRY|", self.stn_id, " fixed nonoptimal infill for ", self.tair_var,
" using ", self.vname_mean, " as the mean."])
fnl_tair = np.copy(obs_tair)
mask_infill = np.isnan(fnl_tair)
fnl_tair[mask_infill] = infill_tair[mask_infill]
return fnl_tair, mask_infill, infill_tair
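# Usage sketch (hedged; 'stn_da' and 'nnr_ds' are pre-built twx database
# objects, and the station id and variable names are hypothetical):
#   m = InfillMatrixPPCA('GHCND_USC00240000', stn_da, 'tmin', nnr_ds,
#                        'norm_tmin', 'vari_tmin')
#   fnl_tair, mask_infill, infill_tair = m.infill()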
def infill_daily_obs(stn_id, stn_da, tair_var, nnr_ds, vname_mean, vname_vari, tair_mask=None, day_masks=None, add_bestngh=True,
min_daily_nnghs=MIN_DAILY_NGHBRS, nnghs_nnr=NNGH_NNR, max_nnr_var=MAX_NNR_VAR, chk_perf=True,
npcs=0, frac_obs_initnpcs=0.5, ppca_varyexplain=0.99, ppcaConThres=1e-5, verbose=False):
    if day_masks is None:
a_matrix = InfillMatrixPPCA(stn_id, stn_da, tair_var, nnr_ds, vname_mean, vname_vari,
tair_mask=tair_mask, day_mask=None, add_bestngh=add_bestngh)
fnl_tair, mask_infill, infill_tair = a_matrix.infill(min_daily_nnghs=min_daily_nnghs, nnghs_nnr=nnghs_nnr, max_nnr_var=max_nnr_var,
chk_perf=chk_perf, npcs=npcs, frac_obs_initnpcs=frac_obs_initnpcs,
ppca_varyexplain=ppca_varyexplain, ppcaConThres=ppcaConThres, verbose=verbose)
else:
n_masks = len(day_masks)
fnl_tair = np.empty(stn_da.days.size)
mask_infill = np.zeros(stn_da.days.size, dtype=np.bool)
infill_tair = np.empty(stn_da.days.size)
for x in np.arange(n_masks):
a_matrix = InfillMatrixPPCA(stn_id, stn_da, tair_var, nnr_ds, vname_mean[x], vname_vari[x],
tair_mask=tair_mask, day_mask=day_masks[x], add_bestngh=add_bestngh)
a_fnl_tair, a_mask_infill, a_infill_tair = a_matrix.infill(min_daily_nnghs=min_daily_nnghs, nnghs_nnr=nnghs_nnr, max_nnr_var=max_nnr_var,
chk_perf=chk_perf, npcs=npcs, frac_obs_initnpcs=frac_obs_initnpcs,
ppca_varyexplain=ppca_varyexplain, ppcaConThres=ppcaConThres, verbose=verbose)
fnl_tair[day_masks[x]] = a_fnl_tair
mask_infill[day_masks[x]] = a_mask_infill
infill_tair[day_masks[x]] = a_infill_tair
return fnl_tair, mask_infill, infill_tair
def _is_nonoptimal_infill(infill_tair, infill_matrix):
non_optimal = False
reasons = []
obs_tair = infill_matrix.pca_tair[:, 0]
chk_obs = obs_tair[infill_matrix.valid_pca_mask[:, 0]]
chk_fit = infill_tair[infill_matrix.valid_pca_mask[:, 0]]
mae = np.mean(np.abs(chk_fit - chk_obs))
r_value = stats.linregress(chk_obs, chk_fit)[2]
r2 = r_value ** 2 # r-squared value; variance explained
hasVarChgPt = r.hasVarChgPt(robjects.FloatVector(infill_tair))[0]
# check for low infill performance
if mae > 2.0 or r2 < 0.7:
non_optimal = True
reasons.append(NONOPTIM_LOW_PERF)
    # Check for physically impossible values (beyond the observed global
    # temperature extremes of roughly 57.7 C and -89.4 C)
if np.sum(infill_tair > 57.7) > 0 or np.sum(infill_tair < -89.4) > 0:
non_optimal = True
reasons.append(NONOPTIM_IMPOSS_VAL)
# Check for variance change point
if hasVarChgPt:
non_optimal = True
reasons.append(NONOPTIM_VARI_CHGPT)
return non_optimal, reasons, mae, r2
def _shrink_matrix(aMatrix, nghNorms, nghStd, minNghs):
'''
    After the top minNghs stations, remove a neighboring station's time
    series from the matrix if it does not add any observations on days
    that still have fewer than minNghs neighbor observations.
'''
validMask = np.isfinite(aMatrix[:, 1:minNghs + 1])
nObs = np.sum(validMask, axis=1)
maskBelowMin = nObs < minNghs
keepCol = np.ones(aMatrix.shape[1], dtype=np.bool)
for x in np.arange(minNghs + 1, aMatrix.shape[1]):
aCol = aMatrix[:, x]
aColValidMask = np.isfinite(aCol)
if np.sum(np.logical_and(maskBelowMin, aColValidMask)) > 0:
aColValidMask.shape = (aColValidMask.size, 1)
validMask = np.hstack((validMask, aColValidMask))
nObs = np.sum(validMask, axis=1)
maskBelowMin = nObs < minNghs
else:
keepCol[x] = False
return aMatrix[:, keepCol], nghNorms[keepCol], nghStd[keepCol]
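# Example: with minNghs=3, a column beyond the first three neighbors is kept
# only if it supplies at least one observation on a day that still has fewer
# than three valid neighbor observations; otherwise the column is dropped.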
def _calc_ioa(x, y):
'''
Calculate the index of agreement (Durre et al. 2010; Legates and McCabe 1999) between x and y
'''
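    # The variant computed below is:
    #   IOA = 1 - sum(|y - x|) / sum(|x - y_mean| + |y - y_mean|)
    # which ranges from 0 (no agreement) to 1 (perfect agreement).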
y_mean = np.mean(y)
d = np.sum(np.abs(x - y_mean) + np.abs(y - y_mean))
if d == 0.0:
print "|".join(["WARNING: _calc_ioa: x, y identical"])
# The x and y series are exactly the same
# Return a perfect ioa
return 1.0
ioa = 1.0 - (np.sum(np.abs(y - x)) / d)
# if ioa == 0:
# print "|".join(["WARNING: _calc_ioa: ioa == 0"])
# #Means all ys are the same or only one observation.
# #This could possibly happen with prcp in arid regions
# #Add on an extra observation to the time series that has same difference as x[0] and y[0]
# x_new = np.concatenate([x, np.array([x[0] + (x[0] * .1)])])
# y_new = np.concatenate([y, np.array([y[0] + (x[0] * .1)])])
#
# y_mean = np.mean(y_new)
# ioa = 1.0 - (np.sum(np.abs(y_new - x_new)) / np.sum(np.abs(x_new - y_mean) + np.abs(y_new - y_mean)))
return ioa
def _load_R():
    '''Lazily import rpy2/R and source the PPCA infill R script (runs once).'''
    global R_LOADED
if not R_LOADED:
global robjects
global numpy2ri
global r
global ri
# https://github.com/ContinuumIO/anaconda-issues/issues/152
import readline
import rpy2
import rpy2.robjects
robjects = rpy2.robjects
r = robjects.r
import rpy2.rinterface
ri = rpy2.rinterface
from rpy2.robjects import numpy2ri
numpy2ri.activate()
path_root = os.path.dirname(__file__)
fpath_rscript = os.path.join(path_root, 'rpy', 'pca_infill.R')
r.source(fpath_rscript)
R_LOADED = True
|
jaredwo/topowx
|
twx/infill/infill_daily.py
|
Python
|
gpl-3.0
| 27,924
|
[
"Bioconductor"
] |
0ae3ac503b469199242919b86cc825faaa61e2b124f006c2199895d1f056561b
|
import os
import tempfile
from ase.io import write
import ase.parallel as parallel
def view(atoms, data=None, viewer='ase-gui', repeat=None, block=False):
# Ignore for parallel calculations:
if parallel.size != 1:
return
vwr = viewer.lower()
if vwr == 'ase-gui':
format = 'traj'
if repeat is None:
command = 'ase-gui'
else:
command = 'ase-gui --repeat=%d,%d,%d' % tuple(repeat)
repeat = None
elif vwr == 'vmd':
format = 'cube'
command = 'vmd'
elif vwr == 'rasmol':
format = 'pdb'
command = 'rasmol -pdb'
elif vwr == 'xmakemol':
format = 'xyz'
command = 'xmakemol -f'
elif vwr == 'gopenmol':
format = 'xyz'
command = 'rungOpenMol'
elif vwr == 'avogadro':
format = 'cube'
command = 'avogadro'
elif vwr == 'sage':
from ase.visualize.sage import view_sage_jmol
view_sage_jmol(atoms)
return
else:
raise RuntimeError('Unknown viewer: ' + viewer)
    fd, filename = tempfile.mkstemp('.' + format, 'ase-')
    os.close(fd)  # close the low-level handle; write() reopens by name
if repeat is not None:
        atoms = atoms.repeat(repeat)
if data is None:
write(filename, atoms, format=format)
else:
write(filename, atoms, format=format, data=data)
if block:
os.system('%s %s' % (command, filename))
os.remove(filename)
else:
if os.name in ['ce', 'nt']: # Win
# XXX: how to make it non-blocking?
os.system('%s %s' % (command, filename))
os.remove(filename)
else:
os.system('%s %s & ' % (command, filename))
os.system('(sleep 60; rm %s) &' % filename)
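# A minimal usage sketch (the Atoms object below is hypothetical demo data;
# external viewers must be installed and on PATH):
def _example_view():
    from ase import Atoms
    atoms = Atoms('H2', positions=[(0, 0, 0), (0, 0, 0.74)])
    view(atoms)                    # default: non-blocking ase-gui
    view(atoms, viewer='vmd')      # write a cube file and launch VMD
    view(atoms, repeat=(2, 2, 2))  # view a 2x2x2 repetition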
|
suttond/MODOI
|
ase/visualize/__init__.py
|
Python
|
lgpl-3.0
| 1,727
|
[
"ASE",
"Avogadro",
"RasMol",
"VMD"
] |
c7802f48f43cf026b1b8a45ed738529ad0577ee2a945b72eb783e365571680e1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2011 Edgewall Software
# Copyright (C) 2003-2007 Jonas Borgström <jonas@edgewall.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
"""Trac Environment model and related APIs."""
from __future__ import with_statement
import os.path
import pkg_resources
import setuptools
import sys
from urlparse import urlsplit
from trac import db_default
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.cache import CacheManager
from trac.config import *
from trac.core import Component, ComponentManager, implements, Interface, \
ExtensionPoint, TracError
from trac.db.api import (DatabaseManager, QueryContextManager,
TransactionContextManager, with_transaction)
from trac.util import copytree, create_file, get_pkginfo, lazy, makedirs, \
read_file
from trac.util.compat import sha1
from trac.util.concurrency import threading
from trac.util.text import exception_to_unicode, path_to_unicode, printerr, \
printout
from trac.util.translation import _, N_
from trac.versioncontrol import RepositoryManager
from trac.web.href import Href
__all__ = ['Environment', 'IEnvironmentSetupParticipant', 'open_environment']
# Content of the VERSION file in the environment
_VERSION = 'Trac Environment Version 1'
class ISystemInfoProvider(Interface):
"""Provider of system information, displayed in the "About Trac"
page and in internal error reports.
"""
def get_system_info():
"""Yield a sequence of `(name, version)` tuples describing the
name and version information of external packages used by a
component.
"""
class IEnvironmentSetupParticipant(Interface):
"""Extension point interface for components that need to participate in
the creation and upgrading of Trac environments, for example to create
additional database tables.
Please note that `IEnvironmentSetupParticipant` instances are called in
arbitrary order. If your upgrades must be ordered consistently, please
implement the ordering in a single `IEnvironmentSetupParticipant`. See
the database upgrade infrastructure in Trac core for an example.
"""
def environment_created():
"""Called when a new Trac environment is created."""
def environment_needs_upgrade(db):
"""Called when Trac checks whether the environment needs to be
upgraded.
Should return `True` if this participant needs an upgrade to
be performed, `False` otherwise.
"""
def upgrade_environment(db):
"""Actually perform an environment upgrade.
Implementations of this method don't need to commit any
database transactions. This is done implicitly for each
participant if the upgrade succeeds without an error being
raised.
However, if the `upgrade_environment` consists of small,
restartable, steps of upgrade, it can decide to commit on its
own after each successful step.
"""
class BackupError(RuntimeError):
"""Exception raised during an upgrade when the DB backup fails."""
class Environment(Component, ComponentManager):
"""Trac environment manager.
Trac stores project information in a Trac environment. It consists
of a directory structure containing among other things:
* a configuration file,
* project-specific templates and plugins,
* the wiki and ticket attachments files,
* the SQLite database file (stores tickets, wiki pages...)
in case the database backend is sqlite
"""
implements(ISystemInfoProvider)
required = True
system_info_providers = ExtensionPoint(ISystemInfoProvider)
setup_participants = ExtensionPoint(IEnvironmentSetupParticipant)
components_section = ConfigSection('components',
"""This section is used to enable or disable components
provided by plugins, as well as by Trac itself. The component
to enable/disable is specified via the name of the
        option. Whether it's enabled is determined by the option value;
setting the value to `enabled` or `on` will enable the
component, any other value (typically `disabled` or `off`)
will disable the component.
The option name is either the fully qualified name of the
        component or the module/package prefix of the component. The
former enables/disables a specific component, while the latter
enables/disables any component in the specified
package/module.
Consider the following configuration snippet:
{{{
[components]
trac.ticket.report.ReportModule = disabled
webadmin.* = enabled
}}}
The first option tells Trac to disable the
[wiki:TracReports report module].
The second option instructs Trac to enable all components in
the `webadmin` package. Note that the trailing wildcard is
required for module/package matching.
To view the list of active components, go to the ''Plugins''
page on ''About Trac'' (requires `CONFIG_VIEW`
[wiki:TracPermissions permissions]).
See also: TracPlugins
""")
shared_plugins_dir = PathOption('inherit', 'plugins_dir', '',
"""Path to the //shared plugins directory//.
Plugins in that directory are loaded in addition to those in
the directory of the environment `plugins`, with this one
taking precedence.
(''since 0.11'')""")
base_url = Option('trac', 'base_url', '',
"""Reference URL for the Trac deployment.
This is the base URL that will be used when producing
documents that will be used outside of the web browsing
context, like for example when inserting URLs pointing to Trac
resources in notification e-mails.""")
base_url_for_redirect = BoolOption('trac', 'use_base_url_for_redirect',
False,
"""Optionally use `[trac] base_url` for redirects.
In some configurations, usually involving running Trac behind
        an HTTP proxy, Trac can't automatically reconstruct the URL
that is used to access it. You may need to use this option to
force Trac to use the `base_url` setting also for
redirects. This introduces the obvious limitation that this
environment will only be usable when accessible from that URL,
as redirects are frequently used. ''(since 0.10.5)''""")
secure_cookies = BoolOption('trac', 'secure_cookies', False,
"""Restrict cookies to HTTPS connections.
When true, set the `secure` flag on all cookies so that they
are only sent to the server on HTTPS connections. Use this if
your Trac instance is only accessible through HTTPS. (''since
0.11.2'')""")
project_name = Option('project', 'name', 'My Project',
"""Name of the project.""")
project_description = Option('project', 'descr', 'My example project',
"""Short description of the project.""")
project_url = Option('project', 'url', '',
"""URL of the main project web site, usually the website in
which the `base_url` resides. This is used in notification
e-mails.""")
project_admin = Option('project', 'admin', '',
"""E-Mail address of the project's administrator.""")
project_admin_trac_url = Option('project', 'admin_trac_url', '.',
"""Base URL of a Trac instance where errors in this Trac
should be reported.
This can be an absolute or relative URL, or '.' to reference
this Trac instance. An empty value will disable the reporting
buttons. (''since 0.11.3'')""")
project_footer = Option('project', 'footer',
N_('Visit the Trac open source project at<br />'
'<a href="http://trac.edgewall.org/">'
'http://trac.edgewall.org/</a>'),
"""Page footer text (right-aligned).""")
project_icon = Option('project', 'icon', 'common/trac.ico',
"""URL of the icon of the project.""")
log_type = Option('logging', 'log_type', 'none',
"""Logging facility to use.
Should be one of (`none`, `file`, `stderr`, `syslog`, `winlog`).""")
log_file = Option('logging', 'log_file', 'trac.log',
"""If `log_type` is `file`, this should be a path to the
log-file. Relative paths are resolved relative to the `log`
directory of the environment.""")
log_level = Option('logging', 'log_level', 'DEBUG',
"""Level of verbosity in log.
Should be one of (`CRITICAL`, `ERROR`, `WARN`, `INFO`, `DEBUG`).""")
log_format = Option('logging', 'log_format', None,
"""Custom logging format.
If nothing is set, the following will be used:
Trac[$(module)s] $(levelname)s: $(message)s
In addition to regular key names supported by the Python
logger library (see
http://docs.python.org/library/logging.html), one could use:
- $(path)s the path for the current environment
- $(basename)s the last path component of the current environment
- $(project)s the project name
Note the usage of `$(...)s` instead of `%(...)s` as the latter form
would be interpreted by the ConfigParser itself.
Example:
`($(thread)d) Trac[$(basename)s:$(module)s] $(levelname)s: $(message)s`
''(since 0.10.5)''""")
def __init__(self, path, create=False, options=[]):
"""Initialize the Trac environment.
:param path: the absolute path to the Trac environment
:param create: if `True`, the environment is created and
populated with default data; otherwise, the
environment is expected to already exist.
:param options: A list of `(section, name, value)` tuples that
define configuration options
"""
ComponentManager.__init__(self)
self.path = path
self.systeminfo = []
self._href = self._abs_href = None
if create:
self.create(options)
else:
self.verify()
self.setup_config()
if create:
for setup_participant in self.setup_participants:
setup_participant.environment_created()
def get_systeminfo(self):
"""Return a list of `(name, version)` tuples describing the
name and version information of external packages used by Trac
and plugins.
"""
info = self.systeminfo[:]
for provider in self.system_info_providers:
info.extend(provider.get_system_info() or [])
info.sort(key=lambda (name, version): (name != 'Trac', name.lower()))
return info
# ISystemInfoProvider methods
def get_system_info(self):
from trac import core, __version__ as VERSION
yield 'Trac', pkg_resources.resource_string('trac', 'TRAC_VERSION')
yield 'Bloodhound Trac', get_pkginfo(core).get('version', VERSION)
yield 'Python', sys.version
yield 'setuptools', setuptools.__version__
from trac.util.datefmt import pytz
if pytz is not None:
yield 'pytz', pytz.__version__
def component_activated(self, component):
"""Initialize additional member variables for components.
Every component activated through the `Environment` object
gets three member variables: `env` (the environment object),
`config` (the environment configuration) and `log` (a logger
object)."""
component.env = self
component.config = self.config
component.log = self.log
def _component_name(self, name_or_class):
name = name_or_class
if not isinstance(name_or_class, basestring):
name = name_or_class.__module__ + '.' + name_or_class.__name__
return name.lower()
@property
def _component_rules(self):
try:
return self._rules
except AttributeError:
self._rules = {}
for name, value in self.components_section.options():
if name.endswith('.*'):
name = name[:-2]
self._rules[name.lower()] = value.lower() in ('enabled', 'on')
return self._rules
def is_component_enabled(self, cls):
"""Implemented to only allow activation of components that are
not disabled in the configuration.
This is called by the `ComponentManager` base class when a
component is about to be activated. If this method returns
`False`, the component does not get activated. If it returns
`None`, the component only gets activated if it is located in
the `plugins` directory of the environment.
"""
component_name = self._component_name(cls)
# Disable the pre-0.11 WebAdmin plugin
# Please note that there's no recommendation to uninstall the
# plugin because doing so would obviously break the backwards
# compatibility that the new integration administration
# interface tries to provide for old WebAdmin extensions
if component_name.startswith('webadmin.'):
self.log.info("The legacy TracWebAdmin plugin has been "
"automatically disabled, and the integrated "
"administration interface will be used "
"instead.")
return False
rules = self._component_rules
cname = component_name
while cname:
enabled = rules.get(cname)
if enabled is not None:
return enabled
idx = cname.rfind('.')
if idx < 0:
break
cname = cname[:idx]
# By default, all components in the trac package are enabled
return component_name.startswith('trac.') or None
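    # Illustration of the lookup above with a hypothetical configuration:
    #
    #     [components]
    #     acct_mgr.* = enabled
    #     acct_mgr.web_ui.LoginModule = disabled
    #
    # For 'acct_mgr.web_ui.loginmodule' the exact rule is found on the
    # first pass of the loop, so the most specific option wins over the
    # package-level rule; names matching no rule fall through to the
    # trac.* default on the last line.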
def enable_component(self, cls):
"""Enable a component or module."""
self._component_rules[self._component_name(cls)] = True
def verify(self):
"""Verify that the provided path points to a valid Trac environment
directory."""
try:
tag = read_file(os.path.join(self.path, 'VERSION')).splitlines()[0]
if tag != _VERSION:
raise Exception("Unknown Trac environment type '%s'" % tag)
except Exception, e:
raise TracError("No Trac environment found at %s\n%s"
% (self.path, e))
def get_db_cnx(self):
"""Return a database connection from the connection pool
:deprecated: Use :meth:`db_transaction` or :meth:`db_query` instead
`db_transaction` for obtaining the `db` database connection
which can be used for performing any query
(SELECT/INSERT/UPDATE/DELETE)::
with env.db_transaction as db:
...
Note that within the block, you don't need to (and shouldn't)
call ``commit()`` yourself, the context manager will take care
of it (if it's the outermost such context manager on the
stack).
`db_query` for obtaining a `db` database connection which can
be used for performing SELECT queries only::
with env.db_query as db:
...
"""
return DatabaseManager(self).get_connection()
@lazy
def db_exc(self):
"""Return an object (typically a module) containing all the
backend-specific exception types as attributes, named
according to the Python Database API
(http://www.python.org/dev/peps/pep-0249/).
To catch a database exception, use the following pattern::
try:
with env.db_transaction as db:
...
except env.db_exc.IntegrityError, e:
...
"""
return DatabaseManager(self).get_exceptions()
def with_transaction(self, db=None):
"""Decorator for transaction functions :deprecated:"""
return with_transaction(self, db)
def get_read_db(self):
"""Return a database connection for read purposes :deprecated:
See `trac.db.api.get_read_db` for detailed documentation."""
return DatabaseManager(self).get_connection(readonly=True)
@property
def db_query(self):
"""Return a context manager
(`~trac.db.api.QueryContextManager`) which can be used to
obtain a read-only database connection.
Example::
with env.db_query as db:
cursor = db.cursor()
cursor.execute("SELECT ...")
for row in cursor.fetchall():
...
Note that a connection retrieved this way can be "called"
directly in order to execute a query::
with env.db_query as db:
for row in db("SELECT ..."):
...
:warning: after a `with env.db_query as db` block, though the
`db` variable is still defined, you shouldn't use it as it
might have been closed when exiting the context, if this
context was the outermost context (`db_query` or
`db_transaction`).
If you don't need to manipulate the connection itself, this
can even be simplified to::
for row in env.db_query("SELECT ..."):
...
"""
return QueryContextManager(self)
@property
def db_transaction(self):
"""Return a context manager
(`~trac.db.api.TransactionContextManager`) which can be used
to obtain a writable database connection.
Example::
with env.db_transaction as db:
cursor = db.cursor()
cursor.execute("UPDATE ...")
Upon successful exit of the context, the context manager will
commit the transaction. In case of nested contexts, only the
outermost context performs a commit. However, should an
exception happen, any context manager will perform a rollback.
You should *not* call `commit()` yourself within such block,
as this will force a commit even if that transaction is part
of a larger transaction.
Like for its read-only counterpart, you can directly execute a
DML query on the `db`::
with env.db_transaction as db:
db("UPDATE ...")
        :warning: after a `with env.db_transaction as db` block,
though the `db` variable is still available, you shouldn't
use it as it might have been closed when exiting the
context, if this context was the outermost context
(`db_query` or `db_transaction`).
If you don't need to manipulate the connection itself, this
can also be simplified to::
env.db_transaction("UPDATE ...")
"""
return TransactionContextManager(self)
def shutdown(self, tid=None):
"""Close the environment."""
RepositoryManager(self).shutdown(tid)
DatabaseManager(self).shutdown(tid)
if tid is None:
self.log.removeHandler(self._log_handler)
self._log_handler.flush()
self._log_handler.close()
del self._log_handler
def get_repository(self, reponame=None, authname=None):
"""Return the version control repository with the given name,
or the default repository if `None`.
The standard way of retrieving repositories is to use the
methods of `RepositoryManager`. This method is retained here
for backward compatibility.
:param reponame: the name of the repository
:param authname: the user name for authorization (not used
anymore, left here for compatibility with
0.11)
"""
return RepositoryManager(self).get_repository(reponame)
def create(self, options=[]):
"""Create the basic directory structure of the environment,
initialize the database and populate the configuration file
with default values.
If options contains ('inherit', 'file'), default values will
not be loaded; they are expected to be provided by that file
or other options.
"""
# Create the directory structure
if not os.path.exists(self.path):
os.mkdir(self.path)
os.mkdir(self.get_log_dir())
os.mkdir(self.get_htdocs_dir())
os.mkdir(os.path.join(self.path, 'plugins'))
# Create a few files
create_file(os.path.join(self.path, 'VERSION'), _VERSION + '\n')
create_file(os.path.join(self.path, 'README'),
'This directory contains a Trac environment.\n'
'Visit http://trac.edgewall.org/ for more information.\n')
# Setup the default configuration
os.mkdir(os.path.join(self.path, 'conf'))
create_file(os.path.join(self.path, 'conf', 'trac.ini.sample'))
config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'))
for section, name, value in options:
config.set(section, name, value)
config.save()
self.setup_config()
if not any((section, option) == ('inherit', 'file')
for section, option, value in options):
self.config.set_defaults(self)
self.config.save()
# Create the database
DatabaseManager(self).init_db()
def get_version(self, db=None, initial=False):
"""Return the current version of the database. If the
optional argument `initial` is set to `True`, the version of
the database used at the time of creation will be returned.
        In practice, for databases created before 0.11, this will
        return `False`, which is "older" than any db version number.
:since: 0.11
:since 1.0: deprecation warning: the `db` parameter is no
longer used and will be removed in version 1.1.1
"""
rows = self.db_query("""
SELECT value FROM system WHERE name='%sdatabase_version'
""" % ('initial_' if initial else ''))
return rows and int(rows[0][0])
def setup_config(self):
"""Load the configuration file."""
self.config = Configuration(os.path.join(self.path, 'conf', 'trac.ini'),
{'envname': os.path.basename(self.path)})
self.setup_log()
from trac.loader import load_components
plugins_dir = self.shared_plugins_dir
load_components(self, plugins_dir and (plugins_dir,))
def get_templates_dir(self):
"""Return absolute path to the templates directory."""
return os.path.join(self.path, 'templates')
def get_htdocs_dir(self):
"""Return absolute path to the htdocs directory."""
return os.path.join(self.path, 'htdocs')
def get_log_dir(self):
"""Return absolute path to the log directory."""
return os.path.join(self.path, 'log')
def setup_log(self):
"""Initialize the logging sub-system."""
from trac.log import logger_handler_factory
logtype = self.log_type
logfile = self.log_file
if logtype == 'file' and not os.path.isabs(logfile):
logfile = os.path.join(self.get_log_dir(), logfile)
format = self.log_format
logid = 'Trac.%s' % sha1(self.path).hexdigest()
if format:
format = format.replace('$(', '%(') \
.replace('%(path)s', self.path) \
.replace('%(basename)s', os.path.basename(self.path)) \
.replace('%(project)s', self.project_name)
self.log, self._log_handler = logger_handler_factory(
logtype, logfile, self.log_level, logid, format=format)
from trac import core, __version__ as VERSION
self.log.info('-' * 32 + ' environment startup [Trac %s] ' + '-' * 32,
get_pkginfo(core).get('version', VERSION))
def get_known_users(self, cnx=None):
"""Generator that yields information about all known users,
i.e. users that have logged in to this Trac environment and
possibly set their name and email.
This function generates one tuple for every user, of the form
(username, name, email) ordered alpha-numerically by username.
        :param cnx: the database connection; if omitted, a new
connection is retrieved
:since 1.0: deprecation warning: the `cnx` parameter is no
longer used and will be removed in version 1.1.1
"""
for username, name, email in self.db_query("""
SELECT DISTINCT s.sid, n.value, e.value
FROM session AS s
LEFT JOIN session_attribute AS n ON (n.sid=s.sid
and n.authenticated=1 AND n.name = 'name')
LEFT JOIN session_attribute AS e ON (e.sid=s.sid
AND e.authenticated=1 AND e.name = 'email')
WHERE s.authenticated=1 ORDER BY s.sid
"""):
yield username, name, email
def backup(self, dest=None):
"""Create a backup of the database.
:param dest: Destination file; if not specified, the backup is
stored in a file called db_name.trac_version.bak
"""
return DatabaseManager(self).backup(dest)
def needs_upgrade(self):
"""Return whether the environment needs to be upgraded."""
for participant in self.setup_participants:
with self.db_query as db:
if participant.environment_needs_upgrade(db):
self.log.warn("Component %s requires environment upgrade",
participant)
return True
return False
def upgrade(self, backup=False, backup_dest=None):
"""Upgrade database.
:param backup: whether or not to backup before upgrading
:param backup_dest: name of the backup file
:return: whether the upgrade was performed
"""
upgraders = []
for participant in self.setup_participants:
with self.db_query as db:
if participant.environment_needs_upgrade(db):
upgraders.append(participant)
if not upgraders:
return
if backup:
try:
self.backup(backup_dest)
except Exception, e:
raise BackupError(e)
for participant in upgraders:
self.log.info("%s.%s upgrading...", participant.__module__,
participant.__class__.__name__)
with self.db_transaction as db:
participant.upgrade_environment(db)
# Database schema may have changed, so close all connections
DatabaseManager(self).shutdown()
return True
@property
def href(self):
"""The application root path"""
if not self._href:
self._href = Href(urlsplit(self.abs_href.base)[2])
return self._href
@property
def abs_href(self):
"""The application URL"""
if not self._abs_href:
if not self.base_url:
self.log.warn("base_url option not set in configuration, "
"generated links may be incorrect")
self._abs_href = Href('')
else:
self._abs_href = Href(self.base_url)
return self._abs_href
class EnvironmentSetup(Component):
"""Manage automatic environment upgrades."""
required = True
implements(IEnvironmentSetupParticipant)
# IEnvironmentSetupParticipant methods
def environment_created(self):
"""Insert default data into the database."""
with self.env.db_transaction as db:
for table, cols, vals in db_default.get_data(db):
db.executemany("INSERT INTO %s (%s) VALUES (%s)"
% (table, ','.join(cols), ','.join(['%s' for c in cols])),
vals)
self._update_sample_config()
def environment_needs_upgrade(self, db):
dbver = self.env.get_version(db)
if dbver == db_default.db_version:
return False
elif dbver > db_default.db_version:
raise TracError(_('Database newer than Trac version'))
self.log.info("Trac database schema version is %d, should be %d",
dbver, db_default.db_version)
return True
def upgrade_environment(self, db):
"""Each db version should have its own upgrade module, named
upgrades/dbN.py, where 'N' is the version number (int).
"""
cursor = db.cursor()
dbver = self.env.get_version()
for i in range(dbver + 1, db_default.db_version + 1):
name = 'db%i' % i
try:
upgrades = __import__('upgrades', globals(), locals(), [name])
script = getattr(upgrades, name)
except AttributeError:
raise TracError(_("No upgrade module for version %(num)i "
"(%(version)s.py)", num=i, version=name))
script.do_upgrade(self.env, i, cursor)
cursor.execute("""
UPDATE system SET value=%s WHERE name='database_version'
""", (i,))
self.log.info("Upgraded database version from %d to %d", i - 1, i)
db.commit()
self._update_sample_config()
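    # Shape of an upgrade module, sketched with a hypothetical table name;
    # each upgrades/dbN.py is expected to expose do_upgrade(env, ver, cursor):
    #
    #     def do_upgrade(env, ver, cursor):
    #         cursor.execute("CREATE TABLE myplugin_data (id integer)")
    #
    # The database_version bump and the commit are handled per step by
    # upgrade_environment above.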
# Internal methods
def _update_sample_config(self):
filename = os.path.join(self.env.path, 'conf', 'trac.ini.sample')
if not os.path.isfile(filename):
return
config = Configuration(filename)
for section, default_options in config.defaults().iteritems():
for name, value in default_options.iteritems():
config.set(section, name, value)
try:
config.save()
self.log.info("Wrote sample configuration file with the new "
"settings and their default values: %s",
filename)
except IOError, e:
self.log.warn("Couldn't write sample configuration file (%s)", e,
exc_info=True)
env_cache = {}
env_cache_lock = threading.Lock()
def open_environment(env_path=None, use_cache=False):
"""Open an existing environment object, and verify that the database is up
to date.
:param env_path: absolute path to the environment directory; if
                     omitted, the value of the `TRAC_ENV` environment
variable is used
:param use_cache: whether the environment should be cached for
subsequent invocations of this function
:return: the `Environment` object
"""
if not env_path:
env_path = os.getenv('TRAC_ENV')
if not env_path:
raise TracError(_('Missing environment variable "TRAC_ENV". '
'Trac requires this variable to point to a valid '
'Trac environment.'))
env_path = os.path.normcase(os.path.normpath(env_path))
if use_cache:
with env_cache_lock:
env = env_cache.get(env_path)
if env and env.config.parse_if_needed():
# The environment configuration has changed, so shut it down
# and remove it from the cache so that it gets reinitialized
env.log.info('Reloading environment due to configuration '
'change')
env.shutdown()
del env_cache[env_path]
env = None
if env is None:
env = env_cache.setdefault(env_path, open_environment(env_path))
else:
CacheManager(env).reset_metadata()
else:
env = Environment(env_path)
needs_upgrade = False
try:
needs_upgrade = env.needs_upgrade()
except Exception, e: # e.g. no database connection
env.log.error("Exception caught while checking for upgrade: %s",
exception_to_unicode(e, traceback=True))
if needs_upgrade:
raise TracError(_('The Trac Environment needs to be upgraded.\n\n'
'Run "trac-admin %(path)s upgrade"',
path=env_path))
return env
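# Typical use, sketched with a hypothetical environment path:
#
#     env = open_environment('/var/trac/myproject', use_cache=True)
#     for username, name, email in env.get_known_users():
#         print username, name, email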
class EnvironmentAdmin(Component):
"""trac-admin command provider for environment administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('deploy', '<directory>',
'Extract static resources from Trac and all plugins',
None, self._do_deploy)
yield ('hotcopy', '<backupdir> [--no-database]',
"""Make a hot backup copy of an environment
The database is backed up to the 'db' directory of the
destination, unless the --no-database option is
specified.
""",
None, self._do_hotcopy)
yield ('upgrade', '',
'Upgrade database to current version',
None, self._do_upgrade)
def _do_deploy(self, dest):
target = os.path.normpath(dest)
chrome_target = os.path.join(target, 'htdocs')
script_target = os.path.join(target, 'cgi-bin')
# Copy static content
makedirs(target, overwrite=True)
makedirs(chrome_target, overwrite=True)
from trac.web.chrome import Chrome
printout(_("Copying resources from:"))
for provider in Chrome(self.env).template_providers:
paths = list(provider.get_htdocs_dirs() or [])
if not len(paths):
continue
printout(' %s.%s' % (provider.__module__,
provider.__class__.__name__))
for key, root in paths:
if not root:
continue
source = os.path.normpath(root)
printout(' ', source)
if os.path.exists(source):
dest = os.path.join(chrome_target, key)
copytree(source, dest, overwrite=True)
# Create and copy scripts
makedirs(script_target, overwrite=True)
printout(_("Creating scripts."))
data = {'env': self.env, 'executable': sys.executable}
for script in ('cgi', 'fcgi', 'wsgi'):
dest = os.path.join(script_target, 'trac.' + script)
template = Chrome(self.env).load_template('deploy_trac.' + script,
'text')
stream = template.generate(**data)
with open(dest, 'w') as out:
stream.render('text', out=out, encoding='utf-8')
def _do_hotcopy(self, dest, no_db=None):
if no_db not in (None, '--no-database'):
raise AdminCommandError(_("Invalid argument '%(arg)s'", arg=no_db),
show_usage=True)
if os.path.exists(dest):
raise TracError(_("hotcopy can't overwrite existing '%(dest)s'",
dest=path_to_unicode(dest)))
import shutil
# Bogus statement to lock the database while copying files
with self.env.db_transaction as db:
db("UPDATE system SET name=NULL WHERE name IS NULL")
printout(_("Hotcopying %(src)s to %(dst)s ...",
src=path_to_unicode(self.env.path),
dst=path_to_unicode(dest)))
db_str = self.env.config.get('trac', 'database')
prefix, db_path = db_str.split(':', 1)
skip = []
if prefix == 'sqlite':
db_path = os.path.join(self.env.path, os.path.normpath(db_path))
# don't copy the journal (also, this would fail on Windows)
skip = [db_path + '-journal', db_path + '-stmtjrnl']
if no_db:
skip.append(db_path)
try:
copytree(self.env.path, dest, symlinks=1, skip=skip)
retval = 0
except shutil.Error, e:
retval = 1
printerr(_("The following errors happened while copying "
"the environment:"))
for (src, dst, err) in e.args[0]:
if src in err:
printerr(' %s' % err)
else:
printerr(" %s: '%s'" % (err, path_to_unicode(src)))
# db backup for non-sqlite
if prefix != 'sqlite' and not no_db:
printout(_("Backing up database ..."))
sql_backup = os.path.join(dest, 'db',
'%s-db-backup.sql' % prefix)
self.env.backup(sql_backup)
printout(_("Hotcopy done."))
return retval
def _do_upgrade(self, no_backup=None):
if no_backup not in (None, '-b', '--no-backup'):
raise AdminCommandError(_("Invalid arguments"), show_usage=True)
if not self.env.needs_upgrade():
printout(_("Database is up to date, no upgrade necessary."))
return
try:
self.env.upgrade(backup=no_backup is None)
except BackupError, e:
printerr(_("The pre-upgrade backup failed.\nUse '--no-backup' to "
"upgrade without doing a backup.\n"))
raise e.args[0]
except Exception, e:
printerr(_("The upgrade failed. Please fix the issue and try "
"again.\n"))
raise
# Remove wiki-macros if it is empty and warn if it isn't
wiki_macros = os.path.join(self.env.path, 'wiki-macros')
try:
entries = os.listdir(wiki_macros)
except OSError:
pass
else:
if entries:
printerr(_("Warning: the wiki-macros directory in the "
"environment is non-empty, but Trac\n"
"doesn't load plugins from there anymore. "
"Please remove it by hand."))
else:
try:
os.rmdir(wiki_macros)
except OSError, e:
printerr(_("Error while removing wiki-macros: %(err)s\n"
"Trac doesn't load plugins from wiki-macros "
"anymore. Please remove it by hand.",
err=exception_to_unicode(e)))
printout(_("Upgrade done.\n\n"
"You may want to upgrade the Trac documentation now by "
"running:\n\n trac-admin %(path)s wiki upgrade",
path=path_to_unicode(self.env.path)))
|
apache/bloodhound
|
trac/trac/env.py
|
Python
|
apache-2.0
| 39,717
|
[
"VisIt"
] |
08414cc08c173dec4391bff800c6ffe9337306a9db26b19c12fcb36a3b53dce7
|