Dataset columns (values show the observed length range or number of distinct values):

column      type            values
----------  --------------  ---------------------
text        string          lengths 12 to 1.05M
repo_name   string          lengths 5 to 86
path        string          lengths 4 to 191
language    string (class)  1 value
license     string (class)  15 values
size        int32           12 to 1.05M
keyword     list            lengths 1 to 23
text_hash   string          lengths 64 to 64
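The schema above is the flattened column summary a dataset viewer produces for this corpus. As orientation only, here is a minimal sketch of loading such a dataset with the Hugging Face datasets library and inspecting one record's metadata; the dataset identifier is a placeholder, since the real one is not given in this dump:

# Sketch only: "example-org/python-keyword-corpus" is a placeholder identifier,
# not the actual dataset name, which this dump does not state.
from datasets import load_dataset

ds = load_dataset("example-org/python-keyword-corpus", split="train")

# Column names should match the schema above:
# text, repo_name, path, language, license, size, keyword, text_hash
print(ds.column_names)

# Show one record's metadata while skipping the (potentially very large) source text.
row = ds[0]
print({k: row[k] for k in ("repo_name", "path", "language", "license", "size", "keyword", "text_hash")})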
#!/usr/bin/env python import boto import imp import json import os import subprocess import sys import webbrowser from distutils.spawn import find_executable from fabric.api import local, prompt, require, settings, task from fabric.state import env from glob import glob from oauth import get_document, get_credentials from time import sleep import app_config import assets import ftp as flat import render import utils from render_utils import load_graphic_config from etc.gdocs import GoogleDoc SPREADSHEET_COPY_URL_TEMPLATE = 'https://www.googleapis.com/drive/v2/files/%s/copy' SPREADSHEET_VIEW_TEMPLATE = 'https://docs.google.com/spreadsheet/ccc?key=%s#gid=1' """ Base configuration """ env.settings = None """ Environments Changing environment requires a full-stack test. An environment points to both a server and an S3 bucket. """ @task def production(): """ Run as though on production. """ env.settings = 'production' app_config.configure_targets(env.settings) @task def staging(): """ Run as though on staging. """ env.settings = 'staging' app_config.configure_targets(env.settings) """ Running the app """ @task def app(port='8000'): """ Serve app.py. """ local('gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app' % port) """ Deployment Changes to deployment requires a full-stack test. Deployment has two primary functions: Pushing flat files to S3 and deploying code to a remote server if required. """ @task def deploy_to_production(slug): require('settings', provided_by=[production, staging]) graphic_root = '%s/%s' % (app_config.GRAPHICS_PATH, slug) graphic_assets = '%s/assets' % graphic_root graphic_config = load_graphic_config(graphic_root) default_max_age = getattr(graphic_config, 'DEFAULT_MAX_AGE', None) or app_config.DEFAULT_MAX_AGE flat.deploy_folder( graphic_root, slug, headers={ 'Cache-Control': 'max-age=%i' % default_max_age }, ignore=['%s/*' % graphic_assets] ) write_meta_json(slug, 'deploy') @task def update_from_content(slug): require('settings', provided_by=[production, staging]) if not slug: print 'You must specify a project slug, like this: "update_from_content:slug"' return update_copy(slug) render.render(slug) write_meta_json(slug, 'content') @task def update_from_template(slug, template): require('settings', provided_by=[production, staging]) if not slug: print 'You must specify a project slug and template, like this: "update_from_template:slug,template=template"' return recopy_templates(slug, template) render.render(slug) @task def debug_deploy(slug, template): require('settings', provided_by=[production, staging]) if not slug: print 'You must specify a project slug and template, like this: "debug_deploy:slug,template=template"' return recopy_templates(slug, template) # update_copy(slug) # write_meta_json(slug, 'content') render.render(slug) def write_meta_json(slug, action, template=''): meta_path = '%s/%s/meta.json' % (app_config.GRAPHICS_PATH, slug) import json try: with open(meta_path) as f: json_data = json.load(f) except: # catch *all* exceptions default_json_str = '{"production": {"date": ""}, "staging": {"content": {"date": ""}, "template": {"date": "", "type": ""}}}' json_data = json.loads(default_json_str) import time date_string = int(time.time()) if "content" == action: json_data["staging"]["content"]["date"] = date_string elif "template" == action: json_data["staging"]["template"]["date"] = date_string json_data["staging"]["template"]["type"] = template elif "deploy" == action: json_data["production"]["date"] = date_string with open(meta_path, 'w') 
as f: json.dump(json_data, f) def recopy_templates(slug, template): graphic_path = '%s/%s' % (app_config.GRAPHICS_PATH, slug) print 'Recopying templates...' local('mv %s/graphic_config.py %s/graphic_config.py.BACKUP' % (graphic_path, graphic_path)) local('cp -r graphic_templates/_base/* %s' % (graphic_path)) local('cp -r graphic_templates/%s/* %s' % (template, graphic_path)) local('mv %s/graphic_config.py.BACKUP %s/graphic_config.py' % (graphic_path, graphic_path)) write_meta_json(slug, 'template', template) def download_copy(slug): """ Downloads a Google Doc as an .xlsx file. """ graphic_path = '%s/%s' % (app_config.GRAPHICS_PATH, slug) try: graphic_config = load_graphic_config(graphic_path) except IOError: print '%s/graphic_config.py does not exist.' % slug return if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY: print 'COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.' % slug return copy_path = os.path.join(graphic_path, '%s.xlsx' % slug) get_document(graphic_config.COPY_GOOGLE_DOC_KEY, copy_path) @task def update_copy(slug=None): """ Fetches the latest Google Doc and updates local JSON. """ print '\nUpdating content...' if slug: download_copy(slug) return slugs = os.listdir(app_config.GRAPHICS_PATH) for slug in slugs: graphic_path = '%s/%s' % (app_config.GRAPHICS_PATH, slug) if not os.path.exists('%s/graphic_config.py' % graphic_path): continue print slug download_copy(slug) """ App-specific commands """ def _add_graphic(slug, template, debug=False): """ Create a graphic with `slug` from `template` """ graphic_path = '%s/%s' % (app_config.GRAPHICS_PATH, slug) if _check_slug(slug): return if not debug: _check_credentials() print '\nCopying templates...' local('cp -r graphic_templates/_base %s' % (graphic_path)) local('cp -r graphic_templates/%s/* %s' % (template, graphic_path)) write_meta_json(slug, 'template', template) if debug: local('cp debug.xlsx %s/%s.xlsx' % (graphic_path, slug)) config_path = os.path.join(graphic_path, 'graphic_config.py') if not debug and os.path.isfile(config_path): print '\nCreating spreadsheet...' success = copy_spreadsheet(slug) if success: download_copy(slug) else: local('rm -r graphic_path') print 'Failed to copy spreadsheet! Try again!' return else: print 'No graphic_config.py found, not creating spreadsheet' # print 'Run `fab app` and visit http://127.0.0.1:8000/graphics/%s to view' % slug def _check_slug(slug): """ Does slug exist in graphics folder or production s3 bucket? """ graphic_path = '%s/%s' % (app_config.GRAPHICS_PATH, slug) if os.path.isdir(graphic_path): print 'Error: Directory already exists' return True #try: # s3 = boto.connect_s3() # bucket = s3.get_bucket(app_config.PRODUCTION_S3_BUCKET['bucket_name']) # key = bucket.get_key('%s/graphics/%s/child.html' % (app_config.PROJECT_SLUG, slug)) # # if key: # print 'Error: Slug exists on apps.npr.org' # return True #except boto.exception.NoAuthHandlerFound: # print 'Could not authenticate, skipping Amazon S3 check' #except boto.exception.S3ResponseError: # print 'Could not access S3 bucket, skipping Amazon S3 check' return False @task def add_graphic(slug): """ Create a basic project. """ _add_graphic(slug, 'graphic') @task def add_ai2html_graphic(slug): """ Create a graphic using an Adobe Illustrator base. """ _add_graphic(slug, 'ai2html_graphic') @task def add_bar_chart(slug, debug=False): """ Create a bar chart. """ _add_graphic(slug, 'bar_chart', debug) @task def add_column_chart(slug, debug=False): """ Create a column chart. 
""" _add_graphic(slug, 'column_chart', debug) @task def add_stacked_column_chart(slug, debug=False): """ Create a stacked column chart. """ _add_graphic(slug, 'stacked_column_chart', debug) @task def add_block_histogram(slug, debug=False): """ Create a block histogram. """ _add_graphic(slug, 'block_histogram', debug) @task def add_grouped_bar_chart(slug, debug=False): """ Create a grouped bar chart. """ _add_graphic(slug, 'grouped_bar_chart', debug) @task def add_stacked_bar_chart(slug, debug=False): """ Create a stacked bar chart. """ _add_graphic(slug, 'stacked_bar_chart', debug) @task def add_state_grid_map(slug): """ Create a state grid cartogram """ _add_graphic(slug, 'state_grid_map') @task def add_line_chart(slug, debug=False): """ Create a line chart. """ _add_graphic(slug, 'line_chart', debug) @task def add_pie_chart(slug, debug=False): """ Create a pie chart. """ _add_graphic(slug, 'pie_chart', debug) @task def add_sparkline_chart(slug, debug=False): """ Create a sparkline chart. """ _add_graphic(slug, 'sparkline_chart', debug) @task def add_waffle_chart(slug, debug=False): """ Create a waffle chart. """ _add_graphic(slug, 'waffle_chart', debug) @task def add_dot_chart(slug): """ Create a dot chart with error bars """ _add_graphic(slug, 'dot_chart') @task def add_slopegraph(slug, debug=False): """ Create a slopegraph (intended for narrow display) """ _add_graphic(slug, 'slopegraph', debug) @task def add_scatterplot(slug, debug=False): """ Create a scatterplot. """ _add_graphic(slug, 'scatterplot', debug) @task def add_bubbleplot(slug, debug=False): """ Create a bubbleplot. """ _add_graphic(slug, 'bubbleplot', debug) @task def add_map(slug): """ Create a locator map. """ _add_graphic(slug, 'locator_map') @task def add_table(slug): """ Create a data table. """ _add_graphic(slug, 'table') def _check_credentials(): """ Check credentials and spawn server and browser if not """ credentials = get_credentials() if not credentials or 'https://www.googleapis.com/auth/drive' not in credentials.config['google']['scope']: try: with open(os.devnull, 'w') as fnull: print 'Credentials were not found or permissions were not correct. Automatically opening a browser to authenticate with Google.' gunicorn = find_executable('gunicorn') process = subprocess.Popen([gunicorn, '-b', '0.0.0.0:8888', 'app:wsgi_app'], stdout=fnull, stderr=fnull, cwd=app_config.PROJECT_DIR) print 'Visit newsdev3:8888/oauth' # webbrowser.open_new('http://127.0.0.1:8888/oauth') print 'Waiting...' while not credentials: try: credentials = get_credentials() sleep(1) except ValueError: continue print 'Successfully authenticated!' process.terminate() except KeyboardInterrupt: print '\nCtrl-c pressed. Later, skater!' exit() def copy_spreadsheet(slug): """ Copy the COPY spreadsheet """ _check_credentials() config_path = '%s/%s/' % (app_config.GRAPHICS_PATH, slug) graphic_config = load_graphic_config(config_path) if not hasattr(graphic_config, 'COPY_GOOGLE_DOC_KEY') or not graphic_config.COPY_GOOGLE_DOC_KEY: print 'Skipping spreadsheet creation. 
(COPY_GOOGLE_DOC_KEY is not defined in %s/graphic_config.py.)' % slug return kwargs = { 'credentials': get_credentials(), 'url': SPREADSHEET_COPY_URL_TEMPLATE % graphic_config.COPY_GOOGLE_DOC_KEY, 'method': 'POST', 'headers': {'Content-Type': 'application/json'}, 'body': json.dumps({ 'title': '%s GRAPHIC COPY' % slug, }), } resp = app_config.authomatic.access(**kwargs) if resp.status == 200: spreadsheet_key = resp.data['id'] spreadsheet_url = SPREADSHEET_VIEW_TEMPLATE % spreadsheet_key print 'New spreadsheet created successfully!' print 'View it online at %s' % spreadsheet_url utils.replace_in_file('%s/graphic_config.py' % config_path , graphic_config.COPY_GOOGLE_DOC_KEY, spreadsheet_key) return True utils.replace_in_file(config_path, graphic_config.COPY_GOOGLE_DOC_KEY, '') print 'Error creating spreadsheet (status code %s) with message %s' % (resp.status, resp.reason) return False
repo_name: abcnews/dailygraphics
path: fabfile/__init__.py
language: Python
license: mit
size: 12,803
keyword: [ "VisIt" ]
text_hash: ddef8c768230164e8aa3fb82280b1c9c61fe7406284c84018dda58262c4f6b79
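A note on the text_hash column for the row above: it is always a 64-character hex string, which is consistent with a SHA-256 digest of the text field. That the digest is SHA-256 over the raw UTF-8 text is an assumption here, not something the dump states; a minimal sketch of recomputing it under that assumption:

import hashlib

def compute_text_hash(text):
    # Assumption: text_hash is SHA-256 over the UTF-8 bytes of the `text` column.
    # Any upstream normalization (e.g. of line endings) would change the digest.
    return hashlib.sha256(text.encode("utf-8")).hexdigest()

digest = compute_text_hash("#!/usr/bin/env python\n")
print(len(digest))  # 64 hex characters, matching the fixed 64/64 length in the schema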
""" Portable Executable (PE) 32 bit, little endian Used on MSWindows systems (including DOS) for EXEs and DLLs 1999 paper: http://download.microsoft.com/download/1/6/1/161ba512-40e2-4cc9-843a-923143f3456c/pecoff.doc 2006 with updates relevant for .NET: http://download.microsoft.com/download/9/c/5/9c5b2167-8017-4bae-9fde-d599bac8184a/pecoff_v8.doc """ from construct import * import time import six class UTCTimeStampAdapter(Adapter): def _decode(self, obj, context): return time.ctime(obj) def _encode(self, obj, context): return int(time.mktime(time.strptime(obj))) def UTCTimeStamp(name): return UTCTimeStampAdapter(ULInt32(name)) class NamedSequence(Adapter): """ creates a mapping between the elements of a sequence and their respective names. this is useful for sequences of a variable length, where each element in the sequence has a name (as is the case with the data directories of the PE header) """ __slots__ = ["mapping", "rev_mapping"] prefix = "unnamed_" def __init__(self, subcon, mapping): Adapter.__init__(self, subcon) self.mapping = mapping self.rev_mapping = dict((v, k) for k, v in mapping.items()) def _encode(self, obj, context): d = obj.__dict__ obj2 = [None] * len(d) for name, value in d.items(): if name in self.rev_mapping: index = self.rev_mapping[name] elif name.startswith("__"): obj2.pop(-1) continue elif name.startswith(self.prefix): index = int(name.split(self.prefix)[1]) else: raise ValueError("no mapping defined for %r" % (name,)) obj2[index] = value return obj2 def _decode(self, obj, context): obj2 = Container() for i, item in enumerate(obj): if i in self.mapping: name = self.mapping[i] else: name = "%s%d" % (self.prefix, i) setattr(obj2, name, item) return obj2 msdos_header = Struct("msdos_header", Magic("MZ"), ULInt16("partPag"), ULInt16("page_count"), ULInt16("relocation_count"), ULInt16("header_size"), ULInt16("minmem"), ULInt16("maxmem"), ULInt16("relocation_stackseg"), ULInt16("exe_stackptr"), ULInt16("checksum"), ULInt16("exe_ip"), ULInt16("relocation_codeseg"), ULInt16("table_offset"), ULInt16("overlay"), Padding(8), ULInt16("oem_id"), ULInt16("oem_info"), Padding(20), ULInt32("coff_header_pointer"), Anchor("_assembly_start"), OnDemand( HexDumpAdapter( Field("code", lambda ctx: ctx.coff_header_pointer - ctx._assembly_start ) ) ), ) symbol_table = Struct("symbol_table", String("name", 8, padchar = six.b("\x00")), ULInt32("value"), Enum(ExprAdapter(SLInt16("section_number"), encoder = lambda obj, ctx: obj + 1, decoder = lambda obj, ctx: obj - 1, ), UNDEFINED = -1, ABSOLUTE = -2, DEBUG = -3, _default_ = Pass, ), Enum(ULInt8("complex_type"), NULL = 0, POINTER = 1, FUNCTION = 2, ARRAY = 3, ), Enum(ULInt8("base_type"), NULL = 0, VOID = 1, CHAR = 2, SHORT = 3, INT = 4, LONG = 5, FLOAT = 6, DOUBLE = 7, STRUCT = 8, UNION = 9, ENUM = 10, MOE = 11, BYTE = 12, WORD = 13, UINT = 14, DWORD = 15, ), Enum(ULInt8("storage_class"), END_OF_FUNCTION = 255, NULL = 0, AUTOMATIC = 1, EXTERNAL = 2, STATIC = 3, REGISTER = 4, EXTERNAL_DEF = 5, LABEL = 6, UNDEFINED_LABEL = 7, MEMBER_OF_STRUCT = 8, ARGUMENT = 9, STRUCT_TAG = 10, MEMBER_OF_UNION = 11, UNION_TAG = 12, TYPE_DEFINITION = 13, UNDEFINED_STATIC = 14, ENUM_TAG = 15, MEMBER_OF_ENUM = 16, REGISTER_PARAM = 17, BIT_FIELD = 18, BLOCK = 100, FUNCTION = 101, END_OF_STRUCT = 102, FILE = 103, SECTION = 104, WEAK_EXTERNAL = 105, ), ULInt8("number_of_aux_symbols"), Array(lambda ctx: ctx.number_of_aux_symbols, Bytes("aux_symbols", 18) ) ) coff_header = Struct("coff_header", Magic("PE\x00\x00"), Enum(ULInt16("machine_type"), UNKNOWN = 0x0, AM33 = 
0x1d3, AMD64 = 0x8664, ARM = 0x1c0, EBC = 0xebc, I386 = 0x14c, IA64 = 0x200, M32R = 0x9041, MIPS16 = 0x266, MIPSFPU = 0x366, MIPSFPU16 = 0x466, POWERPC = 0x1f0, POWERPCFP = 0x1f1, R4000 = 0x166, SH3 = 0x1a2, SH3DSP = 0x1a3, SH4 = 0x1a6, SH5= 0x1a8, THUMB = 0x1c2, WCEMIPSV2 = 0x169, _default_ = Pass ), ULInt16("number_of_sections"), UTCTimeStamp("time_stamp"), ULInt32("symbol_table_pointer"), ULInt32("number_of_symbols"), ULInt16("optional_header_size"), FlagsEnum(ULInt16("characteristics"), RELOCS_STRIPPED = 0x0001, EXECUTABLE_IMAGE = 0x0002, LINE_NUMS_STRIPPED = 0x0004, LOCAL_SYMS_STRIPPED = 0x0008, AGGRESSIVE_WS_TRIM = 0x0010, LARGE_ADDRESS_AWARE = 0x0020, MACHINE_16BIT = 0x0040, BYTES_REVERSED_LO = 0x0080, MACHINE_32BIT = 0x0100, DEBUG_STRIPPED = 0x0200, REMOVABLE_RUN_FROM_SWAP = 0x0400, SYSTEM = 0x1000, DLL = 0x2000, UNIPROCESSOR_ONLY = 0x4000, BIG_ENDIAN_MACHINE = 0x8000, ), # symbol table Pointer(lambda ctx: ctx.symbol_table_pointer, Array(lambda ctx: ctx.number_of_symbols, symbol_table) ) ) def PEPlusField(name): return IfThenElse(name, lambda ctx: ctx.pe_type == "PE32_plus", ULInt64(None), ULInt32(None), ) optional_header = Struct("optional_header", # standard fields Enum(ULInt16("pe_type"), PE32 = 0x10b, PE32_plus = 0x20b, ), ULInt8("major_linker_version"), ULInt8("minor_linker_version"), ULInt32("code_size"), ULInt32("initialized_data_size"), ULInt32("uninitialized_data_size"), ULInt32("entry_point_pointer"), ULInt32("base_of_code"), # only in PE32 files If(lambda ctx: ctx.pe_type == "PE32", ULInt32("base_of_data") ), # WinNT-specific fields PEPlusField("image_base"), ULInt32("section_aligment"), ULInt32("file_alignment"), ULInt16("major_os_version"), ULInt16("minor_os_version"), ULInt16("major_image_version"), ULInt16("minor_image_version"), ULInt16("major_subsystem_version"), ULInt16("minor_subsystem_version"), Padding(4), ULInt32("image_size"), ULInt32("headers_size"), ULInt32("checksum"), Enum(ULInt16("subsystem"), UNKNOWN = 0, NATIVE = 1, WINDOWS_GUI = 2, WINDOWS_CUI = 3, POSIX_CIU = 7, WINDOWS_CE_GUI = 9, EFI_APPLICATION = 10, EFI_BOOT_SERVICE_DRIVER = 11, EFI_RUNTIME_DRIVER = 12, EFI_ROM = 13, XBOX = 14, _defualt_ = Pass ), FlagsEnum(ULInt16("dll_characteristics"), NO_BIND = 0x0800, WDM_DRIVER = 0x2000, TERMINAL_SERVER_AWARE = 0x8000, ), PEPlusField("reserved_stack_size"), PEPlusField("stack_commit_size"), PEPlusField("reserved_heap_size"), PEPlusField("heap_commit_size"), ULInt32("loader_flags"), ULInt32("number_of_data_directories"), NamedSequence( Array(lambda ctx: ctx.number_of_data_directories, Struct("data_directories", ULInt32("address"), ULInt32("size"), ) ), mapping = { 0 : 'export_table', 1 : 'import_table', 2 : 'resource_table', 3 : 'exception_table', 4 : 'certificate_table', 5 : 'base_relocation_table', 6 : 'debug', 7 : 'architecture', 8 : 'global_ptr', 9 : 'tls_table', 10 : 'load_config_table', 11 : 'bound_import', 12 : 'import_address_table', 13 : 'delay_import_descriptor', 14 : 'complus_runtime_header', } ), ) section = Struct("section", String("name", 8, padchar = six.b("\x00")), ULInt32("virtual_size"), ULInt32("virtual_address"), ULInt32("raw_data_size"), ULInt32("raw_data_pointer"), ULInt32("relocations_pointer"), ULInt32("line_numbers_pointer"), ULInt16("number_of_relocations"), ULInt16("number_of_line_numbers"), FlagsEnum(ULInt32("characteristics"), TYPE_REG = 0x00000000, TYPE_DSECT = 0x00000001, TYPE_NOLOAD = 0x00000002, TYPE_GROUP = 0x00000004, TYPE_NO_PAD = 0x00000008, TYPE_COPY = 0x00000010, CNT_CODE = 0x00000020, CNT_INITIALIZED_DATA = 0x00000040, 
CNT_UNINITIALIZED_DATA = 0x00000080, LNK_OTHER = 0x00000100, LNK_INFO = 0x00000200, TYPE_OVER = 0x00000400, LNK_REMOVE = 0x00000800, LNK_COMDAT = 0x00001000, MEM_FARDATA = 0x00008000, MEM_PURGEABLE = 0x00020000, MEM_16BIT = 0x00020000, MEM_LOCKED = 0x00040000, MEM_PRELOAD = 0x00080000, ALIGN_1BYTES = 0x00100000, ALIGN_2BYTES = 0x00200000, ALIGN_4BYTES = 0x00300000, ALIGN_8BYTES = 0x00400000, ALIGN_16BYTES = 0x00500000, ALIGN_32BYTES = 0x00600000, ALIGN_64BYTES = 0x00700000, ALIGN_128BYTES = 0x00800000, ALIGN_256BYTES = 0x00900000, ALIGN_512BYTES = 0x00A00000, ALIGN_1024BYTES = 0x00B00000, ALIGN_2048BYTES = 0x00C00000, ALIGN_4096BYTES = 0x00D00000, ALIGN_8192BYTES = 0x00E00000, LNK_NRELOC_OVFL = 0x01000000, MEM_DISCARDABLE = 0x02000000, MEM_NOT_CACHED = 0x04000000, MEM_NOT_PAGED = 0x08000000, MEM_SHARED = 0x10000000, MEM_EXECUTE = 0x20000000, MEM_READ = 0x40000000, MEM_WRITE = 0x80000000, ), OnDemandPointer(lambda ctx: ctx.raw_data_pointer, HexDumpAdapter(Field("raw_data", lambda ctx: ctx.raw_data_size)) ), OnDemandPointer(lambda ctx: ctx.line_numbers_pointer, Array(lambda ctx: ctx.number_of_line_numbers, Struct("line_numbers", ULInt32("type"), ULInt16("line_number"), ) ) ), OnDemandPointer(lambda ctx: ctx.relocations_pointer, Array(lambda ctx: ctx.number_of_relocations, Struct("relocations", ULInt32("virtual_address"), ULInt32("symbol_table_index"), ULInt16("type"), ) ) ), ) pe32_file = Struct("pe32_file", # headers msdos_header, coff_header, Anchor("_start_of_optional_header"), optional_header, Anchor("_end_of_optional_header"), Padding(lambda ctx: min(0, ctx.coff_header.optional_header_size - ctx._end_of_optional_header + ctx._start_of_optional_header ) ), # sections Array(lambda ctx: ctx.coff_header.number_of_sections, section) ) if __name__ == "__main__": print (pe32_file.parse_stream(open("../../../tests/NOTEPAD.EXE", "rb"))) print (pe32_file.parse_stream(open("../../../tests/sqlite3.dll", "rb")))
repo_name: PythEch/pymobiledevice
path: libs/python/construct/formats/executable/pe32.py
language: Python
license: lgpl-3.0
size: 11,720
keyword: [ "MOE" ]
text_hash: c83c0698149992b989b128cbc03c927ebd06a376c63cfe2da7e5b238101913a2
#!/usr/bin/python
# (c) 2005-2009 Divmod, Inc.  See LICENSE file for details

from distutils.core import setup

setup(
    name="pyflakes",
    license="MIT",
    version="0.4.3",
    description="passive checker of Python programs testcomment",
    author="Phil Frost",
    maintainer="Moe Aboulkheir",
    maintainer_email="moe@divmod.com",
    url="http://www.divmod.org/trac/wiki/DivmodPyflakes",
    packages=["pyflakes", "pyflakes.scripts", "pyflakes.test"],
    scripts=["bin/pyflakes"],
    long_description="""Pyflakes is program to analyze Python programs and detect various errors. It
works by parsing the source file, not importing it, so it is safe to use on
modules with side effects. It's also much faster.""",
    classifiers=[
        "Development Status :: 6 - Mature",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python",
        "Topic :: Software Development",
        "Topic :: Utilities",
    ])
repo_name: sanzinger/pyflakes
path: setup.py
language: Python
license: mit
size: 1,039
keyword: [ "MOE" ]
text_hash: 50983d6024544d2e2611103dc2c90199787fd6c1b6d940e64db107523f0756b7
#!/usr/bin/env python import numpy as np from .utils import get_maker from . import utils from . import medsmakers import fitsio import os,sys import time from .shearmakers import get_shear_maker class SimpSimMaker(dict): def __init__(self,conf,num_seeds=1,seed_index=0): self.global_start_time = time.time() self.update(conf) self.set_defaults(num_seeds,seed_index) self.setup_shears() if not self['silent']: import pprint pprint.pprint(self) sys.stdout.flush() self.set_extra_and_percutout_data() self.set_seeds() self.set_galaxy_psf_makers() def set_defaults(self,num_seeds,seed_index): self['silent'] = self.get('silent',True) self['num_seeds'] = self.get('num_seeds',num_seeds) self['seed_index'] = self.get('seed_index',seed_index) self['seed_fmt'] = self.get('seed_fmt','%06d') self['Ngals'] = self.get('Ngals',1) if 'sizes' not in self: self['sizes'] = utils.get_fft_sizes(min_size=self['min_size'],max_size=self['max_size']) self['output_base'] = self.get('output_base','') if len(self['output_base']) > 0 and self['output_base'][-1] != '_': self['output_base'] += '_' def setup_shears(self): if 'shear' not in self: print "no shear specified" shearpdf = None else: shearpdf = get_shear_maker(self['shear']) print "loaded shearpdf:",shearpdf self.shearpdf = shearpdf def set_extra_and_percutout_data(self): self.extra_data = self.get('extra_data',[]) self.extra_percutout_data = self.get('extra_percutout_data',[]) self.extra_percutout_data = [('psf_id','i8')] def set_seeds(self): self.rng = np.random.RandomState(self['global_seed']) self.galaxy_seeds = self.rng.choice(10000000,size=self['num_seeds'],replace=False) self.psf_seeds = self.rng.choice(10000000,size=self['num_seeds'],replace=False) def set_galaxy_psf_makers(self): if not self['silent']: print "getting galaxy maker..." sys.stdout.flush() self.galaxy_maker = get_maker(self['galaxymaker']['type']) self.galaxy_maker = self.galaxy_maker(seed=self.galaxy_seeds[self['seed_index']],**self) self.extra_data.extend(self.galaxy_maker.get_extra_data_dtype()) self.extra_percutout_data.extend(self.galaxy_maker.get_extra_percutout_data_dtype()) if not self['silent']: print "getting psf maker..." sys.stdout.flush() self.psf_maker = get_maker(self['psfmaker']['type']) self.psf_maker = self.psf_maker(seed=self.psf_seeds[self['seed_index']],**self) def get_shear(self): """ Get a shear dict. If no shear was specified return None The shear dict contains a ngmix.Shape object in the 'shear' field, as well as further information such as 'shear_index' in the 'meta' field """ if self.shearpdf is None: return {'shear':None, 'meta':{'shear_index':-1}} else: return self.shearpdf.sample() def get_object(self): # get PSF psf = self.psf_maker.get_psf() # get gal shdict = self.get_shear() gal = self.galaxy_maker.get_galaxy(psf=psf, shear=shdict['shear']) gal['extra_data']['shear_meta'] = shdict['meta'] # recenter drow = gal['row'] - gal.image.shape[0]/2.0 dcol = gal['col'] - gal.image.shape[1]/2.0 psf = self.psf_maker.get_psf(psf=psf,shift=[dcol,drow]) # swap for input to dx,dy in galsim # set the psf gal.psf = psf return gal def make_meds(self): outputbase = self['output_base'] psfs = [] mm = medsmakers.MemoryMEDSMaker(extra_data=self.extra_data,extra_percutout_data=self.extra_percutout_data) if not self['silent']: print "making galaxies..." 
sys.stdout.flush() for i in xrange(self['Ngals']): if not self['silent']: print "gal:",i sys.stdout.flush() if i == 1: self.start_time = time.time() gal = self.get_object() seg = np.zeros_like(gal.image,dtype='i4') seg[:,:] = i+1 row,col = gal['row'],gal['col'] # append the psf psfs.append((i,gal.psf.image.copy())) psf_size = gal.psf.image.shape[0] # we want this copied to the meds object_data extension, so add it # directly to objinfo (gal['extra_data'] is also added later but is # not copied to the meds file) shear_index = gal['extra_data']['shear_meta']['shear_index'] # put it into meds objinfo = dict(id=i, number=i+1, orig_row=np.array([-99,row]), orig_col=[-99,col], orig_start_row=[-99,0], orig_start_col=[-99,0], dudrow=[-99,gal['pixel_scale']], dudcol=[-99,0.0], dvdrow=[-99,0.0], dvdcol=[-99,gal['pixel_scale']], cutout_row=[-99,row], cutout_col=[-99,col], shear_index=shear_index) objinfo.update(gal['extra_data']) pdata = {} for nm,tp in self.extra_percutout_data: pdata[nm] = [-99.0] pdata['psf_id'].append(i) for nm in gal['extra_percutout_data'].keys(): assert nm in pdata,"Percutout data %s not found!" % nm pdata[nm].extend(gal['extra_percutout_data'][nm]) objinfo.update(pdata) mm.add_object(objinfo, \ [np.zeros_like(gal.image),gal.image], \ [np.zeros_like(gal.weight),gal.weight], \ [np.zeros_like(seg),seg]) del gal del objinfo del seg del row del col del pdata del nm del tp if not self['silent']: print "writing files..." sys.stdout.flush() tail = '%s.fits' % self['seed_fmt'] tail = tail % self['seed_index'] mfname = outputbase+'meds'+tail mm.write(mfname) mm.fpack() os.remove(mfname) psfs = np.array(psfs,dtype=[('psf_id','i8'),('psf_im','f8',(psf_size,psf_size))]) pfname = outputbase+'psf'+tail fitsio.write(pfname,psfs,clobber=True) self.end_time = time.time() if not self['silent']: tt = self.end_time-self.global_start_time print 'sim took %f seconds' % tt tpg = (self.end_time-self.start_time)/(self['Ngals'] - 1.0) ohead = tt - tpg*self['Ngals'] print 'init took %s seconds' % ohead print 'used %f seconds per galaxy' % tpg sys.stdout.flush()
repo_name: esheldon/egret
path: egret/simpsimmakers.py
language: Python
license: bsd-3-clause
size: 7,414
keyword: [ "Galaxy" ]
text_hash: cec20898b9d5f5578151edf2716eaa63869e7a157203906b7a5e569f865083fe
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class PyPyscf(PythonPackage):
    """PySCF is a collection of electronic structure programs powered by
    Python."""

    homepage = "https://sunqm.github.io/pyscf/"
    git = "https://github.com/pyscf/pyscf"

    maintainers = ['naromero77']

    version('1.7.5', tag='v1.7.5')
    version('1.7.3', tag='v1.7.3')

    # dependencies
    depends_on('cmake@2.8:', type='build')
    depends_on('python@2.6:', type=('build', 'run'))
    depends_on('py-numpy@1.8.0:', type=('build', 'run'))
    depends_on('py-scipy@0.12:', type=('build', 'run'))
    depends_on('py-h5py@2.3.0:', type=('build', 'run'))
    depends_on('blas')
    depends_on('libcint+coulomb_erf+f12')
    depends_on('libxc')
    depends_on('xcfun')

    def setup_build_environment(self, env):
        # Tell PSCF where supporting libraries are located."
        spec = self.spec
        pyscf_search_dir = []
        pyscf_search_dir.append(spec['blas'].prefix)
        pyscf_search_dir.append(spec['libcint'].prefix)
        pyscf_search_dir.append(spec['libcint'].prefix.lib64)
        pyscf_search_dir.append(spec['libxc'].prefix)
        pyscf_search_dir.append(spec['xcfun'].prefix)
        pyscf_search_dir.append(spec['xcfun'].prefix.include.XCFun)

        env.set('PYSCF_INC_DIR', ":".join(pyscf_search_dir))
repo_name: LLNL/spack
path: var/spack/repos/builtin/packages/py-pyscf/package.py
language: Python
license: lgpl-2.1
size: 1,510
keyword: [ "PySCF" ]
text_hash: 8ce894307ba17e99b4a84597e15a6379a9e7d94a2393468c86ca07d210b911ef
#!/usr/bin/env python # encoding: utf-8 """ fp.py Created by Brian Whitman on 2010-06-16. Copyright (c) 2010 The Echo Nest Corporation. All rights reserved. """ from __future__ import with_statement import logging import solr import pickle from collections import defaultdict import zlib, base64, re, time, random, string, math import pytyrant import datetime now = datetime.datetime.utcnow() IMPORTDATE = now.strftime("%Y-%m-%dT%H:%M:%SZ") try: import json except ImportError: import simplejson as json _fp_solr = solr.SolrConnectionPool("http://localhost:8502/solr/fp") _hexpoch = int(time.time() * 1000) logger = logging.getLogger(__name__) _tyrant_address = ['localhost', 1978] _tyrant = None class Response(object): # Response codes NOT_ENOUGH_CODE, CANNOT_DECODE, SINGLE_BAD_MATCH, SINGLE_GOOD_MATCH, NO_RESULTS, MULTIPLE_GOOD_MATCH_HISTOGRAM_INCREASED, \ MULTIPLE_GOOD_MATCH_HISTOGRAM_DECREASED, MULTIPLE_BAD_HISTOGRAM_MATCH, MULTIPLE_GOOD_MATCH = range(9) def __init__(self, code, TRID=None, score=0, qtime=0, tic=0, metadata={}): self.code = code self.qtime = qtime self.TRID = TRID self.score = score self.total_time = int(time.time()*1000) - tic self.metadata = metadata def __len__(self): if self.TRID is not None: return 1 else: return 0 def message(self): if self.code == self.NOT_ENOUGH_CODE: return "query code length is too small" if self.code == self.CANNOT_DECODE: return "could not decode query code" if self.code == self.SINGLE_BAD_MATCH or self.code == self.NO_RESULTS or self.code == self.MULTIPLE_BAD_HISTOGRAM_MATCH: return "no results found (type %d)" % (self.code) return "OK (match type %d)" % (self.code) def match(self): return self.TRID is not None def inflate_code_string(s): """ Takes an uncompressed code string consisting of 0-padded fixed-width sorted hex and converts it to the standard code string.""" n = int(len(s) / 10.0) # 5 hex bytes for hash, 5 hex bytes for time (40 bits) def pairs(l, n=2): """Non-overlapping [1,2,3,4] -> [(1,2), (3,4)]""" # return zip(*[[v for i,v in enumerate(l) if i % n == j] for j in range(n)]) end = n res = [] while end <= len(l): start = end - n res.append(tuple(l[start:end])) end += n return res # Parse out n groups of 5 timestamps in hex; then n groups of 8 hash codes in hex. end_timestamps = n*5 times = [int(''.join(t), 16) for t in chunker(s[:end_timestamps], 5)] codes = [int(''.join(t), 16) for t in chunker(s[end_timestamps:], 5)] assert(len(times) == len(codes)) # these should match up! return ' '.join('%d %d' % (c, t) for c,t in zip(codes, times)) def decode_code_string(compressed_code_string): compressed_code_string = compressed_code_string.encode('utf8') if compressed_code_string == "": return "" # do the zlib/base64 stuff try: # this will decode both URL safe b64 and non-url-safe actual_code = zlib.decompress(base64.urlsafe_b64decode(compressed_code_string)) except (zlib.error, TypeError): logger.warn("Could not decode base64 zlib string %s" % (compressed_code_string)) import traceback; logger.warn(traceback.format_exc()) return None # If it is a deflated code, expand it from hex if ' ' not in actual_code: actual_code = inflate_code_string(actual_code) return actual_code def metadata_for_track_id(track_id, local=False): if not track_id or not len(track_id): return {} # Assume track_ids have 1 - and it's at the end of the id. 
if "-" not in track_id: track_id = "%s-0" % track_id if local: return _fake_solr["metadata"][track_id] with solr.pooled_connection(_fp_solr) as host: response = host.query("track_id:%s" % track_id) if len(response.results): return response.results[0] else: return {} def cut_code_string_length(code_string): """ Remove all codes from a codestring that are > 60 seconds in length. Because we can only match 60 sec, everything else is unnecessary """ split = code_string.split() if len(split) < 2: return code_string # If we use the codegen on a file with start/stop times, the first timestamp # is ~= the start time given. There might be a (slightly) earlier timestamp # in another band, but this is good enough first_timestamp = int(split[1]) sixty_seconds = int(60.0 * 1000.0 / 23.2 + first_timestamp) parts = [] for (code, t) in zip(split[::2], split[1::2]): tstamp = int(t) if tstamp <= sixty_seconds: parts.append(code) parts.append(t) return " ".join(parts) def best_match_for_query(code_string, elbow=10, local=False): # DEC strings come in as unicode so we have to force them to ASCII code_string = code_string.encode("utf8") tic = int(time.time()*1000) # First see if this is a compressed code if re.match('[A-Za-z\/\+\_\-]', code_string) is not None: code_string = decode_code_string(code_string) if code_string is None: return Response(Response.CANNOT_DECODE, tic=tic) code_len = len(code_string.split(" ")) / 2 if code_len < elbow: logger.warn("Query code length (%d) is less than elbow (%d)" % (code_len, elbow)) return Response(Response.NOT_ENOUGH_CODE, tic=tic) code_string = cut_code_string_length(code_string) code_len = len(code_string.split(" ")) / 2 # Query the FP flat directly. response = query_fp(code_string, rows=30, local=local, get_data=True) logger.debug("solr qtime is %d" % (response.header["QTime"])) if len(response.results) == 0: return Response(Response.NO_RESULTS, qtime=response.header["QTime"], tic=tic) # If we just had one result, make sure that it is close enough. We rarely if ever have a single match so this is not helpful (and probably doesn't work well.) top_match_score = int(response.results[0]["score"]) if len(response.results) == 1: trackid = response.results[0]["track_id"] trackid = trackid.split("-")[0] # will work even if no `-` in trid meta = metadata_for_track_id(trackid, local=local) if code_len - top_match_score < elbow: return Response(Response.SINGLE_GOOD_MATCH, TRID=trackid, score=top_match_score, qtime=response.header["QTime"], tic=tic, metadata=meta) else: return Response(Response.SINGLE_BAD_MATCH, qtime=response.header["QTime"], tic=tic) # If the scores are really low (less than 5% of the query length) then say no results if top_match_score < code_len * 0.05: return Response(Response.MULTIPLE_BAD_HISTOGRAM_MATCH, qtime = response.header["QTime"], tic=tic) # Not a strong match, so we look up the codes in the keystore and compute actual matches... 
# Get the actual score for all responses original_scores = {} actual_scores = {} trackids = [r["track_id"].encode("utf8") for r in response.results] if local: tcodes = [_fake_solr["store"][t] for t in trackids] else: tcodes = get_tyrant().multi_get(trackids) # For each result compute the "actual score" (based on the histogram matching) for (i, r) in enumerate(response.results): track_id = r["track_id"] original_scores[track_id] = int(r["score"]) track_code = tcodes[i] if track_code is None: # Solr gave us back a track id but that track # is not in our keystore continue actual_scores[track_id] = actual_matches(code_string, track_code, elbow = elbow) #logger.debug("Actual score for %s is %d (code_len %d), original was %d" % (r["track_id"], actual_scores[r["track_id"]], code_len, top_match_score)) # Sort the actual scores sorted_actual_scores = sorted(actual_scores.iteritems(), key=lambda (k,v): (v,k), reverse=True) # Because we split songs up into multiple parts, sometimes the results will have the same track in the # first few results. Remove these duplicates so that the falloff is (potentially) higher. new_sorted_actual_scores = [] existing_trids = [] for trid, result in sorted_actual_scores: trid_split = trid.split("-")[0] if trid_split not in existing_trids: new_sorted_actual_scores.append((trid, result)) existing_trids.append(trid_split) sorted_actual_scores = new_sorted_actual_scores # We might have reduced the length of the list to 1 if len(sorted_actual_scores) == 1: logger.info("only have 1 score result...") (top_track_id, top_score) = sorted_actual_scores[0] if top_score < code_len * 0.1: logger.info("only result less than 10%% of the query string (%d < %d *0.1 (%d)) SINGLE_BAD_MATCH", top_score, code_len, code_len*0.1) return Response(Response.SINGLE_BAD_MATCH, qtime = response.header["QTime"], tic=tic) else: if top_score > (original_scores[top_track_id] / 2): logger.info("top_score > original_scores[%s]/2 (%d > %d) GOOD_MATCH_DECREASED", top_track_id, top_score, original_scores[top_track_id]/2) trid = top_track_id.split("-")[0] meta = metadata_for_track_id(trid, local=local) return Response(Response.MULTIPLE_GOOD_MATCH_HISTOGRAM_DECREASED, TRID=trid, score=top_score, qtime=response.header["QTime"], tic=tic, metadata=meta) else: logger.info("top_score NOT > original_scores[%s]/2 (%d <= %d) BAD_HISTOGRAM_MATCH", top_track_id, top_score, original_scores[top_track_id]/2) return Response(Response.MULTIPLE_BAD_HISTOGRAM_MATCH, qtime=response.header["QTime"], tic=tic) # Get the top one (actual_score_top_track_id, actual_score_top_score) = sorted_actual_scores[0] # Get the 2nd top one (we know there is always at least 2 matches) (actual_score_2nd_track_id, actual_score_2nd_score) = sorted_actual_scores[1] trackid = actual_score_top_track_id.split("-")[0] meta = metadata_for_track_id(trackid, local=local) if actual_score_top_score < code_len * 0.05: return Response(Response.MULTIPLE_BAD_HISTOGRAM_MATCH, qtime = response.header["QTime"], tic=tic) else: # If the actual score went down it still could be close enough, so check for that if actual_score_top_score > (original_scores[actual_score_top_track_id] / 4): if (actual_score_top_score - actual_score_2nd_score) >= (actual_score_top_score / 3): # for examples [10,4], 10-4 = 6, which >= 5, so OK return Response(Response.MULTIPLE_GOOD_MATCH_HISTOGRAM_DECREASED, TRID=trackid, score=actual_score_top_score, qtime=response.header["QTime"], tic=tic, metadata=meta) else: return Response(Response.MULTIPLE_BAD_HISTOGRAM_MATCH, qtime = 
response.header["QTime"], tic=tic) else: # If the actual score was not close enough, then no match. return Response(Response.MULTIPLE_BAD_HISTOGRAM_MATCH, qtime=response.header["QTime"], tic=tic) def actual_matches(code_string_query, code_string_match, slop = 2, elbow = 10): code_query = code_string_query.split(" ") code_match = code_string_match.split(" ") if (len(code_match) < (elbow*2)): return 0 time_diffs = {} # Normalise the query timecodes to start with offset 0 code_query_int = [int(x) for x in code_query] min_time = min(code_query_int[1::2]) code_query[1::2] = [str(x - min_time) for x in code_query_int[1::2]] # # Invert the query codes query_codes = {} for (qcode, qtime) in zip(code_query[::2], code_query[1::2]): qtime = int(qtime) / slop if qcode in query_codes: query_codes[qcode].append(qtime) else: query_codes[qcode] = [qtime] # # Walk the document codes, handling those that occur in the query match_counter = 1 for match_code in code_match[::2]: if match_code in query_codes: match_code_time = int(code_match[match_counter])/slop min_dist = 32767 for qtime in query_codes[match_code]: # match_code_time > qtime for all corresponding # hashcodes since normalising query timecodes, so no # need for abs() anymore dist = match_code_time - qtime if dist < min_dist: min_dist = dist if min_dist < 32767: if time_diffs.has_key(min_dist): time_diffs[min_dist] += 1 else: time_diffs[min_dist] = 1 match_counter += 2 # sort the histogram, pick the top 2 and return that as your actual score actual_match_list = sorted(time_diffs.iteritems(), key=lambda (k,v): (v,k), reverse=True) print actual_match_list if(len(actual_match_list)>1): return actual_match_list[0][1] + actual_match_list[1][1] if(len(actual_match_list)>0): return actual_match_list[0][1] return 0 def get_tyrant(): global _tyrant if _tyrant is None: _tyrant = pytyrant.PyTyrant.open(*_tyrant_address) return _tyrant """ fp can query the live production flat or the alt flat, or it can query and ingest in memory. the following few functions are to support local query and ingest that ape the response of the live server This is useful for small collections and testing, deduplicating, etc, without having to boot a server. The results should be equivalent but i need to run tests. 
NB: delete is not supported locally yet """ _fake_solr = {"index": {}, "store": {}, "metadata": {}} class FakeSolrResponse(object): def __init__(self, results): self.header = {'QTime': 0} self.results = [] for r in results: # If the result list has more than 2 elements we've asked for data as well if len(r) > 2: data = {"score":r[1], "track_id":r[0], "fp":r[2]} metadata = r[3] data["length"] = metadata["length"] for m in ["artist", "release", "track"]: if m in metadata: data[m] = metadata[m] self.results.append(data) else: self.results.append({"score":r[1], "track_id":r[0]}) def local_load(filename): global _fake_solr print "Loading from " + filename disk = open(filename,"rb") _fake_solr = pickle.load(disk) disk.close() print "Done" def local_save(filename): print "Saving to " + filename disk = open(filename,"wb") pickle.dump(_fake_solr,disk) disk.close() print "Done" def local_ingest(docs, codes): store = dict(codes) _fake_solr["store"].update(store) for fprint in docs: trackid = fprint["track_id"] keys = set(fprint["fp"].split(" ")[0::2]) # just one code indexed for k in keys: tracks = _fake_solr["index"].setdefault(k,[]) if trackid not in tracks: tracks.append(trackid) _fake_solr["metadata"][trackid] = {"length": fprint["length"], "codever": fprint["codever"]} if "artist" in fprint: _fake_solr["metadata"][trackid]["artist"] = fprint["artist"] if "release" in fprint: _fake_solr["metadata"][trackid]["release"] = fprint["release"] if "track" in fprint: _fake_solr["metadata"][trackid]["track"] = fprint["track"] def local_delete(tracks): for track in tracks: codes = set(_fake_solr["store"][track].split(" ")[0::2]) del _fake_solr["store"][track] for code in codes: # Make copy so destructive editing doesn't break for loop codetracks = list(_fake_solr["index"][code]) for trid in codetracks: if trid.startswith(track): _fake_solr["index"][code].remove(trid) try: del _fake_solr["metadata"][trid] except KeyError: pass if len(_fake_solr["index"][code]) == 0: del _fake_solr["index"][code] def local_dump(): print "Stored tracks:" print _fake_solr["store"].keys() print "Metadata:" for t in _fake_solr["metadata"].keys(): print t, _fake_solr["metadata"][t] print "Keys:" for k in _fake_solr["index"].keys(): print "%s -> %s" % (k, ", ".join(_fake_solr["index"][k])) def local_query_fp(code_string,rows=10,get_data=False): keys = code_string.split(" ")[0::2] track_hist = [] unique_keys = [] for k in keys: if k not in unique_keys: track_hist += _fake_solr["index"].get(k, []) unique_keys += [k] top_matches = defaultdict(int) for track in track_hist: top_matches[track] += 1 if not get_data: # Make a list of lists that have track_id, score return FakeSolrResponse(sorted(top_matches.iteritems(), key=lambda (k,v): (v,k), reverse=True)[0:rows]) else: # Make a list of lists that have track_id, score, then fp lol = sorted(top_matches.iteritems(), key=lambda (k,v): (v,k), reverse=True)[0:rows] lol = map(list, lol) for x in lol: trackid = x[0].split("-")[0] x.append(_fake_solr["store"][x[0]]) x.append(_fake_solr["metadata"][x[0]]) return FakeSolrResponse(lol) def local_fp_code_for_track_id(track_id): return _fake_solr["store"][track_id] """ and these are the server-hosted versions of query, ingest and delete """ def delete(track_ids, do_commit=True, local=False): # delete one or more track_ids from the fp flat. 
if not isinstance(track_ids, list): track_ids = [track_ids] # delete a code from FP flat if local: return local_delete(track_ids) with solr.pooled_connection(_fp_solr) as host: for t in track_ids: host.delete_query("track_id:%s*" % t) try: get_tyrant().multi_del(track_ids) except KeyError: pass if do_commit: commit() def local_erase_database(): global _fake_solr _fake_solr = {"index": {}, "store": {}, "metadata": {}} def erase_database(really_delete=False, local=False): """ This method will delete your ENTIRE database. Only use it if you know what you're doing. """ if not really_delete: raise Exception("Won't delete unless you pass in really_delete=True") if local: return local_erase_database() with solr.pooled_connection(_fp_solr) as host: host.delete_query("*:*") host.commit() tyrant = get_tyrant() tyrant.multi_del(tyrant.keys()) def chunker(seq, size): return [tuple(seq[pos:pos + size]) for pos in xrange(0, len(seq), size)] def split_codes(fp): """ Split a codestring into a list of codestrings. Each string contains at most 60 seconds of codes, and codes overlap every 30 seconds. Given a track id, return track ids of the form trid-0, trid-1, trid-2, etc. """ # Convert seconds into time units segmentlength = 60 * 1000.0 / 23.2 halfsegment = segmentlength / 2.0 trid = fp["track_id"] codestring = fp["fp"] codes = codestring.split() pairs = chunker(codes, 2) pairs = [(int(x[1]), " ".join(x)) for x in pairs] pairs.sort() size = len(pairs) print(pairs) if len(pairs): lasttime = pairs[-1][0] numsegs = int(lasttime / halfsegment) + 1 else: numsegs = 0 ret = [] sindex = 0 for i in range(numsegs): s = i * halfsegment e = i * halfsegment + segmentlength #print i, s, e while sindex < size and pairs[sindex][0] < s: #print "s", sindex, l[sindex] sindex+=1 eindex = sindex while eindex < size and pairs[eindex][0] < e: #print "e",eindex,l[eindex] eindex+=1 key = "%s-%d" % (trid, i) # print(size) # print(sindex) # print(eindex) segment = {"track_id": key, "fp": " ".join((p[1]) for p in pairs[sindex:eindex]), "length": fp["length"], "codever": fp["codever"]} if "artist" in fp: segment["artist"] = fp["artist"] if "release" in fp: segment["release"] = fp["release"] if "track" in fp: segment["track"] = fp["track"] if "source" in fp: segment["source"] = fp["source"] if "import_date" in fp: segment["import_date"] = fp["import_date"] ret.append(segment) return ret def ingest(fingerprint_list, do_commit=True, local=False, split=True): """ Ingest some fingerprints into the fingerprint database. The fingerprints should be of the form {"track_id": id, "fp": fp string, "artist": artist, "release": release, "track": track, "length": length, "codever": "codever", "source": source, "import_date":import date} or a list of the same. All parameters except length must be strings. Length is an integer. artist, release and track are not required but highly recommended. The import date should be formatted as an ISO 8601 date (yyyy-mm-ddThh:mm:ssZ) and should be the UTC time that the the import was performed. If the date is missing, the time the script was started will be used. length is the length of the track being ingested in seconds. if track_id is empty, one will be generated. 
""" if not isinstance(fingerprint_list, list): fingerprint_list = [fingerprint_list] docs = [] codes = [] if split: for fprint in fingerprint_list: if not ("track_id" in fprint and "fp" in fprint and "length" in fprint and "codever" in fprint): raise Exception("Missing required fingerprint parameters (track_id, fp, length, codever") if "import_date" not in fprint: fprint["import_date"] = IMPORTDATE if "source" not in fprint: fprint["source"] = "local" split_prints = split_codes(fprint) docs.extend(split_prints) codes.extend(((c["track_id"].encode("utf-8"), c["fp"].encode("utf-8")) for c in split_prints)) else: docs.extend(fingerprint_list) codes.extend(((c["track_id"].encode("utf-8"), c["fp"].encode("utf-8")) for c in fingerprint_list)) if local: return local_ingest(docs, codes) with solr.pooled_connection(_fp_solr) as host: host.add_many(docs) get_tyrant().multi_set(codes) if do_commit: commit() def commit(local=False): with solr.pooled_connection(_fp_solr) as host: host.commit() def query_fp(code_string, rows=15, local=False, get_data=False): if local: return local_query_fp(code_string, rows, get_data=get_data) try: # query the fp flat if get_data: fields = "track_id,artist,release,track,length" else: fields = "track_id" with solr.pooled_connection(_fp_solr) as host: resp = host.query(code_string, qt="/hashq", rows=rows, fields=fields) return resp except solr.SolrException: return None def fp_code_for_track_id(track_id, local=False): if local: return local_fp_code_for_track_id(track_id) return get_tyrant().get(track_id.encode("utf-8")) def new_track_id(): rand5 = ''.join(random.choice(string.letters) for x in xrange(5)).upper() global _hexpoch _hexpoch += 1 hexpoch = str(hex(_hexpoch))[2:].upper() ## On 32-bit machines, the number of milliseconds since 1970 is ## a longint. On 64-bit it is not. hexpoch = hexpoch.rstrip('L') return "TR" + rand5 + hexpoch
repo_name: alexonea/3rdyrp
path: server/solr/API/fp.py
language: Python
license: mit
size: 24,456
keyword: [ "Brian" ]
text_hash: b151b5b9efa18412760c930f879194fd6ca635cfd7abd4debafbaa471d7b6e6e
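One concrete detail worth drawing out from the fp.py row above: fingerprint timestamps are in roughly 23.2 ms units, so the 60-second segments produced by split_codes are 60 * 1000 / 23.2, about 2586 units long, and a new segment starts every half segment (30 seconds). A small sketch of just that boundary arithmetic, separate from the full Echo Nest code:

# Segment-boundary arithmetic described in fp.py's split_codes():
# timestamps are in ~23.2 ms units; segments span 60 s and start every 30 s.
SEGMENT_LENGTH = 60 * 1000.0 / 23.2   # ~2586 time units per 60 s segment
HALF_SEGMENT = SEGMENT_LENGTH / 2.0   # consecutive segments overlap by 30 s

def segment_bounds(last_timestamp):
    """Yield (start, end) windows covering codes up to last_timestamp."""
    num_segments = int(last_timestamp / HALF_SEGMENT) + 1
    for i in range(num_segments):
        start = i * HALF_SEGMENT
        yield start, start + SEGMENT_LENGTH

# A track whose last code lands near 100 s gets four overlapping windows.
print(list(segment_bounds(100 * 1000.0 / 23.2)))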
from __future__ import absolute_import from __future__ import print_function import getpass import logging import os import pprint import time import uuid from typing import Dict from pwd import getpwuid from tabulate import tabulate import pandas as pd from .object_helpers import ( set_docstring, Workspace, format_timestamp, MetList, MetUnicode, MetFloat, MetInstance, MetInt, MetEnum, MetBool, HasTraits, Stub ) from six.moves import zip logger = logging.getLogger(__name__) #Making a new table means adding a new class to metatlas_objects.py. #Floats are set as single precision by default, unfortunately, so here is the best way to create a table containing floats: #Create a new table #1) Create a new class in metatlas_objects.py. #2) Create a new object of that class and store it. #3) Log into database #4) Run alter table TABLE_NAME modify COLUMN_NAME double; for each float column #Add a floating point object to a new table #1) Update the class in metatlas_objects.py. #2) Create an object with the updated class and store it. #3) Log into database #4) Run alter table TABLE_NAME modify COLUMN_NAME double; for each new float column # Whether to fetch stubs automatically, disabled when we want to display # a large number of objects. FETCH_STUBS = True ADDUCTS = ('','[M]+','[M+H]+','[M+H]2+','[M+2H]2+','[M+H-H2O]2+','[M+K]2+','[M+NH4]+','[M+Na]+','[M+H-H2O]+','[M-H]-','[M-2H]-','[M-H+Cl]-','[M-2H]2-','[M+Cl]-','[2M+H]+','[2M-H]-','[M-H+Na]+','[M+K]+','[M+2Na]2+','[M-e]+','[M+acetate]-','[M+formate]-','[M-H+Cl]2-','[M-H+2Na]+') POLARITY = ('positive', 'negative', 'alternating') FRAGMENTATION_TECHNIQUE = ('hcd','cid','etd','ecd','irmpd') def retrieve(object_type, **kwargs): """Get objects from the Metatlas object database. This will automatically select only objects created by the current user unless `username` is provided. Use `username='*'` to search against all users. Parameters ---------- object_type: string The type of object to search for (i.e. "Groups"). **kwargs Specific search queries (i.e. name="Sargasso"). Use '%' for wildcard patterns (i.e. description='Hello%'). If you want to match a '%' character, use '%%'. Returns ------- objects: list List of Metatlas Objects meeting the criteria. Will return the latest version of each object. """ workspace = Workspace.get_instance() out = workspace.retrieve(object_type, **kwargs) workspace.close_connection() return out def remove(object_type, **kwargs): """Remove objects from the Metatlas object database. Parameters ---------- object_type: string The type of object to remove (i.e. "Groups"). **kwargs Specific search queries (i.e. name="Sargasso"). Use '%' for wildcard patterns (i.e. description='Hello%'). If you want to match a '%' character, use '%%'. """ if not isinstance(object_type, str): print('remove() expects a string argument, use remove_objects() to' 'delete actual objects.') workspace = Workspace.get_instance() workspace.remove(object_type, **kwargs) workspace.close_connection() def remove_objects(objects, all_versions=True, **kwargs): """Remove objects from the database. Parameters ---------- all_versions: boolean, optional If True, remove all versions of the object sharing the current head_id. """ if isinstance(objects, str): print('remove_objects() expects actual objects, use remove() to' 'remove objects by type.') workspace = Workspace.get_instance() workspace.remove_objects(objects, all_versions, **kwargs) workspace.close_connection() def store(objects, **kwargs): """Store Metatlas objects in the database. 
Parameters ---------- objects: Metatlas object or list of Metatlas Objects Object(s) to store in the database. """ workspace = Workspace.get_instance() workspace.save_objects(objects, **kwargs) workspace.close_connection() @set_docstring class MetatlasObject(HasTraits): name = MetUnicode('Untitled', help='Name of the object') description = MetUnicode('No description', help='Description of the object') unique_id = MetUnicode(help='Unique identifier for the object').tag(readonly=True) creation_time = MetInt(help='Unix timestamp at object creation').tag(readonly=True) username = MetUnicode(help='Username who created the object').tag(readonly=True) last_modified = MetInt(help='Unix timestamp at last object update').tag(readonly=True) prev_uid = MetUnicode(help='Unique id of previous version').tag(readonly=True) head_id = MetUnicode(help='Unique id of most recent version of this object').tag(readonly=True) _loopback_guard = MetBool(False).tag(readonly=True) _changed = MetBool(False).tag(readonly=True) def __init__(self, **kwargs): """Set the default attributes.""" logger.debug('Creating new instance of %s with parameters %s', self.__class__.__name__, kwargs) kwargs.setdefault('unique_id', uuid.uuid4().hex) kwargs.setdefault('head_id', kwargs['unique_id']) kwargs.setdefault('username', getpass.getuser()) kwargs.setdefault('creation_time', int(time.time())) kwargs.setdefault('last_modified', int(time.time())) super(MetatlasObject, self).__init__(**kwargs) self._changed = True self.observe(self._on_update, type='change') def _update(self, override_user=False): """Store the object in the workspace, including child objects. Child objects are stored in their own tables, and Lists of Child objects are captured in link tables. """ self._loopback_guard = True # see if we need to create a new object if not override_user and self.username != getpass.getuser(): self._changed = True self.prev_uid = self.unique_id self.unique_id = uuid.uuid4().hex self.head_id = self.unique_id self.username = getpass.getuser() self.last_modified = time.time() return True, None else: changed, prev_uid = self._changed, self.prev_uid if changed: self.last_modified = time.time() self.prev_uid = uuid.uuid4().hex self._changed = False self._loopback_guard = False return changed, prev_uid def clone(self, recursive=False): """Create a new version of this object. Parameters ---------- recursive: boolean, optional If true, clone all of the descendant objects as well. Returns ------- obj: MetatlasObject Cloned object. """ logger.debug('Cloning instance of %s with recursive=%s', self.__class__.__name__, recursive) obj = self.__class__() for (tname, trait) in self.traits().items(): if tname.startswith('_') or trait.metadata.get('readonly', False): continue val = getattr(self, tname) if recursive and isinstance(trait, MetList): val = [v.clone(True) for v in val] elif recursive and isinstance(trait, MetInstance) and val: val = val.clone(True) setattr(obj, tname, val) obj.prev_uid = self.unique_id obj.head_id = self.unique_id obj.unique_id = uuid.uuid4().hex return obj def show_diff(self, unique_id=None): """Show a diff of what has changed between this and previous version. Parameters ---------- unique_id: optional, string Unique id to compare against (defaults to current entry in db). 
""" if unique_id is None: unique_id = self.unique_id obj = retrieve(self.__class__.__name__, unique_id=unique_id) if len(obj) != 1: print('No change!') return obj = obj[0] msg = [] for (tname, trait) in self.traits().items(): if tname.startswith('_') or trait.metadata['readonly']: continue val = getattr(self, tname) other = getattr(obj, tname) if isinstance(trait, MetInstance): if val.unique_id != other.unique_id: msg.append((tname, other.unique_id, val.unique_id)) elif isinstance(trait, MetList): if not len(val) == len(other): msg.append((tname, '%s items' % len(other), '%s items' % len(val))) else: for (v, o) in zip(val, other): if not v.unique_id == o.unique_id: msg.append((tname, 'objects changed', '')) break elif val != other: msg.append((tname, str(other), str(val))) print((tabulate(msg))) def _on_update(self, change): """When the model changes, set the update fields. """ if self._loopback_guard or change['name'].startswith('_'): return self._changed = True def __str__(self): return self.__repr__() def __repr__(self): names = sorted(self.trait_names()) names.remove('name') names = ['name'] + [n for n in names if not n.startswith('_')] state = dict([(n, getattr(self, n)) for n in names]) state['creation_time'] = format_timestamp(self.creation_time) state['last_modified'] = format_timestamp(self.last_modified) return pprint.pformat(state) #str(state)# def __getattribute__(self, name): """Automatically resolve stubs on demand. """ # value = super(MetatlasObject, self).__getattribute__(name) value = super().__getattribute__(name) if isinstance(value, Stub) and FETCH_STUBS: value = value.retrieve() setattr(self, name, value) elif isinstance(value, list) and value and FETCH_STUBS: new = [] changed = False for subvalue in value: if isinstance(subvalue, Stub): new.append(subvalue.retrieve()) changed = True else: new.append(subvalue) if changed: setattr(self, name, new) value = new return value @set_docstring class Method(MetatlasObject): """ For each LCMS run, a Method is a consistent description of how the sample was prepared and LCMS data was collected. 
""" protocol_ref = MetUnicode(help='Reference to a published protocol: ' + 'identical to the protocol used.') quenching_method = MetUnicode(help='Description of the method used to ' + 'stop metabolism.') extraction_solvent = MetUnicode(help='Solvent or solvent mixture used to ' + 'extract metabolites.') reconstitution_method = MetUnicode(help='Solvent or solvent mixture ' + 'the extract is reconstituted in prior to injection for an LCMS Run.') mobile_phase_a = MetUnicode(help='Solvent or solvent mixture.') mobile_phase_b = MetUnicode(help='Solvent or solvent mixture.') temporal_parameters = MetUnicode('List of temporal changes to the' + 'mixing of mobile_phase_a and mobile_phase_b.') #Time = MetList() #Flow = MetList() #A_percent = MetList() #B_percent = MetList() #Chromatography Stack #Model #Serial Number #Modules column_model = MetUnicode(help='Brand and catalog number of the column') column_type = MetUnicode(help='Class of column used.') scan_mz_range = MetUnicode(help='Minimum and ' + 'maximum mz recorded for a run.') instrument = MetUnicode(help='Brand and catalog number for the ' + 'mass spectrometer.') ion_source = MetUnicode(help='Method for ionization.') mass_analyzer = MetUnicode(help='Method for detection.') polarity = MetEnum(POLARITY, 'positive', help='polarity for the run') @set_docstring class Sample(MetatlasObject): """A Sample is the material that is processed with a Method.""" pass # def load_lcms_files(mzml_files): # """Parse mzML files and load them into LcmsRun objects. # Note: This should be done automatically for runs in # /project/projectdirs/metatlas/raw_data/<username> # Parameters # ---------- # mzml_files: list of str # List of paths to mzml_files. # Returns # ------- # runs: list # List of LcmsRun objects. # """ # runs = [] # for fname in mzml_files: # hdf5_file = fname.replace('.mzML', '.h5') # if os.path.exists(hdf5_file): # print('File already exists: %s' % hdf5_file) # continue # try: # from metatlas import LcmsRun, mzml_to_hdf # hdf_file = mzml_to_hdf(fname) # user = getpwuid(os.stat(fname).st_uid).pw_name # filename = os.path.splitext(os.path.basename(fname))[0] # dirname = os.path.dirname(fname) # experiment = os.path.basename(dirname) # description = experiment + ' ' + filename # ctime = os.stat(fname).st_ctime # run = LcmsRun(name=filename, description=description, # created_by=user, # modified_by=user, # created=ctime, last_modified=ctime, # mzml_file=fname, hdf5_file=hdf_file) # runs.append(run) # except Exception as e: # print(e) # store(runs) # return runs @set_docstring class LcmsRun(MetatlasObject): """An LCMS run is the reference to a file prepared with liquid chromatography and mass spectrometry. The msconvert program is used to convert raw data (centroided is prefered) to mzML. Note: These objects are not intented to be created directly, but by putting the files in /project/projectdirs/metatlas/raw_data/<username> or by running `load_lcms_files()`. """ method = MetInstance(Method) experiment = MetUnicode(help='The name of the experiment') hdf5_file = MetUnicode(help='Path to the HDF5 file at NERSC') mzml_file = MetUnicode(help='Path to the MZML file at NERSC') acquisition_time = MetInt(help='Unix timestamp when data was acquired creation') injection_volume = MetFloat() injection_volume_units = MetEnum(('uL', 'nL'), 'uL') pass_qc = MetBool(help= 'True/False for if the LCMS Run has passed a quality control assessment') sample = MetInstance(Sample) @set_docstring class FunctionalSet(MetatlasObject): """Functional sets of compounds. 
    For example, a set called "hexose" would include "glucose, galactose, etc".
    Functional sets can be sets-of-sets.
    "Sugars" would be a set that contains "Hexoses".
    """
    enabled = MetBool(True)
    members = MetList(MetInstance(MetatlasObject))


@set_docstring
class Compound(MetatlasObject):
    """A Compound is a structurally distinct entry.

    The majority of MetAtlas compounds are from a merge of WikiData, miBIG,
    HMDB, ChEBI, LipidMaps, MetaCyc, GNPS, ENZO-Library, MSMLS-Library.
    Compounds that had an unparseable structural identifier by RDKIT June,
    2016, were ignored. Distinct molecules are found by inchi-key of
    neutralized and de-salted molecules.
    """
    # name is inherited by all metatlas objects and is the most commonly used name for each compound
    # Description is a short text description of the compound
    iupac_name = MetUnicode(help='IUPAC International Chemical Identifier, optional')
    synonyms = MetUnicode()
    source = MetUnicode()
    chebi_id = MetUnicode()
    hmdb_id = MetUnicode()
    img_abc_id = MetUnicode()
    kegg_id = MetUnicode()
    lipidmaps_id = MetUnicode()
    metacyc_id = MetUnicode()
    pubchem_compound_id = MetUnicode()
    pubchem_url = MetUnicode(help='Reference database table url')
    wikipedia_url = MetUnicode(help='Reference database table url')
    kegg_url = MetUnicode(help='Reference database table url')
    hmdb_url = MetUnicode(help='Reference database table url')
    chebi_url = MetUnicode(help='Reference database table url')
    lipidmaps_url = MetUnicode(help='Reference database table url')
    # RDKit calculates these with some helper functions
    formula = MetUnicode()
    mono_isotopic_molecular_weight = MetFloat()
    permanent_charge = MetInt()
    number_components = MetInt(help='Must be one or greater')
    num_free_radicals = MetInt()
    inchi = MetUnicode()
    inchi_key = MetUnicode()
    neutralized_inchi = MetUnicode()
    neutralized_inchi_key = MetUnicode()
    neutralized_2d_inchi = MetUnicode()
    neutralized_2d_inchi_key = MetUnicode()
    # reference_xrefs = MetList(MetInstance(ReferenceDatabase),
    #                           help='Tag a compound with compound ids from ' +
    #                           'external databases')
    # functional_sets = MetList(MetInstance(FunctionalSet))


@set_docstring
class Reference(MetatlasObject):
    """Placeholder for future reference sources.
    We expect many in silico methods will soon be robust enough to suggest
    retention times, m/z, and fragmentation.
    Pactolus is a great example of this.
""" lcms_run = MetInstance(LcmsRun) enabled = MetBool(True) ref_type = MetUnicode(help='The type of reference') @set_docstring class IdentificationGrade(MetatlasObject): """ Each CompoundIdentification will have an identification_grade Identification Grades: 1) High intensity and verifiable by MSMS and RT authentic standard 2) Verifiable by MSMS from database or publication 3) Has fragment ion or neutral loss characteristic of a class of compounds 4) definitive chemical formula and adduct 5) Significant changing metabolite with MSMS suggestion from MIDAS 6) Significant changing metabolite 7) Not Significant changing metabolite with MSMS suggestion from MIDAS 8) Not Significant changing metabolite """ pass ID_GRADES: Dict[str, IdentificationGrade] = dict() class _IdGradeTrait(MetInstance): klass = IdentificationGrade def validate(self, obj, value): global ID_GRADES if not value: return if isinstance(value, self.klass): return value elif isinstance(value, str): if value.upper() in ID_GRADES: return ID_GRADES[value.upper()] objects = Workspace.get_instance().retrieve('identificationgrade', name=value.upper()) if objects: ID_GRADES[value.upper()] = objects[-1] return objects[-1] else: self.error(obj, value) else: self.error(obj, value) @set_docstring class Group(MetatlasObject): """A Group can be a: file, group of files, or group of groups """ items = MetList(MetInstance(MetatlasObject), help='Can contain other groups or LCMS Runs') short_name = MetUnicode() @set_docstring class MzIntensityPair(MetatlasObject): mz = MetFloat() intensity = MetFloat() @set_docstring class FragmentationReference(Reference): #This is specific for storing MS2 fragmentation spectra #A Fragmentation Tree will be added as a datatype when MS^n is deposited polarity = MetEnum(POLARITY, 'positive') precursor_mz = MetFloat() isolation_window = MetFloat(-1.0,help='width of the isolation window in Daltons') collision_energy = MetUnicode()#MetFloat() # adduct = MetEnum(ADDUCTS,'',help='Adduct') technique = MetEnum(FRAGMENTATION_TECHNIQUE,'cid') mz_intensities = MetList(MetInstance(MzIntensityPair), help='list of [mz, intesity] tuples that describe ' + ' a fragmentation spectra') @set_docstring class RtReference(Reference): rt_peak = MetFloat() rt_min = MetFloat() rt_max = MetFloat() rt_units = MetEnum(('sec', 'min'), 'sec') @set_docstring class MzReference(Reference): """Source of the assertion that a compound has a given m/z and other properties directly tied to m/z. """ mz = MetFloat() mz_tolerance = MetFloat() mz_tolerance_units = MetEnum(('ppm', 'Da'), 'ppm') detected_polarity = MetEnum(POLARITY, 'positive') adduct = MetEnum(ADDUCTS,'',help='Adduct') #add when needed: charge = MetFloat(help='the charge on the m/z feature') modification = MetUnicode(help='Optional modification') observed_formula = MetUnicode(help='Optional observed formula') @set_docstring class IntensityReference(Reference): """Source of the assertion that a compound has a given m/z and other properties directly tied to m/z. 
""" peak_height = MetFloat() peak_area = MetFloat() amount = MetFloat() amount_units = MetEnum(('nmol', 'mol'), 'nmol') @set_docstring class CompoundIdentification(MetatlasObject): """A CompoundIdentification links multiple sources of evidence about a compound's identity to an Atlas.""" compound = MetList(MetInstance(Compound)) identification_grade = _IdGradeTrait( help='Identification grade of the id (can be specified by a letter A-H' ) identification_notes = MetUnicode('', help='notes about this identifiation') ms1_notes = MetUnicode('', help='notes about ms1 peaks') ms2_notes = MetUnicode('', help='notes about ms2 matches') mz_references = MetList(MetInstance(MzReference)) rt_references = MetList(MetInstance(RtReference)) frag_references = MetList(MetInstance(FragmentationReference)) intensity_references = MetList(MetInstance(IntensityReference)) internal_standard_id = MetUnicode(help='Freetext identifier for an internal standard') do_normalization = MetBool(False) internal_standard_to_use = MetUnicode(help='identifier of which internal standard to normalize by') @set_docstring class Atlas(MetatlasObject): """An atlas contains many compound_ids.""" compound_identifications = MetList( MetInstance(CompoundIdentification), help='List of Compound Identification objects') # @set_docstring # class SampleSet(MetatlasObject): # lcmsruns = MetList(MetInstance(LcmsRun)) @set_docstring class MZMineTask(MetatlasObject): """ For a collection of lcms runs, perform untargeted analysis with a scriptable binary Store the run parameters in the database for reuse later """ lcmsruns = MetList(MetInstance(LcmsRun)) output_csv = MetUnicode(help='Path to the output csv file at NERSC') output_project = MetUnicode(help='Path to the output project file at NERSC') input_xml = MetUnicode(help='Path to the input xml file at NERSC') input_xml_text = MetUnicode(help='Text of the batch xml file') mz_tolerance = MetFloat(8.0) mz_tolerance_units = MetEnum(('ppm', 'Da'), 'ppm') polarity = MetEnum(POLARITY, 'positive') min_peak_duration = MetFloat(0.015) max_peak_duration = MetFloat(30) rt_tol_perfile = MetFloat(0.015) rt_tol_multifile = MetFloat(0.15) rt_units = MetEnum(('sec', 'min'), 'min') noise_floor = MetFloat(40000.0,help='Signals below this value are not considered') min_peak_height = MetFloat(100000.0,help='max of eic must be at least this big') mzmine_launcher = MetUnicode(help='Path to the shell script that launches mzmine file at NERSC') # @set_docstring # class PactolusTask(MetatlasObject): # """ # For an LCMS Run, search its msms spectra against pactolus trees # """ # lcmsrun = MetInstance(LcmsRun) # output_file = MetUnicode(help='Path to the output hdf5 file at NERSC') # input_file = MetUnicode(help='Path to the input hdf5 container file at NERSC') # mz_tol = MetFloat(help='mz tolerance in Daltons') # polarity = MetEnum(POLARITY, 'positive') # min_intensity = MetFloat(help='minimum precursor ion intensity') # pactolus_tree_directory = MetUnicode(help='Path to the directory containing pactolus trees at NERSC') # pactolus_launcher = MetUnicode(help='Path to the shell script that launches pactolus search at NERSC') def find_invalid_runs(**kwargs): """Find invalid runs. 
""" override = kwargs.pop('_override', False) if not override: kwargs.setdefault('username', getpass.getuser()) else: kwargs.setdefault('username', '*') all_runs = retrieve('lcmsruns', **kwargs) invalid = [] for run in all_runs: if (not os.path.exists(run.hdf5_file) or not os.path.exists(run.mzml_file) or not os.stat(run.hdf5_file).st_size): invalid.append(run) return invalid # Singleton Workspace object # Must be instantiated after all of the Metatlas Objects # are defined so we can get all of the subclasses. # workspace = Workspace() def to_dataframe(objects): """ Convert a set of Metatlas objects into a dataframe. """ global FETCH_STUBS # we want to handle dates, enums, and use ids for objects FETCH_STUBS = False objs = [o._trait_values.copy() for o in objects if o.__class__ == objects[0].__class__] if not objs: FETCH_STUBS = True return pd.DataFrame() FETCH_STUBS = True enums = [] cols = [] # remove lists and use strings for objects for (tname, trait) in objects[0].traits().items(): if tname.startswith('_') or tname in ['head_id', 'prev_uid']: continue cols.append(tname) if isinstance(trait, MetList): [o.__setitem__(tname, [i.unique_id for i in o[tname]]) for o in objs] elif isinstance(trait, MetInstance): for obj_id,o in enumerate(objs): if tname not in o: o[tname] = 'None' elif isinstance(trait, MetEnum): enums.append(tname) else: for obj_id,o in enumerate(objs): if tname not in o: o[tname] = 'None' dataframe = pd.DataFrame(objs)[sorted(cols)] # for col in enums: # dataframe[col] = dataframe[col].astype('category') for col in ['last_modified', 'creation_time']: dataframe[col] = pd.to_datetime(dataframe[col], unit='s') return dataframe
biorack/metatlas
metatlas/datastructures/metatlas_objects.py
Python
bsd-3-clause
26,604
[ "RDKit" ]
8a0212186e9019b1574ee380552ba73b577b56901f8b95c0247f7c5fd953844e
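A minimal usage sketch for the metatlas object model above. It assumes the module is importable as metatlas_objects and that a Workspace database connection is configured; the compound values and names are made up for illustration.

import metatlas_objects as mo

# Build a compound, wrap it in an identification, and collect it in an atlas.
glucose = mo.Compound(name='glucose', formula='C6H12O6',
                      mono_isotopic_molecular_weight=180.0634)
cid = mo.CompoundIdentification(
    compound=[glucose],
    mz_references=[mo.MzReference(mz=181.0707, detected_polarity='positive')])
atlas = mo.Atlas(name='demo_atlas', compound_identifications=[cid])

mo.store([atlas])                                          # persists the atlas and its children
fetched = mo.retrieve('Atlas', unique_id=atlas.unique_id)  # read it back from the workspace
newer = atlas.clone(recursive=True)                        # new unique_id; prev_uid points at the original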
#! /usr/bin/env python import numpy as np import matplotlib.pyplot as plt from argparse import ArgumentParser import warnings warnings.filterwarnings("ignore") def loadfiletoarray(file): data=np.loadtxt(file, usecols=[0,1]) return data def assignbins(dim, args): minimum=float(dim[0]) maximum=float(dim[1]) if args.bw: bw=float(args.bw) else : bw = 5 bins=np.arange(minimum,(maximum+bw),bw) return bins def prephist(hist2, cb_max): hist2=.5961*np.log(hist2) ####Convert to free energy in Kcal/mol hist2=np.max(hist2)-hist2 ## zero value to lowest energy state ##remove infinity values from free energy plot temphist=hist2 #max value to set infinity values to is cb_max for y in range(len(temphist[0,:])): for x in range(len(temphist[:,0])): if np.isinf(temphist[x,y]): temphist[x,y]=cb_max return temphist ########### MAIN ############# def main(): args = cmdlineparse() inputfile=loadfiletoarray(args.input) length=inputfile[:,0] rows = len(length) if args.Xdim: binsX= assignbins(args.Xdim, args) else: binsX= assignbins([-180,180], args) if args.Ydim: binsY= assignbins(args.Ydim, args) else: binsY= assignbins([-180,180], args) ##HISTOGRAM EVERYTHING hist2, edgesX, edgesY = np.histogram2d(inputfile[:,0], inputfile[:,1], bins=(binsX, binsY)) cb_max=8 ## MAX VALUE TO SET ALL INFINITY VALUES AND TO SET THE COLORBAR TOO hist2=prephist(hist2, cb_max) cbar_ticks=[0, cb_max*.25, cb_max*.5, cb_max*.75, cb_max] plt.figure(1, figsize=(11,8.5)) extent = [edgesX[0], edgesX[-1], edgesY[-1], edgesY[0]] plt.imshow(hist2.transpose(), extent=extent, interpolation='gaussian') cb = plt.colorbar(ticks=cbar_ticks, format=('%.1f')) imaxes = plt.gca() plt.axes(cb.ax) plt.clim(vmin=0,vmax=cb_max) plt.yticks(fontsize=18) plt.axes(imaxes) axis=(min(binsX), max(binsX), min(binsY), max(binsY)) plt.axis(axis) plt.xticks(size='18') plt.yticks(size='18') plt.savefig('2D_Free_energy_surface.png', bbox_inches=0) plt.show() def cmdlineparse(): parser = ArgumentParser(description="command line arguments") parser.add_argument("-input", dest="input", required=True, help="2D input file", metavar="<2D input file>") parser.add_argument("-Xdim", dest="Xdim", required=False, nargs="+", help="Xdimensions", metavar="<Xmin Xmax >") parser.add_argument("-Ydim", dest="Ydim", required=False, nargs="+", help="Ydimension", metavar="<Ymin Ymax >") parser.add_argument("-bw", dest="bw", required=False, help="Binwidth", metavar="<Binwidth >") args=parser.parse_args() return args if __name__ == '__main__': main()
navjeet0211/phd
misc/2DFreeEnergyPlot.py
Python
gpl-2.0
2,821
[ "Gaussian" ]
5e40663707eb53a1ab6bce16c8d7b905543d3455ad02046eff461a58e5273fa3
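The script above builds a Boltzmann-inversion free energy surface: the hard-coded factor 0.5961 is consistent with k_B*T in kcal/mol at roughly 300 K (0.0019872 kcal/(mol*K) * 300 K ≈ 0.596), so each bin ends up with G = max(RT*ln N) - RT*ln N, i.e. zero at the most populated bin, with empty bins capped at cb_max. A short sketch of the same conversion on a hypothetical 2x2 count histogram (values are illustrative; the script itself is invoked as, e.g., python 2DFreeEnergyPlot.py -input angles.dat -bw 5, with a made-up input file name):

import numpy as np

counts = np.array([[1200., 40.], [5., 0.]])   # hypothetical histogram counts
RT = 0.0019872 * 300                          # ~0.5961 kcal/mol at 300 K
with np.errstate(divide='ignore'):
    G = RT * np.log(counts)
G = np.max(G) - G                             # zero at the most populated bin
G[np.isinf(G)] = 8.0                          # cap empty bins, as prephist() does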
"""Basic setuptools script for DIRACDocs.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import glob # Actual setuptools from setuptools import setup, find_packages # Find the base dir where the setup.py lies BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "diracdoctools")) # Take all the packages but the scripts and tests ALL_PACKAGES = find_packages(where=BASE_DIR, exclude=["*test*"]) PACKAGE_DIR = dict(("%s" % p, os.path.join(BASE_DIR, p.replace(".", "/"))) for p in ALL_PACKAGES) # We rename the packages so that they contain diracdoctools ALL_PACKAGES = ["diracdoctools.%s" % p for p in ALL_PACKAGES] ALL_PACKAGES.insert(0, "diracdoctools") PACKAGE_DIR["diracdoctools"] = BASE_DIR # The scripts to be distributed SCRIPTS = glob.glob("%s/scripts/*.py" % BASE_DIR) setup( name="diracdoctools", version="6.19.2", url="https://github.com/DIRACGRID/DIRAC/docs", license="GPLv3", package_dir=PACKAGE_DIR, packages=ALL_PACKAGES, scripts=SCRIPTS, install_requires=["sphinx_rtd_theme", "sphinx_panels"], )
ic-hep/DIRAC
docs/setup.py
Python
gpl-3.0
1,139
[ "DIRAC" ]
1e90ca7bd3e36315acf2553f1705010261df6c639932cdb974e81e6bcac6f635
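A small illustration of the package renaming that this setup.py performs. The subpackage names below are hypothetical (whatever find_packages() actually discovers under docs/diracdoctools); the point is that everything gets re-rooted under the diracdoctools namespace even though setup.py lives one level above it.

# Sketch of the remapping logic, with a made-up find_packages() result.
found = ['cmd', 'cmd.html']
package_dir = {p: '/abs/path/docs/diracdoctools/' + p.replace('.', '/') for p in found}
packages = ['diracdoctools.%s' % p for p in found]
packages.insert(0, 'diracdoctools')
# packages == ['diracdoctools', 'diracdoctools.cmd', 'diracdoctools.cmd.html'],
# so after "pip install ." the tools import as "import diracdoctools.cmd".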
# -*- coding: utf-8 -*- #! \file ./doit/support/visitnode.py #! \author Jiří Kučera, <sanczes@gmail.com> #! \stamp 2016-01-08 17:50:32 (UTC+01:00, DST+00:00) #! \project DoIt!: Tools and Libraries for Building DSLs #! \license MIT #! \version 0.0.0 #! \fdesc @pyfile.docstr # """\ Visitable nodes.\ """ __license__ = """\ Copyright (c) 2014 - 2017 Jiří Kučera. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\ """ from doit.support.errors import doit_assert, not_implemented from doit.support.utils import deep_eq _assert = doit_assert class VisitableNode(object): """ """ __slots__ = [] def __init__(self): """ """ pass #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ not_implemented() #-def def traverse(self, f, *args): """ """ not_implemented() #-def #-class class VisitableLeaf(VisitableNode): """ """ __slots__ = [ '__value' ] def __init__(self, value): """ """ VisitableNode.__init__(self) self.__value = value #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) \ and VisitableNode.__eq__(self, other) \ and deep_eq(self.__value, other.__value) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ return f(self, self.__value, *args) #-def def traverse(self, f, *args): """ """ return f(self, self.__value, *args) #-def #-class class NullaryVisitableNode(VisitableNode): """ """ __slots__ = [] def __init__(self): """ """ VisitableNode.__init__(self) #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) \ and VisitableNode.__eq__(self, other) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ return f(self, *args) #-def def traverse(self, f, *args): """ """ return f(self, *args) #-def #-class class UnaryVisitableNode(VisitableNode): """ """ __slots__ = [ '__node' ] def __init__(self, node): """ """ _assert(isinstance(node, VisitableNode), "Visitable node expected") VisitableNode.__init__(self) self.__node = node #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) \ and VisitableNode.__eq__(self, other) \ and deep_eq(self.__node, other.__node) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ r = self.__node.visit(f, *args) return f(self, r, *args) #-def def traverse(self, f, *args): """ """ return f(self, self.__node, *args) #-def #-class class 
BinaryVisitableNode(VisitableNode): """ """ __slots__ = [ '__node1', '__node2' ] def __init__(self, node1, node2): """ """ _assert(isinstance(node1, VisitableNode), "Visitable node expected") _assert(isinstance(node2, VisitableNode), "Visitable node expected") VisitableNode.__init__(self) self.__node1 = node1 self.__node2 = node2 #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) \ and VisitableNode.__eq__(self, other) \ and deep_eq(self.__node1, other.__node1) \ and deep_eq(self.__node2, other.__node2) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ r1 = self.__node1.visit(f, *args) r2 = self.__node2.visit(f, *args) return f(self, r1, r2, *args) #-def def traverse(self, f, *args): """ """ return f(self, self.__node1, self.__node2, *args) #-def #-class class TernaryVisitableNode(VisitableNode): """ """ __slots__ = [ '__node1', '__node2', '__node3' ] def __init__(self, node1, node2, node3): """ """ _assert(isinstance(node1, VisitableNode), "Visitable node expected") _assert(isinstance(node2, VisitableNode), "Visitable node expected") _assert(isinstance(node3, VisitableNode), "Visitable node expected") VisitableNode.__init__(self) self.__node1 = node1 self.__node2 = node2 self.__node3 = node3 #-def def __eq__(self, other): """ """ return isinstance(other, self.__class__) \ and VisitableNode.__eq__(self, other) \ and deep_eq(self.__node1, other.__node1) \ and deep_eq(self.__node2, other.__node2) \ and deep_eq(self.__node3, other.__node3) #-def def __ne__(self, other): """ """ return not self.__eq__(other) #-def def visit(self, f, *args): """ """ r1 = self.__node1.visit(f, *args) r2 = self.__node2.visit(f, *args) r3 = self.__node3.visit(f, *args) return f(self, r1, r2, r3, *args) #-def def traverse(self, f, *args): """ """ return f(self, self.__node1, self.__node2, self.__node3, *args) #-def #-class
i386x/doit
doit/support/visitnode.py
Python
mit
6,883
[ "VisIt" ]
d46685475b72f08e9899905c54154bcd0d7d1f217411aae0ec7e2fae3b7b2afa
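A minimal sketch of using the visitor classes above, assuming they are importable from doit.support.visitnode. The same callback receives (leaf, value) for leaves and (node, child_result, ...) for inner nodes, so it dispatches on the node type.

from doit.support.visitnode import (
    VisitableLeaf, UnaryVisitableNode, BinaryVisitableNode
)

def sum_leaves(node, *rest):
    # Leaf call: rest == (value,); inner-node call: rest == (child results...).
    if isinstance(node, VisitableLeaf):
        return rest[0]
    return sum(rest)

tree = BinaryVisitableNode(VisitableLeaf(1), UnaryVisitableNode(VisitableLeaf(2)))
print(tree.visit(sum_leaves))   # 3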
from PIL import Image, ImageDraw, ImageFont import datetime import os from twilio.rest import TwilioRestClient from flask import Flask, request from . import app account = app.config['TWILIO_ACCOUNT'] token = app.config['TWILIO_TOKEN'] site_url = app.config['SITE_URL'] static_path = app.config['STATIC_PATH'] client = TwilioRestClient( account=account, token=token ) @app.route('/', methods=['GET', 'POST']) def send_image(): """ Sends the text and image back to the original sender. """ if request.method == 'GET': return 'The deployment worked! Now copy your browser URL into the' + \ ' Twilio message text box for your phone number.' sender_number = request.form.get('From', '') twilio_number = request.form.get('To', '') user_text = request.form.get('Body', '').strip().split()[0] image_url, msg_text = mod_photo(user_text) send_mms_twiml(image_url, msg_text, sender_number, twilio_number) return 'ok' def mod_photo(user_text): """ Modifies a base image to add the sender's text (ideally, their name) """ base = Image.open( static_path + 'static/images/original/place_kitten.jpeg' ).convert('RGBA') txt = Image.new('RGBA', base.size, (143, 83, 157, 0)) fnt = ImageFont.truetype( static_path + 'static/fonts/OpenSans-Bold.ttf', 86 ) d = ImageDraw.Draw(txt) base_w, base_h = (640, 1136) text_w, text_h = d.textsize(user_text, fnt) d.text( (((base_w-text_w)/2), 295), '{}'.format(user_text), font=fnt, fill=(143, 83, 157, 255) ) image = Image.alpha_composite(base, txt) image_time_stamp = datetime.datetime.now() image.save( static_path + 'static/images/changed/place_kitten_{}_{}.jpeg'.format( user_text, image_time_stamp.strftime('%y_%m_%d_%I%M%S') ) ) try: msg_text = ( "Thanks, {}. Hope you liked my kitten :) " "Visit {} to search for more kittens!".format( user_text, 'http://google.com' ) ) image_url = ( '{}static/images/changed/place_kitten_{}_{}.jpeg'.format( site_url, user_text, image_time_stamp.strftime('%y_%m_%d_%I%M%S') ) ) except: msg_text = "#ohno! We had trouble creating your image. " + \ "Here's a cute kitten instead!" image_url = "http://placekitten.com/g/640/1136" return image_url, msg_text def send_mms_twiml(image_url, msg_text, sender_number, twilio_number): """ Creates the actual message object. """ client.messages.create( to=sender_number, from_=twilio_number, body=msg_text, media_url=image_url )
patrickbeeson/text-me
app/views.py
Python
mit
2,842
[ "VisIt" ]
dc3e8a3f2233a30d116f7b4f2e9337b7d5f76a962059ebf0dd58c80421ce739d
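A sketch of exercising the webhook locally. The URL and phone numbers are made up, and the app must be running with valid Twilio credentials for the outbound MMS to succeed; the route itself simply returns 'ok' once the message has been queued.

import requests

resp = requests.post(
    'http://localhost:5000/',
    data={'From': '+15005550006', 'To': '+15005550001', 'Body': 'Alice'},
)
print(resp.text)   # 'ok'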
import os import os.path def visit(arg, dirname, names): print dirname, arg for name in names: subname = os.path.join(dirname, name) if os.path.isdir(subname): print ' %s/' % name else: print ' %s' % name print os.mkdir('example') os.mkdir('example/one') f = open('example/one/file.txt', 'wt') f.write('contents') f.close() f = open('example/two.txt', 'wt') f.write('contents') f.close() os.path.walk('example', visit, '(User data)')
razzius/PyClassLessons
instructors/lessons/practical_utils/examples/os-path-walk.py
Python
mit
497
[ "VisIt" ]
caeb460af8704dc383d1becb1a39d1937190ea85d92807fe96a8649c249b6514
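The walker above is Python 2 code built around os.path.walk, which was removed in Python 3. A roughly equivalent traversal with os.walk (which hands back directories and files already separated) looks like this:

import os

for dirname, dirnames, filenames in os.walk('example'):
    print(dirname, '(User data)')
    for name in dirnames:
        print('  %s/' % name)
    for name in filenames:
        print('  %s' % name)
    print()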
""" A model ensemble built by sampling from the posterior of a model using, presumably, MCMC. Author: Ilias Bilionis Date: 5/1/2015 """ __all__ = ['ModelEnsemble'] import numpy as np from GPy import Model from GPy.inference.mcmc import HMC from . import expected_improvement class ModelEnsemble(object): """ A collection of models. :param model: The underlying model. :param param_list: The parameter list representing samples from the posterior of the model. (2D numpy array, rows are samples, columns are parameters) :param w: The weights corresponding to each parameter. """ # The underlying model _model = None # List of parameters _param_list = None # Weight corresponding to each model (normalized) _w = None @property def model(self): """ :getter: Get the model. """ return self._model @model.setter def model(self, value): """ :setter: The model. """ assert isinstance(value, Model) self._model = value @property def param_list(self): """ :getter: Get the parameter list. """ return self._param_list @param_list.setter def param_list(self, value): """ :setter: Set the parameter list. """ assert isinstance(value, np.ndarray) assert value.ndim == 2 self._param_list = value @property def w(self): """ :getter: Get the weights. """ return self._w @w.setter def w(self, value): """ :setter: Set the weights. """ assert isinstance(value, np.ndarray) assert value.ndim == 1 assert np.all(value >= 0.) self._w = value / np.sum(value) @property def num_particles(self): """ :getter: Get the number of particles in the ensemble. """ return self.param_list.shape[0] def get_model(self, i): """ Get the model with index ``i``. """ self.model.unfixed_param_array[:] = self.param_list[i, :] return self.model def __init__(self, model, param_list, w=None): """ Initialize the object. """ self.model = model self.param_list = param_list if w is None: w = np.ones(param_list.shape[0]) self.w = w def posterior_mean_samples(self, X): """ Return samples of the posterior mean. """ Y = [] for i in xrange(self.num_particles): model = self.get_model(i) y = model.predict(X)[0] Y.append(y[:, 0]) Y = np.array(Y) return Y def posterior_samples(self, X, size=10): """ Draw samples from the posterior of the ensemble. """ Y = [] for i in xrange(self.num_particles): model = self.get_model(i) y = model.posterior_samples(X, size=size).T Y.append(y) Y = np.vstack(Y) idx = np.arange(Y.shape[0]) return Y[np.random.permutation(idx), :] def predict_quantiles(self, X, quantiles=(50, 2.5, 97.5), size=1000): """ Get the predictive quantiles. """ if self.num_particles == 1: tmp = self.get_model(0).predict_quantiles(X, quantiles=quantiles) return np.array(tmp)[:, :, 0] else: Y = self.posterior_samples(X, size=size) return np.percentile(Y, quantiles, axis=0) def raw_predict(self, X): """ Return the prediction of each model at ``X``. """ Y = [] V = [] for i in xrange(self.num_particles): y, v = self.get_model(i).predict(X) Y.append(y) V.append(v) Y = np.array(Y) V = np.array(V) return Y, V def predict(self, X, **kwargs): """ Predict using the ensemble at ``X``. :returns: tuple containing the media, 0.025 quantile, 0.095 quantile """ return self.predict_quantiles(X) def eval_afunc(self, X, func, args=()): """ Evaluate an acquisition function at X using all models. 
""" res = [] X_ns = [] # Locations of max/min M_ns = [] # Values of max/min for i in xrange(self.num_particles): r, i_n, m_n = func(X, self.get_model(i), *args) res.append(r) X_ns.append(i_n) M_ns.append(m_n) res = np.array(res) X_ns = np.array(X_ns) M_ns = np.array(M_ns) return np.average(res, axis=0, weights=self.w), X_ns, M_ns def expected_improvement(self, X, **kwargs): """ Compute the expected improvement. """ return self.eval_afunc(X, expected_improvement, **kwargs) @staticmethod def train(model, num_samples=0, thin=1, burn=0, num_restarts=10, **kwargs): """ Train a Gaussian process model. :param model: The model to optimize. :param num_restarts: The number of restarts when maximizing the posterior. :param num_samples: The number of samples from the posterior. If zero, then we construct a single particle approximation to the posterior. If greater than zero, then we sample from the posterior of the hyper-parameters using Hybrid MC. :param thin: The number of samples to skip. :param burn: The number of samples to burn. :param **kwargs: Any parameters of GPy.inference.mcmc.HMC """ model.optimize_restarts(num_restarts=num_restarts, verbose=False) if num_samples == 0: param_list = np.array(model.unfixed_param_array)[None, :] else: hmc = HMC(model) tmp = hmc.sample(num_samples, **kwargs) param_list = tmp[burn::thin, :] w = np.ones(param_list.shape[0]) return ModelEnsemble(model, param_list, w=w)
PredictiveScienceLab/inverse-bgo
pydes/_model_ensemble.py
Python
mit
6,184
[ "Gaussian" ]
6546077b22b309313a64ae66dfb879ef610ce94a75a217bdec00ef4609f5462b
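A minimal usage sketch for ModelEnsemble, assuming GPy is installed and the class is importable from the module above (note the module as written targets Python 2, e.g. xrange). The data here are synthetic and purely illustrative.

import numpy as np
import GPy

# Hypothetical 1-D regression data.
X = np.random.uniform(0, 1, (20, 1))
Y = np.sin(6 * X) + 0.05 * np.random.randn(20, 1)

gp = GPy.models.GPRegression(X, Y)
ens = ModelEnsemble.train(gp, num_samples=200, burn=50, thin=5, num_restarts=5)

X_test = np.linspace(0, 1, 100)[:, None]
median, low, high = ens.predict(X_test)   # 50 / 2.5 / 97.5 percentiles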
import rmgpy.quantity as quantity import logging from rmgpy.species import Species from rmgpy.data.solvation import SolventData, SoluteData, SoluteGroups, SolvationDatabase from rmgpy.reaction import Reaction class DiffusionLimited(): def __init__(self): # default is false, enabled if there is a solvent self.enabled = False def enable(self, solventData, solvationDatabase, comment=''): # diffusionLimiter is enabled if a solvent has been added to the RMG object. logging.info("Enabling diffusion-limited kinetics...") diffusionLimiter.enabled = True diffusionLimiter.database = solvationDatabase diffusionLimiter.solventData = solventData def getSolventViscosity(self, T): return self.solventData.getSolventViscosity(T) def getEffectiveRate(self, reaction, T): """ Return the ratio of k_eff to k_intrinsic, which is between 0 and 1. It is 1.0 if diffusion has no effect. For 1<=>2 reactions, the reverse rate is limited. For 2<=>2 reactions, the faster direction is limited. For 2<=>1 or 2<=>3 reactions, the forward rate is limited. """ intrinsicKinetics = reaction.kinetics reactants = len(reaction.reactants) products = len(reaction.products) k_forward = intrinsicKinetics.getRateCoefficient(T,P=100e5) Keq = reaction.getEquilibriumConstant(T) # Kc k_reverse = k_forward / Keq k_eff = k_forward if reactants == 1: if products == 1: k_eff = k_forward else: # two products; reverse rate is limited k_diff = self.getDiffusionLimit(T, reaction, forward=False) k_eff_reverse = k_reverse*k_diff/(k_reverse+k_diff) k_eff = k_eff_reverse * Keq else: # 2 reactants if products == 1 or products == 3: k_diff = self.getDiffusionLimit(T, reaction, forward=True) k_eff = k_forward*k_diff/(k_forward+k_diff) else: # 2 products if Keq > 1.0: # forward rate is faster and thus limited k_diff = self.getDiffusionLimit(T, reaction, forward=True) k_eff = k_forward*k_diff/(k_forward+k_diff) else: # reverse rate is faster and thus limited k_diff = self.getDiffusionLimit(T, reaction, forward=False) k_eff_reverse = k_reverse*k_diff/(k_reverse+k_diff) k_eff = k_eff_reverse * Keq return k_eff def getDiffusionLimit(self, T, reaction, forward=True): """ Return the diffusive limit on the rate coefficient, k_diff. This is the upper limit on the rate, in the specified direction. (ie. forward direction if forward=True [default] or reverse if forward=False) """ if forward: reacting = reaction.reactants else: reacting = reaction.products assert len(reacting)==2, "Can only calculate diffusion limit in a bimolecular direction" radii = 0.0 diffusivities = 0.0 for spec in reacting: soluteData = self.database.getSoluteData(spec) # calculate radius with the McGowan volume and assuming sphere radius = ((75*soluteData.V/3.14159)**(1/3))/100 diff = soluteData.getStokesDiffusivity(T, self.getSolventViscosity(T)) radii += radius diffusivities += diff N_a = 6.022e23 # Avogadro's Number k_diff = 4*3.14159*radii*diffusivities*N_a return k_diff # module level variable. There should only ever be one. It starts off disabled diffusionLimiter = DiffusionLimited()
comocheng/RMG-Py
rmgpy/kinetics/diffusionLimited.py
Python
mit
3,807
[ "Avogadro" ]
2efe345f481806827a55c36a5b42470ea0760ff043adf6f0c7f0849ea986c0d9
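getDiffusionLimit() above is the Smoluchowski expression k_diff = 4*pi*(r_A + r_B)*(D_A + D_B)*N_A, with radii estimated from McGowan volumes and diffusivities from Stokes-Einstein. A standalone numeric sketch with illustrative values (typical few-angstrom radii and liquid-phase diffusivities):

import math

r_sum = 3.0e-10 + 3.5e-10   # sum of solute radii, m
d_sum = 1.0e-9 + 0.8e-9     # sum of diffusivities, m^2/s
N_A = 6.022e23              # Avogadro's number, 1/mol
k_diff = 4 * math.pi * r_sum * d_sum * N_A
print(k_diff)               # ~8.9e6 m^3/(mol*s), i.e. ~8.9e9 L/(mol*s)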
# # Based on standard python library functions but avoid # repeated stat calls. Its assumed the files will not change from under us # so we can cache stat calls. # import os import errno import stat as statmod class CachedPath(object): def __init__(self): self.statcache = {} self.lstatcache = {} self.normpathcache = {} return def updatecache(self, x): x = self.normpath(x) if x in self.statcache: del self.statcache[x] if x in self.lstatcache: del self.lstatcache[x] def normpath(self, path): if path in self.normpathcache: return self.normpathcache[path] newpath = os.path.normpath(path) self.normpathcache[path] = newpath return newpath def _callstat(self, path): if path in self.statcache: return self.statcache[path] try: st = os.stat(path) self.statcache[path] = st return st except os.error: self.statcache[path] = False return False # We might as well call lstat and then only # call stat as well in the symbolic link case # since this turns out to be much more optimal # in real world usage of this cache def callstat(self, path): path = self.normpath(path) self.calllstat(path) return self.statcache[path] def calllstat(self, path): path = self.normpath(path) if path in self.lstatcache: return self.lstatcache[path] #bb.error("LStatpath:" + path) try: lst = os.lstat(path) self.lstatcache[path] = lst if not statmod.S_ISLNK(lst.st_mode): self.statcache[path] = lst else: self._callstat(path) return lst except (os.error, AttributeError): self.lstatcache[path] = False self.statcache[path] = False return False # This follows symbolic links, so both islink() and isdir() can be true # for the same path ono systems that support symlinks def isfile(self, path): """Test whether a path is a regular file""" st = self.callstat(path) if not st: return False return statmod.S_ISREG(st.st_mode) # Is a path a directory? # This follows symbolic links, so both islink() and isdir() # can be true for the same path on systems that support symlinks def isdir(self, s): """Return true if the pathname refers to an existing directory.""" st = self.callstat(s) if not st: return False return statmod.S_ISDIR(st.st_mode) def islink(self, path): """Test whether a path is a symbolic link""" st = self.calllstat(path) if not st: return False return statmod.S_ISLNK(st.st_mode) # Does a path exist? # This is false for dangling symbolic links on systems that support them. def exists(self, path): """Test whether a path exists. Returns False for broken symbolic links""" if self.callstat(path): return True return False def lexists(self, path): """Test whether a path exists. Returns True for broken symbolic links""" if self.calllstat(path): return True return False def stat(self, path): return self.callstat(path) def lstat(self, path): return self.calllstat(path) def walk(self, top, topdown=True, onerror=None, followlinks=False): # Matches os.walk, not os.path.walk() # We may not have read permission for top, in which case we can't # get a list of the files the directory contains. os.path.walk # always suppressed the exception then, rather than blow up for a # minor reason when (say) a thousand readable directories are still # left to visit. That logic is copied here. try: # Note that listdir and error are globals in this module due # to earlier import-*. 
names = os.listdir(top) except error, err: if onerror is not None: onerror(err) return dirs, nondirs = [], [] for name in names: if self.isdir(os.path.join(top, name)): dirs.append(name) else: nondirs.append(name) if topdown: yield top, dirs, nondirs for name in dirs: new_path = os.path.join(top, name) if followlinks or not self.islink(new_path): for x in self.walk(new_path, topdown, onerror, followlinks): yield x if not topdown: yield top, dirs, nondirs ## realpath() related functions def __is_path_below(self, file, root): return (file + os.path.sep).startswith(root) def __realpath_rel(self, start, rel_path, root, loop_cnt, assume_dir): """Calculates real path of symlink 'start' + 'rel_path' below 'root'; no part of 'start' below 'root' must contain symlinks. """ have_dir = True for d in rel_path.split(os.path.sep): if not have_dir and not assume_dir: raise OSError(errno.ENOENT, "no such directory %s" % start) if d == os.path.pardir: # '..' if len(start) >= len(root): # do not follow '..' before root start = os.path.dirname(start) else: # emit warning? pass else: (start, have_dir) = self.__realpath(os.path.join(start, d), root, loop_cnt, assume_dir) assert(self.__is_path_below(start, root)) return start def __realpath(self, file, root, loop_cnt, assume_dir): while self.islink(file) and len(file) >= len(root): if loop_cnt == 0: raise OSError(errno.ELOOP, file) loop_cnt -= 1 target = os.path.normpath(os.readlink(file)) if not os.path.isabs(target): tdir = os.path.dirname(file) assert(self.__is_path_below(tdir, root)) else: tdir = root file = self.__realpath_rel(tdir, target, root, loop_cnt, assume_dir) try: is_dir = self.isdir(file) except: is_dir = False return (file, is_dir) def realpath(self, file, root, use_physdir = True, loop_cnt = 100, assume_dir = False): """ Returns the canonical path of 'file' with assuming a toplevel 'root' directory. When 'use_physdir' is set, all preceding path components of 'file' will be resolved first; this flag should be set unless it is guaranteed that there is no symlink in the path. When 'assume_dir' is not set, missing path components will raise an ENOENT error""" root = os.path.normpath(root) file = os.path.normpath(file) if not root.endswith(os.path.sep): # letting root end with '/' makes some things easier root = root + os.path.sep if not self.__is_path_below(file, root): raise OSError(errno.EINVAL, "file '%s' is not below root" % file) try: if use_physdir: file = self.__realpath_rel(root, file[(len(root) - 1):], root, loop_cnt, assume_dir) else: file = self.__realpath(file, root, loop_cnt, assume_dir)[0] except OSError, e: if e.errno == errno.ELOOP: # make ELOOP more readable; without catching it, there will # be printed a backtrace with 100s of OSError exceptions # else raise OSError(errno.ELOOP, "too much recursions while resolving '%s'; loop in '%s'" % (file, e.strerror)) raise return file
PhiInnovations/mdp28-linux-bsp
openembedded-core/meta/lib/oe/cachedpath.py
Python
mit
8,107
[ "VisIt" ]
02bc2ed6e635299561d4bc94c1f2d03880f1f7cfbd4f1c4cbed800efd1df2990
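A minimal usage sketch for the stat cache above. The import path is assumed from the file layout (meta/lib/oe/cachedpath.py), and the module as written is Python 2 only (it uses the old "except error, err" syntax).

from oe.cachedpath import CachedPath

cp = CachedPath()
if cp.isdir('/tmp'):            # first call stats the path
    print(cp.exists('/tmp'))    # later calls reuse the cached result
cp.updatecache('/tmp')          # invalidate once the path may have changed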
from scipy.ndimage import filters
from PIL import Image
#from numpy import *
from pylab import *


def Gaussian_response(img, sigma=1):
    """ Compute Gaussian response function for each pixel in a graylevel image. """
    # Gaussian response
    img_sigma = zeros(img.shape)
    filters.gaussian_filter(img, (sigma, sigma), (0, 0), img_sigma)
    return img_sigma


img = array(Image.open('img/graffiti.jpg').convert('L'))

subplot(2, 3, 1)
set_cmap('gray')
imshow(img)
title('original image')

subplot(2, 3, 2)
imshow(Gaussian_response(img, 2) - Gaussian_response(img, 1))
title('G2-G1')

subplot(2, 3, 3)
imshow(Gaussian_response(img, 3) - Gaussian_response(img, 2))
title('G3-G2')

subplot(2, 3, 4)
imshow(Gaussian_response(img, 4) - Gaussian_response(img, 3))
title('G4-G3')

subplot(2, 3, 5)
imshow(Gaussian_response(img, 5) - Gaussian_response(img, 4))
title('G5-G4')

subplot(2, 3, 6)
imshow(Gaussian_response(img, 6) - Gaussian_response(img, 5))
title('G6-G5')

show()
wasit7/cs634
2016/lab3_varying_sigma.py
Python
bsd-2-clause
960
[ "Gaussian" ]
90839919b4ebd24784dfcb9cfbcc7bc26dd3d730ab7ba64127d9b26ff8240eba
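The differences of successively blurred images above are difference-of-Gaussians (DoG) responses, a standard approximation to the Laplacian-of-Gaussian used in scale-space blob detection. The same G2-G1 panel can be computed directly with SciPy:

from PIL import Image
import numpy as np
from scipy.ndimage import gaussian_filter

img = np.array(Image.open('img/graffiti.jpg').convert('L'), dtype=float)
dog = gaussian_filter(img, sigma=2) - gaussian_filter(img, sigma=1)   # same as the G2-G1 panel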
"""Fit single Sersic 1-D profile""" import numpy as np from numpy.random import multivariate_normal import matplotlib.pyplot as plt from matplotlib import rcParams from matplotlib.gridspec import GridSpec from scipy.optimize import curve_fit import corner import emcee from kungpao.model.component import Sersic from kungpao.model.parameters import ProfileParams ORG = plt.get_cmap('OrRd') ORG_2 = plt.get_cmap('YlOrRd') BLU = plt.get_cmap('PuBu') plt.rcParams['figure.dpi'] = 100.0 plt.rc('text', usetex=True) rcParams.update({'axes.linewidth': 1.5}) rcParams.update({'xtick.direction': 'in'}) rcParams.update({'ytick.direction': 'in'}) rcParams.update({'xtick.minor.visible': 'True'}) rcParams.update({'ytick.minor.visible': 'True'}) rcParams.update({'xtick.major.pad': '7.0'}) rcParams.update({'xtick.major.size': '8.0'}) rcParams.update({'xtick.major.width': '1.5'}) rcParams.update({'xtick.minor.pad': '7.0'}) rcParams.update({'xtick.minor.size': '4.0'}) rcParams.update({'xtick.minor.width': '1.5'}) rcParams.update({'ytick.major.pad': '7.0'}) rcParams.update({'ytick.major.size': '8.0'}) rcParams.update({'ytick.major.width': '1.5'}) rcParams.update({'ytick.minor.pad': '7.0'}) rcParams.update({'ytick.minor.size': '4.0'}) rcParams.update({'ytick.minor.width': '1.5'}) rcParams.update({'axes.titlepad': '10.0'}) rcParams.update({'font.size': 25}) __all__ = ['lnlike_prof', 'norm_prof', 'config_params', 'ln_probability', 'prof_curvefit', 'update_params', 'reinitialize_ball_covar', 'emcee_fit_one_sersic', 'organize_results', 'plot_mcmc_corner', 'plot_mcmc_trace', 'display_model_1d'] def lnlike_prof(theta, rad, rho, err, min_r=6.0, max_r=120.0): """LnLikelihood of the model profile. Parameters ---------- theta: tuple or list A set of model parameters: [n, I0, Re] rad: list or 1-D array Radius array. rho: list or 1-D array Surface mass density profile. err: list or 1-D array Uncertainties of surface mass density profile. min_r: float, optional Minimal radii for fitting. Default=6.0 max_r: float, optional Maximal radii for fitting. Default=120.0 Returns ------- The ln(likelihood) of the model profile. """ params = list(theta) # Radial mask flag = (rad >= min_r) & (rad <= max_r) var = err ** 2 chi2 = (Sersic(rad, params[0], params[1], params[2]) - rho) ** 2 / var chi2 = chi2[flag].sum() return -0.5 * (chi2 + np.log(2 * np.pi * var[flag].sum())) def norm_prof(rad, rho, min_r=6.0, max_r=120): """Generate the normalized profile and get the fitting range for I0. Parameters ---------- rad: list or 1-D array Radius array. rho: list or 1-D array Surface mass density profile. min_r: float, optional Minimal radii for fitting. Default=6.0 max_r: float, optional Maximal radii for fitting. Default=120.0 Returns ------- norm: float Nomalization factor. i0_min: float Minimum fitting range for i0 i0_max: float Maximum fitting range for i0 """ # Radial mask flag = (rad >= min_r) & (rad <= max_r) # Normalization factor norm = np.nanmedian(rho[flag]) # Get the min and max range for the I0 rho_min = np.percentile(rho[flag], 84) rho_max = np.percentile(rho, 99.5) return norm, rho_min / norm, rho_max / norm def config_params(rad, rho, err, min_r=6.0, max_r=120.0, param_config=None, err_inflation=1.0): """Config the model parameters. Parameters ---------- rad: list or 1-D array Radius array. rho: list or 1-D array Surface mass density profile. err: list or 1-D array Uncertainties of surface mass density profile. min_r: float, optional Minimal radii for fitting. Default: 6.0 max_r: float, optional Maximal radii for fitting. 
Default: 120.0 param_config: dict, optional Dictionary for parameters. Default: None err_inflation: float, optional Factor to inflate the uncertainties. Default: 1.0 Returns ------- rho_norm: 1-D array Normalized surface density profile. err_norm: 1-D array Normalized uncertainty profile after error inflation. params: Parameters object Objects to deal with parameters and priors. """ # Normalization factor, (min, max) of I0 norm, i0_min, i0_max = norm_prof(rad, rho, min_r=min_r, max_r=max_r) # Get the normalized profiles rho_norm = np.asarray(rho).flatten() / norm err_norm = np.asarray(err).flatten() / norm * err_inflation if param_config is None: # Radial mask flag = (rad >= min_r) & (rad <= max_r) i0_ini = np.percentile(rho_norm, 90) i0_std = np.std(rho_norm[flag]) # Define a dict for model parameters # Right now we just assume very simple priors: # n: Sersic index, flat prior between 1.0 and 8.0; initial guess is 3.0 # I0: Central intensity, flat prior between the min and max range decided on normalized profile. # Re: Effective radius, flat prior between the min and max fitting range; initial guess is 10.0 # TODO: This should be replaced with more appropriate priors param_config = { 'n': { 'name': 'n', 'label':r'$n_{\rm Ser}$', 'ini': 3.0, 'min': 1.0, 'max': 8.0, 'type': 'flat', 'sig': 1.0 }, 'I0': { 'name': 'I0', 'label':r'$I_{0}$', 'ini': i0_ini, 'min': i0_min, 'max': i0_max, 'type': 'flat', 'sig': i0_std }, 'Re': { 'name': 'Re', 'label':r'$R_{\rm e}$', 'ini': 10., 'min': min_r, 'max': max_r, 'type': 'flat', 'sig': 20. } } params = ProfileParams(param_config) return rho_norm, err_norm, params def ln_probability(theta, params, rad, rho, err, nested=False): """Probability function to sample in an MCMC. Parameters ---------- theta: tuple or list One set of model parameters. params: asap.Parameters object Object for model parameters. rad: list or 1-D array Radius array. rho: list or 1-D array Surface mass density profile. err: list or 1-D array Uncertainties of surface mass density profile. nested: bool, optional Using dynamical nested sampling or not. Default:False. Returns ------- The ln(likelihood) of the model given the input parameters. """ ln_prior = params.lnprior(theta, nested=nested) if not np.isfinite(ln_prior): return -np.inf return ln_prior + lnlike_prof(theta, rad, rho, err) def prof_curvefit(func, rad, rho, err, params, min_r=6.0, max_r=120.0): """Get the best fit result using scipy.curvefit. Parameters ---------- func: function Functional form of the profile to fit. e.g. Sersic. params: asap.Parameters object Object for model parameters. rad: list or 1-D array Radius array. rho: list or 1-D array Surface mass density profile. err: list or 1-D array Uncertainties of surface mass density profile. min_r: float, optional Minimal radii for fitting. Default=6.0 max_r: float, optional Maximal radii for fitting. Default=120.0 Returns ------- best_curvefit: array Best-fit parameters from curvefit. cov_curvefit: array Covariance matrix of the parameters. """ flag = (rad >= min_r) & (rad <= max_r) best_curvefit, cov_curvefit = curve_fit( func, rad[flag], rho[flag], p0=params.get_ini(), bounds=(params.get_low(), params.get_upp()), sigma=err[flag], absolute_sigma=False) return best_curvefit, cov_curvefit def update_params(pbest, pcov, nsig=5.0): """Update the parameter constraint based on the best-fit result from curvefit. Parameters ---------- pbest: 1-D array Best-fit parameters from curvefit. pcov: 2-D array Covariance matrix of the best-fit parameters from curvefit. 
nsig: float, optional N-sigma value to define the fitting range of parameters. Default: 5. Returns ------- params_update: Parameters object Updated parameter constraints and priors. """ # Convert the covariance matrix into error of parameters using just the # diagonal terms perr = np.sqrt(np.diag(pcov)) param_config = { 'n': { 'name': 'n', 'label':r'$n_{\rm Ser}$', 'ini': pbest[0], 'min': pbest[0] - nsig * perr[0], 'max': pbest[0] + nsig * perr[0], 'type': 'flat', 'sig': perr[0] }, 'I0': { 'name': 'I0', 'label':r'$I_{0}$', 'ini': pbest[1], 'min': pbest[1] - nsig * perr[1], 'max': pbest[1] + nsig * perr[1], 'type': 'flat', 'sig': perr[1] }, 'Re': { 'name': 'Re', 'label':r'$R_{\rm e}$', 'ini': pbest[2], 'min': pbest[2] - nsig * perr[2], 'max': pbest[2] + nsig * perr[2], 'type': 'flat', 'sig': perr[2] } } return ProfileParams(param_config) def reinitialize_ball_covar(pos, prob, threshold=50.0, center=None, disp_floor=0.0, **extras): """Estimate the parameter covariance matrix from the positions of a fraction of the current ensemble and sample positions from the multivariate gaussian corresponding to that covariance matrix. If ``center`` is not given the center will be the mean of the (fraction of) the ensemble. :param pos: The current positions of the ensemble, ndarray of shape (nwalkers, ndim) :param prob: The current probabilities of the ensemble, used to reject some fraction of walkers with lower probability (presumably stuck walkers). ndarray of shape (nwalkers,) :param threshold: default 50.0 Float in the range [0,100] giving the fraction of walkers to throw away based on their ``prob`` before estimating the covariance matrix. :param center: optional The center of the multivariate gaussian. If not given or ``None``, then the center will be estimated from the mean of the postions of the acceptable walkers. ndarray of shape (ndim,) :param limits: optional An ndarray of shape (2, ndim) giving lower and upper limits for each parameter. The newly generated values will be clipped to these limits. If the result consists only of the limit then a vector of small random numbers will be added to the result. :returns pnew: New positions for the sampler, ndarray of shape (nwalker, ndim) Notes ----- This is from `prospect.fitting.ensemble` by Ben Johnson: https://github.com/bd-j/prospector/blob/master/prospect/fitting/ensemble.py """ pos = np.atleast_2d(pos) nwalkers = prob.shape[0] good = prob > np.percentile(prob, threshold) if center is None: center = pos[good, :].mean(axis=0) Sigma = np.cov(pos[good, :].T) Sigma[np.diag_indices_from(Sigma)] += disp_floor**2 pnew = resample_until_valid(multivariate_normal, center, Sigma, nwalkers, **extras) return pnew def clip_ball(pos, limits, disp): """Clip to limits. If all samples below (above) limit, add (subtract) a uniform random number (scaled by ``disp``) to the limit. """ npos = pos.shape[0] pos = np.clip(pos, limits[0], limits[1]) for i, p in enumerate(pos.T): u = np.unique(p) if len(u) == 1: tiny = disp[i] * np.random.uniform(0, disp[i], npos) if u == limits[0, i]: pos[:, i] += tiny if u == limits[1, i]: pos[:, i] -= tiny return pos def resample_until_valid(sampling_function, center, sigma, nwalkers, limits=None, maxiter=1e3, prior_check=None): """Sample from the sampling function, with optional clipping to prior bounds and resampling in the case of parameter positions that are outside complicated custom priors. 
:param sampling_function: The sampling function to use, it must have the calling sequence ``sampling_function(center, sigma, size=size)`` :param center: The center of the distribution :param sigma: Array describing the scatter of the distribution in each dimension. Can be two-dimensional, e.g. to describe a covariant multivariate normal (if the sampling function takes such a thing). :param nwalkers: The number of valid samples to produce. :param limits: (optional) Simple limits on the parameters, passed to ``clip_ball``. :param prior_check: (optional) An object that has a ``prior_product()`` method which returns the prior ln(probability) for a given parameter position. :param maxiter: Maximum number of iterations to try resampling before giving up and returning a set of parameter positions at least one of which is not within the prior. :returns pnew: New parameter positions, ndarray of shape (nwalkers, ndim) Notes ----- This is from `prospect.fitting.ensemble` by Ben Johnson: https://github.com/bd-j/prospector/blob/master/prospect/fitting/ensemble.py """ invalid = np.ones(nwalkers, dtype=bool) pnew = np.zeros([nwalkers, len(center)]) for i in range(int(maxiter)): # replace invalid elements with new samples tmp = sampling_function(center, sigma, size=invalid.sum()) pnew[invalid, :] = tmp if limits is not None: # clip to simple limits if sigma.ndim > 1: diag = np.diag(sigma) else: diag = sigma pnew = clip_ball(pnew, limits, diag) if prior_check is not None: # check the prior lnp = np.array([prior_check.lnprior(pos, nested=False) for pos in pnew]) invalid = ~np.isfinite(lnp) if invalid.sum() == 0: # everything is valid, return return pnew else: # No prior check, return on first iteration return pnew # reached maxiter, return whatever exists so far print("initial position resampler hit ``maxiter``") return pnew def emcee_fit_one_sersic(rad, rho, err, min_r=6.0, max_r=120.0, pool=None, n_walkers=128, n_burnin=100, n_samples=100, output=None, moves_burnin=None, moves_final=None, verbose=True): """Fit a single Sersic model to a 1-D profile. Parameters ---------- Returns ------- """ # 3 parameters for a single Sersic model n_dim = 3 # Decide the behaviour of the sampler if moves_burnin is None: moves_burnin = emcee.moves.DESnookerMove() if moves_final is None: moves_final = emcee.moves.StretchMove(a=4) # Normalize the input profile and uncertainty, decide the fitting range, and # setup the initial parameter ranges for fitting. rho_norm, err_norm, params = config_params( rad, rho, err, min_r=min_r, max_r=max_r) # Fit the Sersic profile using scipy.curvefit() to get the simple # best-fit parameters (pbest) and the associated covariance matrix (pcov) # The later can be used to estimate parameter errors. 
pbest, pcov = prof_curvefit(Sersic, rad, rho_norm, err_norm, params) if verbose: print("Best-fit Sersic parameters from curvefit:", pbest) print("Error of Sersic parameters from curvefit:", np.sqrt(np.diag(pcov))) # Update the parameter ranges based on the best-fit result params_update = update_params(pbest, pcov, nsig=5.0) # Initial postioins of each walker params_ini = params_update.sample(nsamples=n_walkers) # Parameter limits params_limits = np.array([params_update.low, params_update.upp]) # Config the ensemble sampler args = [params_update, rad, rho_norm, err_norm] sampler_burnin = emcee.EnsembleSampler( n_walkers, n_dim, ln_probability, moves=moves_burnin, args=args, pool=pool) # Run burn-in step if verbose: print("# Running burn-in step...") burnin_results = sampler_burnin.run_mcmc( params_ini, n_burnin, store=True, progress=True) burnin = organize_results( burnin_results, sampler_burnin, n_dim, output=None, verbose=verbose, frac=0.1) # Find best walker position burnin_pos, burnin_prob, _ = burnin_results burnin_best = sampler_burnin.flatlnprobability.argmax() # Get the new initial positions for walkers initial_center = sampler_burnin.flatchain[burnin_best, :] new_ini = reinitialize_ball_covar( burnin_pos, burnin_prob, center=initial_center, limits=params_limits, disp_floor=0.1, prior_check=params_update, threshold=30) # Config the ensemble sampler if verbose: print("# Running final sampling step...") sampler_final = emcee.EnsembleSampler( n_walkers, n_dim, ln_probability, moves=moves_final, args=args, pool=pool) # Run the final sampling step sample_results = sampler_final.run_mcmc( new_ini, n_samples, store=True, progress=True) # Organize results results = organize_results( sample_results, sampler_final, n_dim, output=output, verbose=verbose, frac=0.1) # Add in the curve-fit results results['best_curvefit'] = pbest results['cov_curvefit'] = pcov results['err_curvefit'] = np.sqrt(np.diag(pcov)) return results, burnin def samples_stats(samples): """1D marginalized parameter constraints.""" return map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]), zip(*np.percentile(samples, [16, 50, 84], axis=0))) def organize_results(results, sampler, ndims, output=None, verbose=True, frac=0.1): """Organize the MCMC run results. 
Parameters ---------- """ position, lnprob, _ = results samples = sampler.chain[:, :, :].reshape((-1, ndims)) chains = sampler.chain lnprob = sampler.lnprobability params_stats = samples_stats(samples) # Best parameter using the best log(prob) ind_1, ind_2 = np.unravel_index(np.argmax(lnprob, axis=None), lnprob.shape) best = chains[ind_2, ind_1, :] # Best parameters using the mean of the last few samples _, n_step, n_dim = chains.shape mean = np.nanmean( chains[:, -int(n_step * frac):, :].reshape([-1, n_dim]), axis=0) if output: np.savez(output, samples=samples, lnprob=np.array(lnprob), best=np.array(best), mean=np.asarray(mean), chains=chains, position=np.asarray(position), acceptance=np.array(sampler.acceptance_fraction)) if verbose: print("#------------------------------------------------------") print("# Mean acceptance fraction", np.mean(sampler.acceptance_fraction)) print("#------------------------------------------------------") print("# Best ln(Probability): %11.5f" % np.max(lnprob)) print(best) print("#------------------------------------------------------") print("# Best parameters (mean):") print(mean) print("#------------------------------------------------------") for param_stats in params_stats: print(param_stats) print("#------------------------------------------------------") return {'samples': samples, 'lnprob': np.array(lnprob), 'best': np.array(best), 'mean': np.asarray(mean), 'chains': chains, 'position': np.asarray(position), 'acceptance': np.array(sampler.acceptance_fraction) } def plot_mcmc_corner(mcmc_samples, mcmc_labels, fontsize=26, labelsize=20, **corner_kwargs): """Corner plots for MCMC samples.""" fig = corner.corner( mcmc_samples, bins=40, color=ORG(0.7), smooth=2, labels=mcmc_labels, label_kwargs={'fontsize': fontsize}, quantiles=[0.16, 0.5, 0.84], levels=[0.16, 0.50, 0.84], plot_contours=True, fill_contours=True, show_titles=True, title_kwargs={"fontsize": labelsize}, hist_kwargs={"histtype": 'stepfilled', "alpha": 0.5, "edgecolor": "none"}, use_math_text=True, **corner_kwargs ) return fig def plot_mcmc_trace(mcmc_chains, mcmc_labels, mcmc_best=None, figsize=None, mcmc_burnin=None, burnin_alpha=0.2, trace_alpha=0.2): """Traceplot for MCMC results.""" if figsize is None: if mcmc_burnin is not None: fig = plt.figure(figsize=(12, 15)) else: fig = plt.figure(figsize=(10, 15)) else: fig = plt.figure(figsize=figsize) fig.subplots_adjust(hspace=0.0, wspace=0.0, bottom=0.027, top=0.97, left=0.06, right=0.94) # I want the plot of individual walkers to span 2 columns nparam = len(mcmc_labels) if mcmc_burnin is not None: gs = GridSpec(nparam, 5) else: gs = GridSpec(nparam, 3) if mcmc_best is not None: assert len(mcmc_best) == len(mcmc_labels) for ii, param in enumerate(mcmc_labels): # Getthe chains from burn-in process and the final sampling process param_chain = mcmc_chains[:, :, ii] if mcmc_burnin is not None: param_burnin = mcmc_burnin[:, :, ii] # Get the range of Y-axis y_min = np.min([np.min(param_chain), np.min(param_burnin)]) y_max = np.max([np.max(param_chain), np.max(param_burnin)]) else: y_min = np.min(param_chain) y_max = np.max(param_chain) # Maximum variance of the walkers max_var = max(np.var(param_chain[:, :], axis=1)) # Trace plot if mcmc_burnin is None: ax1 = plt.subplot(gs[ii, :2]) else: ax1 = plt.subplot(gs[ii, 2:4]) ax1.yaxis.grid(linewidth=1.5, linestyle='--', alpha=0.5) for walker in param_chain: ax1.plot(np.arange(len(walker)), walker, alpha=trace_alpha, drawstyle="steps", color=ORG_2(1.0 - np.var(walker) / max_var)) if mcmc_burnin is None: 
ax1.set_ylabel(param, fontsize=28, labelpad=18, color='k') # Don't show ticks on the y-axis ax1.tick_params(labelleft=False) # For the plot on the bottom, add an x-axis label. Hide all others if ii != (nparam - 1): ax1.tick_params(labelbottom=False) else: for tick in ax1.xaxis.get_major_ticks(): tick.label.set_fontsize(20) # Posterior histograms ax2 = plt.subplot(gs[ii, -1]) ax2.grid(linewidth=1.5, linestyle='--', alpha=0.5) ax2.hist(np.ravel(param_chain[:, :]), bins=np.linspace(ax1.get_ylim()[0], ax1.get_ylim()[1], 100), orientation='horizontal', alpha=0.7, facecolor=ORG_2(0.9), edgecolor="none") ax1.set_xlim(1, len(walker)) ax1.set_ylim(y_min, y_max) ax2.set_ylim(ax1.get_ylim()) ax1.get_xaxis().tick_bottom() ax2.xaxis.set_visible(False) ax2.yaxis.tick_right() ax2.yaxis.set_label_position("right") for tick in ax2.yaxis.get_major_ticks(): tick.label.set_fontsize(20) if mcmc_best is not None: ax1.axhline(mcmc_best[ii], linestyle='--', linewidth=2, c=BLU(1.0), alpha=0.8) ax2.axhline(mcmc_best[ii], linestyle='--', linewidth=2, c=BLU(1.0), alpha=0.8) # Trace plot for burnin if mcmc_burnin is not None: param_burnin = mcmc_burnin[:, :, ii] ax3 = plt.subplot(gs[ii, :2]) ax3.yaxis.grid(linewidth=1.5, linestyle='--', alpha=0.5) for walker in param_burnin: ax3.plot(np.arange(len(walker)), walker, drawstyle="steps", color=BLU(np.var(walker) / max_var), alpha=burnin_alpha) ax3.set_ylabel(param, fontsize=25, labelpad=18, color='k') # Don't show ticks on the y-axis ax3.tick_params(labelleft=False) ax3.set_xlim(1, len(walker)) ax3.set_ylim(y_min, y_max) ax3.get_xaxis().tick_bottom() # For the plot on the bottom, add an x-axis label. Hide all others if ii != (nparam - 1): ax1.xaxis.set_visible(False) if mcmc_burnin is not None: ax3.xaxis.set_visible(False) else: if mcmc_burnin is not None: for tick in ax3.xaxis.get_major_ticks(): tick.label.set_fontsize(20) if ii == 0: t = ax1.set_title(r"$\mathrm{Sampling}$", fontsize=28, color='k') t.set_y(1.01) t = ax2.set_title(r"$\mathrm{Posterior}$", fontsize=28, color='k') t.set_y(1.01) if mcmc_burnin is not None: t = ax3.set_title(r"$\mathrm{Burnin}$", fontsize=28, color='k') t.set_y(1.01) return fig def visual_emcee(results, burnin=None, fontsize=20, alpha=0.3): """Visualize the emcee result.""" params_label = [r'$n_{\rm Ser}$', r'$I_{0}$', r'$R_{\rm e}$'] from matplotlib import rcParams rcParams.update({'font.size': 20}) mod_corner = plot_mcmc_corner( results['chains'].reshape([-1, 3]), params_label, truths=results['best_curvefit'], truth_color='skyblue', fontsize=26, labelsize=22, **{'title_fmt': '.2f', 'ranges': None, 'plot_datapoints': False}) mod_trace = plot_mcmc_trace( results['chains'], params_label, mcmc_best=results['best_curvefit'], mcmc_burnin=burnin['chains'], burnin_alpha=alpha, trace_alpha=alpha, figsize=(8, 6)) return mod_corner, mod_trace def display_model_1d( rad, rho, err, min_r=6.0, max_r=100.0, log_r=True, x_lim=None, y_lim=None, res_lim=None, info=None, info_pos=None, model=None, models=None, samples=None, model_label=r'$\rm Model$', models_label=None, normed=False): """Display 1-D profile """ fig = plt.figure(constrained_layout=False, figsize=(7, 6)) fig.subplots_adjust(left=0.0, right=1.0, bottom=0.0, top=1.0, wspace=0.00, hspace=0.00) gs = GridSpec(3, 3, figure=fig) # Compare the profile ax1 = fig.add_subplot(gs[0:2, :]) ax1.grid(linestyle='--', alpha=0.4, linewidth=2) ax1.set_xscale("log", nonposx='clip') ax1.set_yscale("log", nonposy='clip') ax1.axvline(min_r, linewidth=4.0, linestyle='--', c='k', alpha=0.5) ax1.axvline(max_r, 
linewidth=4.0, linestyle='--', c='k', alpha=0.5) # Data ax1.fill_between(rad, rho-err, rho+err, alpha=0.4, label='__no_label__') ax1.plot(rad, rho, linewidth=4.0, alpha=1.0, label=r'$\rm Data$') # Sample profiles if samples is not None: prof_3sig_low, prof_3sig_upp = np.percentile(samples, [0.3, 99.7], axis=0) ax1.fill_between( rad, prof_3sig_low, prof_3sig_upp, facecolor='grey', edgecolor='k', alpha=0.7, label=r'$3\-\sigma$') prof_low, prof_upp = np.percentile(samples, [16, 84], axis=0) ax1.fill_between( rad, prof_low, prof_upp, facecolor='orangered', edgecolor='orangered', alpha=0.7, label=r'$1\-\sigma$') # Model if model is not None: ax1.plot(rad, model, linestyle='--', linewidth=5.0, alpha=0.8, label=model_label) # Plot multiple models if models is not None: if models_label is None: models_label = ['__no_label__'] * len(models) else: assert len(models) == len(models_label), "Wrong size of labels!" for mod, lab in zip(models, models_label): ax1.plot(rad, mod, linestyle='--', linewidth=4.0, alpha=0.8, label=lab) if x_lim is not None: _ = ax1.set_xlim(x_lim) if y_lim is not None: _ = ax1.set_ylim(y_lim) if normed: _ = ax1.set_ylabel(r'$\rm Normalized\ Surface\ Intensity$', fontsize=26) else: _ = ax1.set_ylabel(r'$\rm Surface\ Intensity$', fontsize=26) ax1.legend(loc='best', fontsize=22) if info is not None: if info_pos is None: info_pos = [0.30, 0.22] else: info_pos = list(info_pos) ax1.text(info_pos[0], info_pos[1], info, transform=ax1.transAxes, fontsize=25) # Residual ax2 = fig.add_subplot(gs[2, :], sharex=ax1) ax2.grid(linestyle='--', alpha=0.4, linewidth=2) ax2.set_xscale("log", nonposx='clip') ax2.axvline(min_r, linewidth=4.0, linestyle='--', c='k', alpha=0.5) ax2.axvline(max_r, linewidth=4.0, linestyle='--', c='k', alpha=0.5) ax2.axhline(0.0, linewidth=4.0, linestyle='-', c='k', alpha=0.3) # Sample profiles if samples is not None: prof_3sig_low, prof_3sig_upp = np.percentile(samples, [0.3, 99.7], axis=0) ax2.fill_between( rad, (prof_3sig_low - rho) / rho, (prof_3sig_upp - rho) / rho, facecolor='grey', edgecolor='k',alpha=0.7, label=r'$3\-\sigma$') prof_low, prof_upp = np.percentile(samples, [16, 84], axis=0) ax2.fill_between( rad, (prof_low - rho) / rho, (prof_upp - rho) / rho, facecolor='orangered', edgecolor='orangered', alpha=0.7, label=r'$1\-\sigma$') # Model if model is not None: ax2.plot(rad, (model - rho) / rho, linewidth=5.0, alpha=0.7) # Multiple models if models is not None: for mod in models: ax2.plot(rad, (mod - rho) / rho, linestyle='--', linewidth=4.0, alpha=0.8) if res_lim is not None: _ = ax2.set_ylim(res_lim) _ = ax2.set_xlabel(r'$\rm Radius$', fontsize=30) _ = ax2.set_ylabel(r'$\rm Residual$', fontsize=26)
dr-guangtou/KungPao
kungpao/model/sersic_1d.py
Python
gpl-3.0
30,330
[ "Gaussian" ]
c6ff230ef53a3f1d5f0133cdcf502872d59103f8ccb864e6c963730c4f084f4c
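A minimal usage sketch for the Sersic fitter above, assuming the excerpted module is importable as kungpao.model.sersic_1d and that the helpers it references (config_params, prof_curvefit, ln_probability) exist earlier in the full file; the toy profile, noise level, and sampler settings are invented for illustration.

import numpy as np
from kungpao.model.sersic_1d import emcee_fit_one_sersic

# Toy 1-D surface-intensity profile with ~5 per cent noise (illustrative only).
rad = np.linspace(1.0, 150.0, 80)
rho_true = 100.0 * np.exp(-rad / 30.0)
err = 0.05 * rho_true
rho = rho_true + np.random.normal(scale=err)

# Fit a single Sersic component over 6 < r < 120, mirroring the defaults above.
results, burnin = emcee_fit_one_sersic(
    rad, rho, err, min_r=6.0, max_r=120.0,
    n_walkers=64, n_burnin=50, n_samples=200, verbose=True)

print(results['best'])   # parameters of the highest ln(probability) sample
print(results['mean'])   # mean of the last fraction of each chain
# Corner and trace plots could then be drawn with visual_emcee(results, burnin=burnin).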
#!/usr/bin/env python ################################################## ## DEPENDENCIES import sys import os import os.path try: import builtins as builtin except ImportError: import __builtin__ as builtin from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.4' __CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0) __CHEETAH_genTime__ = 1364979192.324919 __CHEETAH_genTimestamp__ = 'Wed Apr 3 17:53:12 2013' __CHEETAH_src__ = '/home/fermi/Work/Model/tmsingle/openpli3.0/build-tmsingle/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-0.1+git1+279a2577c3bc6defebd4bf9e61a046dcf7f37c01-r0.72/git/plugin/controllers/views/web/mediaplayerwrite.tmpl' __CHEETAH_srcLastModified__ = 'Wed Apr 3 17:10:17 2013' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class mediaplayerwrite(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(mediaplayerwrite, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body _orig_filter_98117577 = _filter filterName = u'WebSafe' if self._CHEETAH__filters.has_key("WebSafe"): _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName] else: _filter = self._CHEETAH__currentFilter = \ self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter write(u'''<?xml version="1.0" encoding="UTF-8"?> <e2simplexmlresult> \t<e2state>''') _v = VFFSL(SL,"result",True) # u'$result' on line 4, col 11 if _v is not None: write(_filter(_v, rawExpr=u'$result')) # from line 4, col 11. write(u'''</e2state> \t<e2statetext>''') _v = VFFSL(SL,"message",True) # u'$message' on line 5, col 15 if _v is not None: write(_filter(_v, rawExpr=u'$message')) # from line 5, col 15. 
write(u'''</e2statetext> </e2simplexmlresult> ''') _filter = self._CHEETAH__currentFilter = _orig_filter_98117577 ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_mediaplayerwrite= 'respond' ## END CLASS DEFINITION if not hasattr(mediaplayerwrite, '_initCheetahAttributes'): templateAPIClass = getattr(mediaplayerwrite, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(mediaplayerwrite) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=mediaplayerwrite()).run()
pli3/Openwebif
plugin/controllers/views/web/mediaplayerwrite.py
Python
gpl-2.0
5,275
[ "VisIt" ]
aab057f6004d3461fb3d8672a75a47de478be78acfa1ed9ab5c3f8d11370e283
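A hedged rendering sketch for the compiled Cheetah template above, using the standard searchList mechanism its constructor accepts; the import path simply mirrors the file's location in the plugin tree, the result/message values are invented, and the Python 2 / Cheetah environment the generated code targets is assumed.

# Hypothetical usage under Python 2 with Cheetah installed.
from plugin.controllers.views.web.mediaplayerwrite import mediaplayerwrite

tmpl = mediaplayerwrite(searchList=[{'result': True, 'message': 'playlist written'}])
xml = tmpl.respond()   # runs the generated main method through the WebSafe filter
print(xml)             # -> <e2simplexmlresult> ... </e2simplexmlresult>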
"""Implements the hashids algorithm in python. For more information, visit http://hashids.org/""" import warnings from functools import wraps from math import ceil __version__ = '1.3.1' RATIO_SEPARATORS = 3.5 RATIO_GUARDS = 12 try: StrType = basestring except NameError: StrType = str def _is_str(candidate): """Returns whether a value is a string.""" return isinstance(candidate, StrType) def _is_uint(number): """Returns whether a value is an unsigned integer.""" try: return number == int(number) and number >= 0 except ValueError: return False def _split(string, splitters): """Splits a string into parts at multiple characters""" part = '' for character in string: if character in splitters: yield part part = '' else: part += character yield part def _hash(number, alphabet): """Hashes `number` using the given `alphabet` sequence.""" hashed = '' len_alphabet = len(alphabet) while True: hashed = alphabet[number % len_alphabet] + hashed number //= len_alphabet if not number: return hashed def _unhash(hashed, alphabet): """Restores a number tuple from hashed using the given `alphabet` index.""" number = 0 len_alphabet = len(alphabet) for character in hashed: position = alphabet.index(character) number *= len_alphabet number += position return number def _reorder(string, salt): """Reorders `string` according to `salt`.""" len_salt = len(salt) if len_salt != 0: string = list(string) index, integer_sum = 0, 0 for i in range(len(string) - 1, 0, -1): integer = ord(salt[index]) integer_sum += integer j = (integer + index + integer_sum) % i string[i], string[j] = string[j], string[i] index = (index + 1) % len_salt string = ''.join(string) return string def _index_from_ratio(dividend, divisor): """Returns the ceiled ratio of two numbers as int.""" return int(ceil(float(dividend) / divisor)) def _ensure_length(encoded, min_length, alphabet, guards, values_hash): """Ensures the minimal hash length""" len_guards = len(guards) guard_index = (values_hash + ord(encoded[0])) % len_guards encoded = guards[guard_index] + encoded if len(encoded) < min_length: guard_index = (values_hash + ord(encoded[2])) % len_guards encoded += guards[guard_index] split_at = len(alphabet) // 2 while len(encoded) < min_length: alphabet = _reorder(alphabet, alphabet) encoded = alphabet[split_at:] + encoded + alphabet[:split_at] excess = len(encoded) - min_length if excess > 0: from_index = excess // 2 encoded = encoded[from_index:from_index+min_length] return encoded def _encode(values, salt, min_length, alphabet, separators, guards): """Helper function that does the hash building without argument checks.""" len_alphabet = len(alphabet) len_separators = len(separators) values_hash = sum(x % (i + 100) for i, x in enumerate(values)) encoded = lottery = alphabet[values_hash % len(alphabet)] for i, value in enumerate(values): alphabet_salt = (lottery + salt + alphabet)[:len_alphabet] alphabet = _reorder(alphabet, alphabet_salt) last = _hash(value, alphabet) encoded += last value %= ord(last[0]) + i encoded += separators[value % len_separators] encoded = encoded[:-1] # cut off last separator return (encoded if len(encoded) >= min_length else _ensure_length(encoded, min_length, alphabet, guards, values_hash)) def _decode(hashid, salt, alphabet, separators, guards): """Helper method that restores the values encoded in a hashid without argument checks.""" parts = tuple(_split(hashid, guards)) hashid = parts[1] if 2 <= len(parts) <= 3 else parts[0] if not hashid: return lottery_char = hashid[0] hashid = hashid[1:] hash_parts = _split(hashid, 
separators) for part in hash_parts: alphabet_salt = (lottery_char + salt + alphabet)[:len(alphabet)] alphabet = _reorder(alphabet, alphabet_salt) yield _unhash(part, alphabet) def _deprecated(func, name): """A decorator that warns about deprecation when the passed-in function is invoked.""" @wraps(func) def with_warning(*args, **kwargs): warnings.warn( ('The %s method is deprecated and will be removed in v2.*.*' % name), DeprecationWarning ) return func(*args, **kwargs) return with_warning class Hashids(object): """Hashes and restores values using the "hashids" algorithm.""" ALPHABET = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890' def __init__(self, salt='', min_length=0, alphabet=ALPHABET): """ Initializes a Hashids object with salt, minimum length, and alphabet. :param salt: A string influencing the generated hash ids. :param min_length: The minimum length for generated hashes :param alphabet: The characters to use for the generated hash ids. """ self._min_length = max(int(min_length), 0) self._salt = salt separators = ''.join(x for x in 'cfhistuCFHISTU' if x in alphabet) alphabet = ''.join(x for i, x in enumerate(alphabet) if alphabet.index(x) == i and x not in separators) len_alphabet, len_separators = len(alphabet), len(separators) if len_alphabet + len_separators < 16: raise ValueError('Alphabet must contain at least 16 ' 'unique characters.') separators = _reorder(separators, salt) min_separators = _index_from_ratio(len_alphabet, RATIO_SEPARATORS) number_of_missing_separators = min_separators - len_separators if number_of_missing_separators > 0: separators += alphabet[:number_of_missing_separators] alphabet = alphabet[number_of_missing_separators:] len_alphabet = len(alphabet) alphabet = _reorder(alphabet, salt) num_guards = _index_from_ratio(len_alphabet, RATIO_GUARDS) if len_alphabet < 3: guards = separators[:num_guards] separators = separators[num_guards:] else: guards = alphabet[:num_guards] alphabet = alphabet[num_guards:] self._alphabet = alphabet self._guards = guards self._separators = separators # Support old API self.decrypt = _deprecated(self.decode, "decrypt") self.encrypt = _deprecated(self.encode, "encrypt") def encode(self, *values): """Builds a hash from the passed `values`. :param values The values to transform into a hashid >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456') >>> hashids.encode(1, 23, 456) '1d6216i30h53elk3' """ if not (values and all(_is_uint(x) for x in values)): return '' return _encode(values, self._salt, self._min_length, self._alphabet, self._separators, self._guards) def decode(self, hashid): """Restore a tuple of numbers from the passed `hashid`. :param hashid The hashid to decode >>> hashids = Hashids('arbitrary salt', 16, 'abcdefghijkl0123456') >>> hashids.decode('1d6216i30h53elk3') (1, 23, 456) """ if not hashid or not _is_str(hashid): return () try: numbers = tuple(_decode(hashid, self._salt, self._alphabet, self._separators, self._guards)) return numbers if hashid == self.encode(*numbers) else () except ValueError: return () def encode_hex(self, hex_str): """Converts a hexadecimal string (e.g. a MongoDB id) to a hashid. :param hex_str The hexadecimal string to encodes >>> Hashids.encode_hex('507f1f77bcf86cd799439011') 'y42LW46J9luq3Xq9XMly' """ numbers = (int('1' + hex_str[i:i+12], 16) for i in range(0, len(hex_str), 12)) try: return self.encode(*numbers) except ValueError: return '' def decode_hex(self, hashid): """Restores a hexadecimal string (e.g. a MongoDB id) from a hashid. 
:param hashid The hashid to decode >>> Hashids.decode_hex('y42LW46J9luq3Xq9XMly') '507f1f77bcf86cd799439011' """ return ''.join(('%x' % x)[1:] for x in self.decode(hashid))
davidaurelio/hashids-python
hashids.py
Python
mit
8,790
[ "VisIt" ]
6cf0fe3b35b5cc10f8fa708a05c6453789ea7ecaef15676e3538aff133a9d74f
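A small round-trip sketch for the Hashids class above; the salt and numbers are arbitrary. Note that decode() validates by re-encoding, so a hashid produced with a different salt normally yields an empty tuple rather than an exception.

from hashids import Hashids

hashids = Hashids(salt='this is my salt', min_length=8)
hashid = hashids.encode(45, 434, 1313, 99)
assert hashids.decode(hashid) == (45, 434, 1313, 99)   # round-trip is checked internally

# Decoding with a different salt almost always fails the re-encode check
# inside decode() and returns an empty tuple.
print(Hashids(salt='another salt').decode(hashid))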
from uuid import uuid4 from collections.abc import Sequence from enum import IntEnum from random import randint, randrange from math import atan2, sqrt, pi import sys class Game_States(IntEnum): MAIN_MENU = 0 NEW_GAME = 1 IN_GAME = 2 OPTIONS = 3 HISTORY = 4 class LOS_Shape(IntEnum): EUCLID = 0 SQUARE = 1 COLOR = { 'blue': '#89CCEE', 'purple': '#332288', 'turqoise': '#44AA99', 'green': '#117733', 'brown': '#999933', 'yellow': '#DDCC77', 'orange': '#CC6677', 'red': '#882255', 'pink': '#AA4499', 'white': '#EEEEEE', 'black': '#000000', 'grey': '#191919', } class Thing: def __init__(self, x, y, glyph, color, physical, visible=True): self.x = x self.y = y self.glyph = glyph if type(color) is 'str': self.color = COLOR[color] else: self.color = color self.physical = physical self.visible = visible def __eq__(self, other): return ((self.glyph, self.color, self.physical, self.visible) == (other.glyph, other.color, other.physical, other.visible)) def build_char(self, within_fov): if within_fov: bkcolor = "" else: bkcolor = "[bkcolor={}]".format(COLOR['grey']) elements = [bkcolor, "[color={}]".format(self.color), self.glyph] self.char = "".join(e for e in elements) class Actor(Thing): def __init__(self, world, name, x, y, glyph, color, physical, visible=True, max_health=None, cur_health=None, max_mana=None, cur_mana=None, attack=None, defense=None, apparel=None): # Engine Stats Thing.__init__(self, x, y, glyph, color, physical) self.name = name self.visible = visible self.inventory = [] self.uuid = uuid4() # Generic Stats self.max_health = max_health self.cur_health = cur_health or max_health self.max_mana = max_mana self.cur_mana = cur_mana or max_mana self.attack = attack self.defense = defense self.base_radius = 8 self.radius = self.base_radius self.los_shape = LOS_Shape.SQUARE self.fog_toggle = True self.viewed_map = set() self.apparel = set() # Finalization Methods world.register(self) def change_los(self): if self.los_shape < len(LOS_Shape) - 1: self.los_shape = LOS_Shape(self.los_shape + 1) else: self.los_shape = LOS_Shape(0) if self.los_shape == LOS_Shape.EUCLID: self.radius = int(sqrt(((self.base_radius * 2) ** 2) / pi)) else: self.radius = self.base_radius def build_char(self, within_fov): super().build_char(within_fov) apparel = "[+]" + [x for x in self.apparel] if self.apparel else "" self.char = "".join((self.char, apparel)) def move(self, world, tx, ty): tick, moved = world.move_actor(self, tx, ty) if moved: self.x += tx self.y += ty return tick def place(self, start_loc): self.x, self.y = start_loc def adjacent(self, world): adjacent = [] for x in range(self.x - 1, self.x + 2): for y in range(self.y - 1, self.y + 2): if x == self.x and y == self.y: continue else: adjacent.append((x, y)) return adjacent class Item(Thing): def __init__(self, name, x, y, glyph, color, physical): Thing.__init__(self, x, y, glyph, color, physical) self.name = name self.uuid = uuid4() class Prop(Thing): def __init__(self, x, y, glyph, color, physical): Thing.__init__(self, x, y, glyph, color, physical) self.is_door = False self.door_status = False self.uuid = uuid4() def update(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) class Tile(Thing): def __init__(self, x, y, glyph, color, bkcolor, physical): Thing.__init__(self, x, y, glyph, color, physical) self.bkcolor = bkcolor self.occupied = None self.prop = None self.item = None self.uuid = uuid4() def __eq__(self, other): return ((self.glyph, self.color, self.physical, self.door[0]) == (other.glyph, other.color, other.physical, 
other.door[0])) def __str__(self): return "x: {}, " "y: {}, " "glyph: {}, " "char: {}, " "color: {}, " "physical: {}, " "occupied: {}, " "prop: {}, " "item: {}, " "uuid: {}".format(self.x, self.y, self.glyph, self.char, self.color, self.physical, self.occupied, self.prop, self.item, self.uuid) def update(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def build_door(self): self.update(glyph='.', physical=False) self.prop = Prop(x=self.x, y=self.y, glyph='+', color=COLOR['white'], physical=True) self.prop.update(is_door=True, door_status=True) def check_door(self): if self.prop and self.prop.is_door: return True def toggle_door(self): self.prop.door_status = not self.prop.door_status self.prop.glyph = ["-", "+"][self.prop.door_status] self.prop.physical = not self.prop.physical def build_char(self, fov_map, fog_toggle=True): # Only for debugging purposes. In production, this won't be accessible. if fog_toggle: if (self.x, self.y) in fov_map: within_fov = True else: within_fov = False else: within_fov = True if self.occupied: self.occupied.build_char(within_fov) elif self.item: self.item.build_char(within_fov) elif self.prop: self.prop.build_char(within_fov) else: super().build_char(within_fov) if within_fov: bkcolor = "[bkcolor={}]".format(self.bkcolor) else: bkcolor = "[bkcolor={}]".format(COLOR['grey']) color = "[color={}]".format(self.color) elements = [bkcolor, color, self.glyph] self.char = "".join(e for e in elements) class Map(Sequence): def __init__(self, name, width, height, min_rooms, max_rooms, num_exits, level, region): self.name = name self.width = width self.height = height self.min_rooms = min_rooms self.max_rooms = max_rooms self.num_exits = num_exits self.level = level self.layout = [] self.fov_map = set() self.rooms = [] self.passages = [] self.region = region self.start_loc = None self.generate_map(width, height, num_exits) def __getitem__(self, key): return self.layout[key] def __len__(self): return self.width * self.height def __contains__(self, item): for tiles in self.layout: for tile in tiles: if isinstance(item, Tile): if item == tile: return True elif isinstance(item, Actor): if item == tile.occupied: return True elif isinstance(item, Item): if item == tile.item: return True elif isinstance(item, Prop): if item == tile.prop: return True return False def register(self, actor): self[actor.x][actor.y].occupied = actor def move_actor(self, actor, tx, ty): dx, dy = actor.x + tx, actor.y + ty if tx == 0 and ty == 0: return (True, False) if not (dx >= self.width) or (dy >= self.height): if not self[dx][dy].physical: if self[dx][dy].check_door() and self[dx][dy].prop.door_status: self[dx][dy].toggle_door() return (True, False) elif not self[dx][dy].occupied: self[actor.x][actor.y].occupied = None self[dx][dy].occupied = actor return (True, True) return (False, False) def line(self, x0, y0, x1, y1, halt=False): """Bresenham's line algorithm""" dx = abs(x1 - x0) dy = abs(y1 - y0) x, y = x0, y0 sx = -1 if x0 > x1 else 1 sy = -1 if y0 > y1 else 1 plot = [] if dx > dy: err = dx / 2.0 if halt: while x != x1: plot.append((x, y)) err -= dy if err < 0: y += sy err += dx x += sx else: while 0 <= x < self.width: plot.append((x, y)) err -= dy if err < 0: y += sy err += dx x += sx else: err = dy / 2.0 if halt: while y != y1: plot.append((x, y)) err -= dx if err < 0: x += sx err += dy y += sy else: while 0 <= y < self.height: plot.append((x, y)) err -= dx if err < 0: x += sx err += dy y += sy plot.append((x, y)) return plot def calculate_fov(self, actor): 
""" Adapted from http://ncase.me/sight-and-light/ Notable changes: Instead of building a polygon from all possible corners, instead build from intersecting elements at the edge of the radius of line-of-sight. Unchanged is the construction of a polygon for the boundary and filling it in using point-in-polygon. """ """ Below is the previous version of the code. I have it in git, but I want it available if I decide to reuse it for some reason. corners = [] wall_points = [] for room in self.rooms: wall_points.extend(room.wall_points) for corner in room.corners: if corner not in corners: corners.append(corner) lines = [] for corner in corners: lines.append(self.line(actor.x, actor.y corner[0], corner[1], True)) # if more x than y, it's to the left and right # add one above and below if abs(corner[0] - actor.x) > abs(corner[1] - actor.y): lines.append(self.line(actor.x, actor.y, corner[0], corner[1] - 1)) lines.append(self.line(actor.x, actor.y, corner[0], corner[1] + 1)) # otherwise, it's more y than x, so add one left and right else: lines.append(self.line(actor.x, actor.y corner[0] - 1, corner[1])) lines.append(self.line(actor.x, actor.y corner[0] + 1, corner[1])) # check orthogonal directions lines.append(self.line(actor.x, actor.y, actor.x - 1, actor.y)) lines.append(self.line(actor.x, actor.y, actor.x + 1, actor.y)) lines.append(self.line(actor.x, actor.y, actor.x, actor.y - 1)) lines.append(self.line(actor.x, actor.y, actor.x, actor.y + 1)) # check diagonal directions lines.append(self.line(actor.x, actor.y, actor.x - 1, actor.y - 1)) lines.append(self.line(actor.x, actor.y, actor.x + 1, actor.y - 1)) lines.append(self.line(actor.x, actor.y, actor.x - 1, actor.y + 1)) lines.append(self.line(actor.x, actor.y, actor.x + 1, actor.y + 1)) """ wall_points = [] for room in self.rooms: wall_points.extend(room.wall_points) vision_boundary = RectRoom(actor.x - actor.radius, actor.y - actor.radius, actor.x + actor.radius, actor.y + actor.radius) lines = [] line = vision_boundary.wall_points for point in line: lines.append(self.line(actor.x, actor.y, point[0], point[1])) polygon = [] for line in lines: for step in line: intersection = None # No friggin need to repeatedly check the origin. if (actor.x, actor.y) == step: continue # Only for debugging purposes. Will probably just be square # in the final game. Who knows, though? yolo # Also, these math bits are weird. I don't know that I need # them. if actor.los_shape == LOS_Shape.EUCLID: distance = sqrt((step[0] - actor.x) ** 2 + (step[1] - actor.y) ** 2) else: distance = max(abs(step[0] - actor.x), abs(step[1] - actor.y)) if distance < actor.radius: if step in wall_points: if not self[step[0]][step[1]].check_door(): intersection = step else: if self[step[0]][step[1]].prop.door_status: intersection = step elif distance >= actor.radius: intersection = step if not intersection: continue break if intersection: if intersection not in polygon: polygon.append(intersection) def algo(point): nonlocal actor return (atan2(point[1] - actor.y, point[0] - actor.x) + 2 * pi) % (2 * pi) polygon.sort(key=algo) poly_walls = [] for i in range(len(polygon) + 1): if i == 0: continue elif i == len(polygon): v1 = polygon[-1] v2 = polygon[0] else: v1 = polygon[i - 1] v2 = polygon[i] line = self.line(v1[0], v1[1], v2[0], v2[1], True) for tile in line: if tile not in poly_walls: poly_walls.extend(line) # bounding box: it's the enclosing box of possible tiles that need # to be checked for point-in-polygon. Probably a faster method, # but yolo. 
bb = (min(poly_walls)[0], min(poly_walls, key=lambda x: x[1])[1], max(poly_walls)[0], max(poly_walls, key=lambda x: x[1])[1]) fov = [] vertx, verty = zip(*poly_walls) for y in range(bb[1], bb[3] + 1): for x in range(bb[0], bb[2] + 1): # Sanity check: Obviously the walls are visible. if (x, y) in poly_walls: fov.append((x, y)) elif self.point_in_poly(x, y, vertx, verty): fov.append((x, y)) actor.viewed_map.update(fov) self.fov_map = fov def point_in_poly(self, x, y, vertx, verty): """ Adapted from: https://www.ecse.rpi.edu/Homepages/wrf/Research/Short_Notes/pnpoly.html I could have changed it to use a bounding box, but I think the zip(*poly_walls) method is the fastest way to do it. Works well enough for me right now. """ c = False j = len(vertx) - 1 for i in range(len(vertx)): if ((verty[i] > y) != (verty[j] > y)): if (x < (vertx[j] - vertx[i]) * (y - verty[i]) / (verty[j] - verty[i]) + vertx[i]): c = not c j = i return c def generate_map(self, width, height, num_exits): self.clear_map() self.generate_ground() self.carve_rooms() self.carve_passages() self.build_features() def clear_map(self): self.layout.clear() self.rooms.clear() self.passages.clear() self.start_loc = None def generate_ground(self): self.layout = [[Tile(x=x, y=y, glyph='.', color=COLOR['green'], bkcolor=COLOR['black'], physical=False) for y in range(self.height)] for x in range(self.width)] self.fov = [[False for y in range(self.height)] for x in range(self.width)] def carve_rooms(self): cur_max = randint(self.min_rooms, self.max_rooms) while len(self.rooms) < cur_max: w, h = randint(2, 10), randint(2, 10) x, y = randint(0, self.width - w), randint(0, self.height - h) new_room = RectRoom(x, y, w, h) failed = False for other_room in self.rooms: if new_room.intersect(other_room): failed = True break if (new_room.x_right >= self.width) or (new_room.y_bottom >= self.height): failed = True if not failed: self.rooms.append(new_room) if not self.start_loc: self.start_loc = new_room.center() for x, y in new_room.wall_points: self[x][y].update(glyph='#', color='grey', physical=True) for y in range(new_room.y_top + 1, new_room.y_bottom): for x in range(new_room.x_left + 1, new_room.x_right): self[x][y].update(glyph='.', color='amber') new_room = RectRoom(0, 0, self.width - 1, self.height - 1) for x, y in new_room.wall_points: self[x][y].update(glyph='#', color='grey', physical=True) self.rooms.append(new_room) def carve_passages(self): pass def build_features(self): for idx, room in enumerate(self.rooms): side = randint(0, 3) if side is 0: x, y = (randrange(room.x_left + 1, room.x_right), room.y_top) elif side is 1: x, y = (room.x_left, randrange(room.y_top + 1, room.y_bottom)) elif side is 2: x, y = (randrange(room.x_left + 1, room.x_right), room.y_bottom) elif side is 3: x, y = (room.x_right, randrange(room.y_top + 1, room.y_bottom)) if idx < len(self.rooms) - 1: self[x][y].build_door() class RectRoom: def __init__(self, x, y, w, h): self.x_left = x self.y_top = y self.x_right = x + w self.y_bottom = y + h self.corners() self.walls() def corners(self): top_left = (self.x_left, self.y_top) top_right = (self.x_right, self.y_top) bottom_right = (self.x_right, self.y_bottom) bottom_left = (self.x_left, self.y_bottom) self.corners = (top_left, top_right, bottom_right, bottom_left) self.corner_points = [] self.corner_points.extend((top_left, top_right, bottom_right, bottom_left)) def walls(self): top = self.line(self.x_left, self.y_top, self.x_right, self.y_top) right = self.line(self.x_right, self.y_top, self.x_right, self.y_bottom) 
bottom = self.line(self.x_left, self.y_bottom, self.x_right, self.y_bottom) left = self.line(self.x_left, self.y_top, self.x_left, self.y_bottom) self.walls = (top, right, bottom, left) self.wall_points = [] for wall in self.walls: self.wall_points.extend(wall) def line(self, x0, y0, x1, y1): """Bresenham's line algorithm""" dx = abs(x1 - x0) dy = abs(y1 - y0) x, y = x0, y0 sx = -1 if x0 > x1 else 1 sy = -1 if y0 > y1 else 1 plot = [] if dx > dy: err = dx / 2.0 while x != x1: plot.append((x, y)) err -= dy if err < 0: y += sy err += dx x += sx else: err = dy / 2.0 while y != y1: plot.append((x, y)) err -= dx if err < 0: x += sx err += dy y += sy plot.append((x, y)) return plot def center(self): x = (self.x_left + self.x_right) // 2 y = (self.y_top + self.y_bottom) // 2 return (x, y) def intersect(self, other): return (self.x_left <= other.x_right and self.x_right >= other.x_left and self.y_top <= other.y_bottom and self.y_bottom >= other.y_top) class RightAnglePassage: def __init__(self, start_loc, end_loc, width): self.x1, self.y1 = start_loc self.x2, self.y2 = end_loc self.width = width def length(self): return (abs(self.x2 - self.x1) + abs(self.y2 - self.y1))
NoahTheDuke/roguelike
Thing.py
Python
mit
22,320
[ "Amber" ]
22dfcc60993566518ec73128c929d80354f302fda8f7f61b8084199cea875c88
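The FOV code above fills its visibility polygon with a pnpoly-style even-odd test (Map.point_in_poly). A standalone sketch of that test, using a made-up square polygon, may make the edge-crossing logic easier to follow.

def point_in_poly(x, y, vertx, verty):
    """Even-odd rule: count polygon edges crossed by a ray going right from (x, y)."""
    c = False
    j = len(vertx) - 1
    for i in range(len(vertx)):
        if (verty[i] > y) != (verty[j] > y):
            # x-coordinate where edge (j -> i) crosses the horizontal line at y
            x_cross = (vertx[j] - vertx[i]) * (y - verty[i]) / (verty[j] - verty[i]) + vertx[i]
            if x < x_cross:
                c = not c
        j = i
    return c

square_x, square_y = [0, 10, 10, 0], [0, 0, 10, 10]
print(point_in_poly(5, 5, square_x, square_y))    # True  (inside)
print(point_in_poly(15, 5, square_x, square_y))   # False (outside)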
#!/usr/bin/env python # -*- coding: utf-8 -*- # <sure - utility belt for automated testing in python> # Copyright (C) <2010-2013> Gabriel Falcão <gabriel@nacaolivre.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. "utility belt for automated testing in python for python" import ast import os import sys import codecs from setuptools import setup, find_packages # These python versions of explicitly not supported # by sure. This is nostly because of the incompatiblities # with unicode strings. If there is an urgent reason why # to support it after all or if you have a quick fix # please open an issue on GitHub. EXPL_NOT_SUPPORTED_VERSIONS = ((3, 0), (3, 1), (3, 2)) if sys.version_info[0:2] in EXPL_NOT_SUPPORTED_VERSIONS: raise SystemExit("sure does explicitly not support the following python versions " "due to big incompatibilities: {0}".format(EXPL_NOT_SUPPORTED_VERSIONS)) PROJECT_ROOT = os.path.dirname(__file__) class VersionFinder(ast.NodeVisitor): def __init__(self): self.version = None def visit_Assign(self, node): try: if node.targets[0].id == 'version': self.version = node.value.s except: pass def read_version(): """Read version from sure/__init__.py without loading any files""" finder = VersionFinder() path = os.path.join(PROJECT_ROOT, 'sure', '__init__.py') with codecs.open(path, 'r', encoding='utf-8') as fp: file_data = fp.read().encode('utf-8') finder.visit(ast.parse(file_data)) return finder.version def local_text_file(*f): path = os.path.join(PROJECT_ROOT, *f) with open(path, 'rt') as fp: file_data = fp.read() return file_data def read_readme(): """Read README content. If the README.rst file does not exist yet (this is the case when not releasing) only the short description is returned. 
""" try: return local_text_file('README.rst') except IOError: return __doc__ install_requires = ['mock', 'six'] tests_require = ['nose'] if __name__ == '__main__': setup(name='sure', version=read_version(), description=__doc__, long_description=read_readme(), url='http://github.com/gabrielfalcao/sure', author='Gabriel Falcao', author_email='gabriel@nacaolivre.org', maintainer='Timo Furrer', maintainer_email='tuxtimo@gmail.com', include_package_data=True, packages=find_packages(exclude=['*tests*']), install_requires=install_requires, tests_require=tests_require, test_suite='nose.collector', classifiers=[ 'Development Status :: 5 - Production/Stable', 'Environment :: Console', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Operating System :: MacOS :: MacOS X', 'Operating System :: POSIX', 'Operating System :: POSIX :: Linux', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: Implementation', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Software Development :: Testing' ] )
timofurrer/sure
setup.py
Python
gpl-3.0
4,358
[ "VisIt" ]
5d5055c918437fcbc9525cea8c6bae93fc0ce2b88d60370d0870f492de60f08e
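The VersionFinder above reads the package version by walking the AST instead of importing the package. Below is a self-contained sketch of the same trick, written against the Python 3.8+ ast.Constant node rather than the older node.value.s access used above; the sample source string is invented.

import ast

class VersionFinder(ast.NodeVisitor):
    """Collect the value of a `version = '<string literal>'` assignment."""

    def __init__(self):
        self.version = None

    def visit_Assign(self, node):
        target = node.targets[0]
        # Only simple `version = <constant>` assignments are recognised.
        if getattr(target, 'id', None) == 'version' and isinstance(node.value, ast.Constant):
            self.version = node.value.value

source = "name = 'demo'\nversion = '1.4.0'\n"
finder = VersionFinder()
finder.visit(ast.parse(source))
print(finder.version)   # -> 1.4.0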
"""Install next gen sequencing analysis tools not currently packaged. """ from __future__ import print_function import os import re from fabric.api import * from fabric.contrib.files import * import yaml from shared import (_if_not_installed, _make_tmp_dir, _get_install, _get_install_local, _make_copy, _configure_make, _java_install, _python_cmd, _symlinked_java_version_dir, _fetch_and_unpack, _python_make, _get_lib_dir, _get_include_dir, _apply_patch) from cloudbio.custom import shared, versioncheck from cloudbio import libraries from cloudbio.flavor.config import get_config_file @_if_not_installed(["twoBitToFa", "gtfToGenePred"]) def install_ucsc_tools(env): """Useful executables from UCSC. todo: install from source to handle 32bit and get more programs http://hgdownload.cse.ucsc.edu/admin/jksrc.zip """ tools = ["liftOver", "faToTwoBit", "bedToBigBed", "bigBedInfo", "bigBedSummary", "bigBedToBed", "bedGraphToBigWig", "bigWigInfo", "bigWigSummary", "bigWigToBedGraph", "bigWigToWig", "fetchChromSizes", "wigToBigWig", "faSize", "twoBitInfo", "twoBitToFa", "faCount", "gtfToGenePred"] url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/" _download_executables(env, url, tools) @_if_not_installed("blat") def install_kent_tools(env): """ Please note that the Blat source and executables are freely available for academic, nonprofit and personal use. Commercial licensing information is available on the Kent Informatics website (http://www.kentinformatics.com/). """ tools = ["blat", "gfClient", "gfServer"] url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/blat/" _download_executables(env, url, tools) def _download_executables(env, base_url, tools): install_dir = shared._get_bin_dir(env) with _make_tmp_dir() as work_dir: with cd(work_dir): for tool in tools: final_tool = os.path.join(install_dir, tool) if not env.safe_exists(final_tool) and shared._executable_not_on_path(tool): shared._remote_fetch(env, "%s%s" % (base_url, tool)) env.safe_sudo("cp -f %s %s" % (tool, install_dir)) final_path = os.path.join(install_dir, tool) env.safe_sudo("chmod uga+rx %s" % final_path) # --- Alignment tools def install_featurecounts(env): """ featureCounts from the subread package for counting reads mapping to genomic features """ default_version = "1.4.4" version = env.get("tool_version", default_version) if versioncheck.up_to_date(env, "featureCounts", version, stdout_flag="Version"): return platform = "MacOS" if env.distribution == "macosx" else "Linux" url = ("http://downloads.sourceforge.net/project/subread/" "subread-%s/subread-%s-%s-x86_64.tar.gz" % (version, version, platform)) _get_install(url, env, _make_copy("find . -type f -perm -100 -name 'featureCounts'", do_make=False)) @_if_not_installed("bowtie") def install_bowtie(env): """The bowtie short read aligner. http://bowtie-bio.sourceforge.net/index.shtml """ default_version = "1.0.0" version = env.get("tool_version", default_version) url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie/%s/" \ "bowtie-%s-src.zip" % (version, version) _get_install(url, env, _make_copy("find . -perm -100 -name 'bowtie*'")) @_if_not_installed("bowtie2") def install_bowtie2(env): """bowtie2 short read aligner, with gap support. http://bowtie-bio.sourceforge.net/bowtie2/index.shtml """ default_version = "2.1.0" version = env.get("tool_version", default_version) url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie2/%s/" \ "bowtie2-%s-source.zip" % (version, version) _get_install(url, env, _make_copy("find . 
-perm -100 -name 'bowtie2*'")) @_if_not_installed("bfast") def install_bfast(env): """BFAST: Blat-like Fast Accurate Search Tool. http://sourceforge.net/apps/mediawiki/bfast/index.php?title=Main_Page """ default_version = "0.7.0a" version = env.get("tool_version", default_version) major_version_regex = "\d+\.\d+\.\d+" major_version = re.search(major_version_regex, version).group(0) url = "http://downloads.sourceforge.net/project/bfast/bfast/%s/bfast-%s.tar.gz"\ % (major_version, version) _get_install(url, env, _configure_make) @_if_not_installed("perm") def install_perm(env): """Efficient mapping of short sequences accomplished with periodic full sensitive spaced seeds. https://code.google.com/p/perm/ """ default_version = "4" version = env.get("tool_version", default_version) url = "http://perm.googlecode.com/files/PerM%sSource.tar.gz" % version def gcc44_makefile_patch(): gcc_cmd = "g++44" with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True): result = env.safe_run("%s -v" % gcc_cmd) print(result.return_code) if result.return_code == 0: env.safe_sed("makefile", "g\+\+", gcc_cmd) _get_install(url, env, _make_copy("ls -1 perm", gcc44_makefile_patch)) @_if_not_installed("snap") def install_snap(env): """Scalable Nucleotide Alignment Program http://snap.cs.berkeley.edu/ """ version = "0.15" url = "http://github.com/downloads/amplab/snap/" \ "snap-%s-linux.tar.gz" % version _get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False)) def install_stampy(env): """Stampy: mapping of short reads from illumina sequencing machines onto a reference genome. http://www.well.ox.ac.uk/project-stampy """ version = "1.0.21" #version = base_version #revision = "1654" #version = "{0}r{1}".format(base_version, revision) #url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \ # "stampy-%s.tgz" % (version) # Ugh -- Stampy now uses a 'Stampy-latest' download target url = "http://www.well.ox.ac.uk/bioinformatics/Software/" \ "Stampy-latest.tgz" def _clean_makefile(env): env.safe_sed("makefile", " -Wl", "") _get_install_local(url, env, _make_copy(), dir_name="stampy-{0}".format(version), post_unpack_fn=_clean_makefile) @_if_not_installed("gmap") def install_gmap(env): """GMAP and GSNAP: A Genomic Mapping and Alignment Program for mRNA EST and short reads. http://research-pub.gene.com/gmap/ """ version = "2012-11-09" url = "http://research-pub.gene.com/gmap/src/gmap-gsnap-%s.tar.gz" % version _get_install(url, env, _configure_make) def _wget_with_cookies(ref_url, dl_url): env.safe_run("wget --cookies=on --keep-session-cookies --save-cookies=cookie.txt %s" % (ref_url)) env.safe_run("wget --referer=%s --cookies=on --load-cookies=cookie.txt " "--keep-session-cookies --save-cookies=cookie.txt %s" % (ref_url, dl_url)) @_if_not_installed("novoalign") def install_novoalign(env): """Novoalign short read aligner using Needleman-Wunsch algorithm with affine gap penalties. 
http://www.novocraft.com/main/index.php """ base_version = "V3.00.02" cs_version = "V1.03.02" _url = "http://www.novocraft.com/downloads/%s/" % base_version ref_url = "http://www.novocraft.com/main/downloadpage.php" base_url = "%s/novocraft%s.gcc.tar.gz" % (_url, base_version) cs_url = "%s/novoalignCS%s.gcc.tar.gz" % (_url, cs_version) install_dir = shared._get_bin_dir(env) with _make_tmp_dir() as work_dir: with cd(work_dir): _wget_with_cookies(ref_url, base_url) env.safe_run("tar -xzvpf novocraft%s.gcc.tar.gz" % base_version) with cd("novocraft"): for fname in ["isnovoindex", "novo2maq", "novo2paf", "novo2sam.pl", "novoalign", "novobarcode", "novoindex", "novope2bed.pl", "novorun.pl", "novoutil"]: env.safe_sudo("mv %s %s" % (fname, install_dir)) with _make_tmp_dir() as work_dir: with cd(work_dir): _wget_with_cookies(ref_url, cs_url) env.safe_run("tar -xzvpf novoalignCS%s.gcc.tar.gz" % cs_version) with cd("novoalignCS"): for fname in ["novoalignCS"]: env.safe_sudo("mv %s %s" % (fname, install_dir)) @_if_not_installed("novosort") def install_novosort(env): """Multithreaded sort and merge for BAM files. http://www.novocraft.com/wiki/tiki-index.php?page=Novosort """ base_version = "V3.00.02" version = "V1.00.02" url = "http://www.novocraft.com/downloads/%s/novosort%s.gcc.tar.gz" % (base_version, version) ref_url = "http://www.novocraft.com/main/downloadpage.php" install_dir = shared._get_bin_dir(env) with _make_tmp_dir() as work_dir: with cd(work_dir): _wget_with_cookies(ref_url, url) env.safe_run("tar -xzvpf novosort%s.gcc.tar.gz" % version) with cd("novosort"): for fname in ["novosort"]: env.safe_sudo("mv %s %s" % (fname, install_dir)) @_if_not_installed("lastz") def install_lastz(env): """LASTZ sequence alignment program. http://www.bx.psu.edu/miller_lab/dist/README.lastz-1.02.00/README.lastz-1.02.00a.html """ default_version = "1.02.00" version = env.get("tool_version", default_version) url = "http://www.bx.psu.edu/miller_lab/dist/" \ "lastz-%s.tar.gz" % version def _remove_werror(env): env.safe_sed("src/Makefile", " -Werror", "") _get_install(url, env, _make_copy("find . -perm -100 -name 'lastz'"), post_unpack_fn=_remove_werror) @_if_not_installed("MosaikAligner") def install_mosaik(env): """MOSAIK: reference-guided aligner for next-generation sequencing technologies http://code.google.com/p/mosaik-aligner/ """ version = "2.2.3" url = "https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/mosaik-aligner/" \ "MOSAIK-%s-Linux-x64.tar" % version _get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False)) # --- Utilities def install_samtools(env): """SAM Tools provide various utilities for manipulating alignments in the SAM format. http://samtools.sourceforge.net/ """ default_version = "0.1.19" version = env.get("tool_version", default_version) if versioncheck.up_to_date(env, "samtools", version, stdout_flag="Version:"): env.logger.info("samtools version {0} is up to date; not installing" .format(version)) return url = "http://downloads.sourceforge.net/project/samtools/samtools/" \ "%s/samtools-%s.tar.bz2" % (version, version) def _safe_ncurses_make(env): """Combine samtools, removing ncurses refs if not present on system. 
""" with settings(warn_only=True): result = env.safe_run("make") # no ncurses, fix Makefile and rebuild if result.failed: env.safe_sed("Makefile", "-D_CURSES_LIB=1", "-D_CURSES_LIB=0") env.safe_sed("Makefile", "-lcurses", "# -lcurses") env.safe_run("make clean") env.safe_run("make") install_dir = shared._get_bin_dir(env) for fname in env.safe_run_output("ls -1 samtools bcftools/bcftools bcftools/vcfutils.pl misc/wgsim").split("\n"): env.safe_sudo("cp -f %s %s" % (fname.rstrip("\r"), install_dir)) _get_install(url, env, _safe_ncurses_make) @_if_not_installed("vtools") def install_varianttools(env): """Annotation, selection, and analysis of variants in the context of next-gen sequencing analysis. http://varianttools.sourceforge.net/ """ version = "1.0.6" url = "http://downloads.sourceforge.net/project/varianttools/" \ "{ver}/variant_tools-{ver}-src.tar.gz".format(ver=version) _get_install(url, env, _python_make) @_if_not_installed("dwgsim") def install_dwgsim(env): """DWGSIM: simulating NGS data and evaluating mappings and variant calling. http://sourceforge.net/apps/mediawiki/dnaa/index.php?title=Main_Page """ version = "0.1.10" samtools_version = "0.1.18" url = "http://downloads.sourceforge.net/project/dnaa/dwgsim/" \ "dwgsim-{0}.tar.gz".format(version) samtools_url = "http://downloads.sourceforge.net/project/samtools/samtools/" \ "{ver}/samtools-{ver}.tar.bz2".format(ver=samtools_version) def _get_samtools(env): shared._remote_fetch(env, samtools_url) env.safe_run("tar jxf samtools-{0}.tar.bz2".format(samtools_version)) env.safe_run("ln -s samtools-{0} samtools".format(samtools_version)) _get_install(url, env, _make_copy("ls -1 dwgsim dwgsim_eval scripts/dwgsim_pileup_eval.pl"), post_unpack_fn=_get_samtools) @_if_not_installed("fastq_screen") def install_fastq_screen(env): """A screening application for high througput sequence data. http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/ """ version = "0.4" url = "http://www.bioinformatics.babraham.ac.uk/projects/fastq_screen/" \ "fastq_screen_v%s.tar.gz" % version install_dir = shared._symlinked_shared_dir("fastqc_screen", version, env) executable = "fastq_screen" if install_dir: with _make_tmp_dir() as work_dir: with cd(work_dir): out_file = shared._remote_fetch(env, url) env.safe_run("tar -xzvpf %s" % out_file) with cd("fastq_screen_v%s" % version): env.safe_sudo("mv * %s" % install_dir) env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, executable, env.system_install, executable)) def install_bedtools(env): """A flexible suite of utilities for comparing genomic features. https://code.google.com/p/bedtools/ """ version = "2.17.0" if versioncheck.up_to_date(env, "bedtools --version", version, stdout_flag="bedtools"): return url = "https://bedtools.googlecode.com/files/" \ "BEDTools.v%s.tar.gz" % version _get_install(url, env, _make_copy("ls -1 bin/*")) _shrec_run = """ #!/usr/bin/perl use warnings; use strict; use FindBin qw($RealBin); use Getopt::Long; my @java_args; my @args; foreach (@ARGV) { if (/^\-X/) {push @java_args,$_;} else {push @args,$_;}} system("java -cp $RealBin @java_args Shrec @args"); """ @_if_not_installed("shrec") def install_shrec(env): """Shrec is a bioinformatics tool for error correction of HTS read data. 
http://sourceforge.net/projects/shrec-ec/ """ version = "2.2" url = "http://downloads.sourceforge.net/project/shrec-ec/SHREC%%20%s/bin.zip" % version install_dir = _symlinked_java_version_dir("shrec", version, env) if install_dir: shrec_script = "%s/shrec" % install_dir with _make_tmp_dir() as work_dir: with cd(work_dir): out_file = shared._remote_fetch(env, url) env.safe_run("unzip %s" % out_file) env.safe_sudo("mv *.class %s" % install_dir) for line in _shrec_run.split("\n"): if line.strip(): env.safe_append(shrec_script, line, use_sudo=env.use_sudo) env.safe_sudo("chmod a+rwx %s" % shrec_script) env.safe_sudo("ln -s %s %s/bin/shrec" % (shrec_script, env.system_install)) def install_echo(env): """ECHO: A reference-free short-read error correction algorithm http://uc-echo.sourceforge.net/ """ version = "1_12" url = "http://downloads.sourceforge.net/project/uc-echo/source%20release/" \ "echo_v{0}.tgz".format(version) _get_install_local(url, env, _make_copy()) # -- Analysis def install_picard(env): """Command-line utilities that manipulate BAM files with a Java API. http://picard.sourceforge.net/ """ version = "1.96" url = "http://downloads.sourceforge.net/project/picard/" \ "picard-tools/%s/picard-tools-%s.zip" % (version, version) _java_install("picard", version, url, env) def install_alientrimmer(env): """ Adapter removal tool http://www.ncbi.nlm.nih.gov/pubmed/23912058 """ version = "0.3.2" url = ("ftp://ftp.pasteur.fr/pub/gensoft/projects/AlienTrimmer/" "AlienTrimmer_%s.tar.gz" % version) _java_install("AlienTrimmer", version, url, env) def install_rnaseqc(env): """Quality control metrics for RNA-seq data https://www.broadinstitute.org/cancer/cga/rna-seqc """ version = "1.1.7" url = ("https://github.com/chapmanb/RNA-SeQC/releases/download/" "v%s/RNA-SeQC_v%s.jar" % (version, version)) install_dir = _symlinked_java_version_dir("RNA-SeQC", version, env) if install_dir: with _make_tmp_dir() as work_dir: with cd(work_dir): out_file = shared._remote_fetch(env, url) env.safe_sudo("mv %s %s" % (out_file, install_dir)) def install_varscan(env): """Variant detection in massively parallel sequencing data http://varscan.sourceforge.net/ """ version = "2.3.7" url = "http://downloads.sourceforge.net/project/varscan/VarScan.v%s.jar" % version install_dir = _symlinked_java_version_dir("varscan", version, env) if install_dir: with _make_tmp_dir() as work_dir: with cd(work_dir): out_file = shared._remote_fetch(env, url) env.safe_sudo("mv %s %s" % (out_file, install_dir)) def install_mutect(env): version = "1.1.5" url = "https://github.com/broadinstitute/mutect/releases/download/" \ "%s/muTect-%s-bin.zip" % (version, version) install_dir = _symlinked_java_version_dir("mutect", version, env) if install_dir: with _make_tmp_dir() as work_dir: with cd(work_dir): out_file = shared._remote_fetch(env, url) env.safe_run("unzip %s" % out_file) env.safe_sudo("mv *.jar version.txt LICENSE* %s" % install_dir) @_if_not_installed("bam") def install_bamutil(env): """Utilities for working with BAM files, from U of M Center for Statistical Genetics. 
http://genome.sph.umich.edu/wiki/BamUtil """ version = "1.0.7" url = "http://genome.sph.umich.edu/w/images/5/5d/BamUtilLibStatGen.%s.tgz" % version _get_install(url, env, _make_copy("ls -1 bamUtil/bin/bam"), dir_name="bamUtil_%s" % version) @_if_not_installed("tabix") def install_tabix(env): """Generic indexer for TAB-delimited genome position files http://samtools.sourceforge.net/tabix.shtml """ version = "0.2.6" url = "http://downloads.sourceforge.net/project/samtools/tabix/tabix-%s.tar.bz2" % version _get_install(url, env, _make_copy("ls -1 tabix bgzip")) @_if_not_installed("disambiguate.py") def install_disambiguate(env): """a tool for disambiguating reads aligning to multiple genomes https://github.com:mjafin/disambiguate """ repository = "git clone https://github.com/mjafin/disambiguate.git" _get_install(repository, env, _python_make) def install_grabix(env): """a wee tool for random access into BGZF files https://github.com/arq5x/grabix """ version = "0.1.6" revision = "ba792bc872d38d3cb5a69b2de00e39a6ac367d69" try: uptodate = versioncheck.up_to_date(env, "grabix", version, stdout_flag="version:") # Old versions will not have any version information except IOError: uptodate = False if uptodate: return repository = "git clone https://github.com/arq5x/grabix.git" _get_install(repository, env, _make_copy("ls -1 grabix"), revision=revision) @_if_not_installed("pbgzip") def install_pbgzip(env): """Parallel blocked bgzip -- compatible with bgzip but with thread support. https://github.com/nh13/samtools/tree/master/pbgzip """ repository = "git clone https://github.com/chapmanb/samtools.git" revision = "2cce3ffa97" def _build(env): with cd("pbgzip"): env.safe_run("make") install_dir = shared._get_bin_dir(env) env.safe_sudo("cp -f pbgzip %s" % (install_dir)) _get_install(repository, env, _build, revision=revision) @_if_not_installed("bamtools") def install_bamtools(env): """command-line toolkit for working with BAM data https://github.com/pezmaster31/bamtools """ version = "3fe66b9" repository = "git clone --recursive https://github.com/pezmaster31/bamtools.git" def _cmake_bamtools(env): env.safe_run("mkdir build") with cd("build"): env.safe_run("cmake ..") env.safe_run("make") env.safe_sudo("cp bin/* %s" % shared._get_bin_dir(env)) env.safe_sudo("cp lib/* %s" % shared._get_lib_dir(env)) _get_install(repository, env, _cmake_bamtools, revision=version) @_if_not_installed("ogap") def install_ogap(env): """gap opening realigner for BAM data streams https://github.com/ekg/ogap """ version = "652c525" repository = "git clone --recursive https://github.com/ekg/ogap.git" _get_install(repository, env, _make_copy("ls ogap"), revision=version) def install_tophat(env): """TopHat is a fast splice junction mapper for RNA-Seq reads http://ccb.jhu.edu/software/tophat/index.shtml """ default_version = "2.0.9" version = env.get("tool_version", default_version) if versioncheck.is_version(env, "tophat", version, args="--version", stdout_flag="TopHat"): env.logger.info("tophat version {0} is up to date; not installing" .format(version)) return platform = "OSX" if env.distribution == "macosx" else "Linux" url = "http://ccb.jhu.edu/software/tophat/downloads/" \ "tophat-%s.%s_x86_64.tar.gz" % (version, platform) _get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False)) install_tophat2 = install_tophat # --- Assembly @_if_not_installed("ABYSS") def install_abyss(env): """Assembly By Short Sequences - a de novo, parallel, paired-end sequence assembler. 
http://www.bcgsc.ca/platform/bioinfo/software/abyss """ # XXX check for no sparehash on non-ubuntu systems default_version = "2.0.2" version = env.get("tool_version", default_version) url = "http://www.bcgsc.ca/platform/bioinfo/software/abyss/releases/%s/abyss-%s.tar.gz" % (version, version) def _remove_werror_get_boost(env): env.safe_sed("configure", " -Werror", "") # http://osdir.com/ml/abyss-users-science/2011-10/msg00108.html url = "http://downloads.sourceforge.net/project/boost/boost/1.63.0/boost_1_63_0.tar.bz2" dl_file = shared._remote_fetch(env, url) env.safe_run("tar jxf %s" % dl_file) env.safe_run("ln -s boost_1_63_0/boost boost") _get_install(url, env, _configure_make, post_unpack_fn=_remove_werror_get_boost) def install_transabyss(env): """Analyze ABySS multi-k-assembled shotgun transcriptome data. http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss """ version = "1.4.4" url = "http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss/" \ "releases/%s/trans-ABySS-v%s.tar.gz" % (version, version) _get_install_local(url, env, _make_copy(do_make=False)) @_if_not_installed("velvetg") def install_velvet(env): """Sequence assembler for very short reads. http://www.ebi.ac.uk/~zerbino/velvet/ """ default_version = "1.2.10" version = env.get("tool_version", default_version) url = "http://www.ebi.ac.uk/~zerbino/velvet/velvet_%s.tgz" % version def _fix_library_order(env): """Fix library order problem in recent gcc versions http://biostar.stackexchange.com/questions/13713/ error-installing-velvet-assembler-1-1-06-on-ubuntu-server """ env.safe_sed("Makefile", "Z_LIB_FILES=-lz", "Z_LIB_FILES=-lz -lm") _get_install(url, env, _make_copy("find . -perm -100 -name 'velvet*'"), post_unpack_fn=_fix_library_order) @_if_not_installed("Ray") def install_ray(env): """Ray -- Parallel genome assemblies for parallel DNA sequencing http://denovoassembler.sourceforge.net/ """ default_version = "2.2.0" version = env.get("tool_version", default_version) url = "http://downloads.sourceforge.net/project/denovoassembler/Ray-v%s.tar.bz2" % version def _ray_do_nothing(env): return _get_install(url, env, _make_copy("find . -name Ray"), post_unpack_fn=_ray_do_nothing) def install_trinity(env): """Efficient and robust de novo reconstruction of transcriptomes from RNA-seq data. http://trinityrnaseq.github.io/ """ version = "2.3.2" url = "https://github.com/trinityrnaseq/trinityrnaseq/archive/" \ "Trinity-v%s.tar.gz" % version dir_name = "trinityrnaseq-%s" % version _get_install_local(url, env, _make_copy(), dir_name=dir_name) def install_cortex_var(env): """De novo genome assembly and variation analysis from sequence data. 
http://cortexassembler.sourceforge.net/index_cortex_var.html """ version = "1.0.5.21" url = "http://downloads.sourceforge.net/project/cortexassembler/cortex_var/" \ "latest/CORTEX_release_v{0}.tgz".format(version) def _cortex_build(env): env.safe_sed("Makefile", "\-L/full/path/\S*", "-L{0}/lib -L/usr/lib -L/usr/local/lib".format(env.system_install)) env.safe_sed("Makefile", "^IDIR_GSL =.*$", "IDIR_GSL={0}/include -I/usr/include -I/usr/local/include".format(env.system_install)) env.safe_sed("Makefile", "^IDIR_GSL_ALSO =.*$", "IDIR_GSL_ALSO={0}/include/gsl -I/usr/include/gsl -I/usr/local/include/gsl".format( env.system_install)) with cd("libs/gsl-1.15"): env.safe_run("make clean") with cd("libs/htslib"): env.safe_run("make clean") env.safe_run("make") for cols in ["1", "2", "3", "4", "5"]: for kmer in ["31", "63", "95"]: env.safe_run("make MAXK={0} NUM_COLS={1} cortex_var".format(kmer, cols)) with cd("scripts/analyse_variants/needleman_wunsch"): env.safe_sed("Makefile", "string_buffer.c", "string_buffer.c -lz") # Fix incompatibilities with gzfile struct in zlib 1.2.6+ for fix_gz in ["libs/string_buffer/string_buffer.c", "libs/bioinf/bioinf.c", "libs/string_buffer/string_buffer.h", "libs/bioinf/bioinf.h"]: env.safe_sed(fix_gz, "gzFile \*", "gzFile ") env.safe_sed(fix_gz, "gzFile\*", "gzFile") env.safe_run("make") _get_install_local(url, env, _cortex_build) def install_bcbio_variation(env): """Toolkit to analyze genomic variation data with comparison and ensemble approaches. https://github.com/chapmanb/bcbio.variation """ version = "0.2.6" url = "https://github.com/chapmanb/bcbio.variation/releases/download/" \ "v%s/bcbio.variation-%s-standalone.jar" % (version, version) install_dir = _symlinked_java_version_dir("bcbio_variation", version, env) if install_dir: with _make_tmp_dir() as work_dir: with cd(work_dir): jar_file = shared._remote_fetch(env, url) env.safe_sudo("mv %s %s" % (jar_file, install_dir)) # --- ChIP-seq @_if_not_installed("macs14") def install_macs(env): """Model-based Analysis for ChIP-Seq. http://liulab.dfci.harvard.edu/MACS/ """ default_version = "1.4.2" version = env.get("tool_version", default_version) url = "https://github.com/downloads/taoliu/MACS/" \ "MACS-%s.tar.gz" % version _get_install(url, env, _python_make) # --- Structural variation @_if_not_installed("hydra") def install_hydra(env): """Hydra detects structural variation breakpoints in both unique and duplicated genomic regions. https://code.google.com/p/hydra-sv/ """ version = "0.5.3" url = "https://storage.googleapis.com/google-code-archive-downloads/v2/code.google.com/hydra-sv/Hydra.v{0}.tar.gz".format(version) def clean_libs(env): env.safe_run("make clean") _get_install(url, env, _make_copy("ls -1 bin/* scripts/*"), post_unpack_fn=clean_libs) def install_freec(env): """Control-FREEC: a tool for detection of copy number changes and allelic imbalances. http://bioinfo-out.curie.fr/projects/freec/ """ version = "6.4" if env.distribution in ["ubuntu", "debian"]: if env.is_64bit: url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_Linux64.tar.gz" else: url = "http://bioinfo-out.curie.fr/projects/freec/src/FREEC_LINUX32.tar.gz" if not versioncheck.up_to_date(env, "freec", version, stdout_index=1): _get_install(url, env, _make_copy("find . -name 'freec'"), dir_name=".") @_if_not_installed("CRISP.py") def install_crisp(env): """Detect SNPs and short indels from pooled sequencing data. 
    https://sites.google.com/site/vibansal/software/crisp/
    """
    version = "5"
    url = "https://sites.google.com/site/vibansal/software/crisp/" \
          "CRISP-linux-v{0}.tar.gz".format(version)

    def _make_executable():
        env.safe_run("chmod a+x *.py")
    _get_install(url, env, _make_copy("ls -1 CRISP.py crisp_to_vcf.py",
                                      premake_cmd=_make_executable,
                                      do_make=False))

@_if_not_installed("run_pipeline.pl")
def install_tassel(env):
    """TASSEL: evaluate traits associations, evolutionary patterns, and linkage disequilibrium.
    http://www.maizegenetics.net/index.php?option=com_content&task=view&id=89&/Itemid=119
    """
    version = "5"
    build_id = "1140d3fceb75"
    url = "https://bitbucket.org/tasseladmin/tassel-{0}-standalone/get/{1}.zip".format(version, build_id)
    executables = ["start_tassel.pl", "run_pipeline.pl"]
    install_dir = _symlinked_java_version_dir("tassel", version, env)
    if install_dir:
        with _make_tmp_dir() as work_dir:
            with cd(work_dir):
                dl_file = shared._remote_fetch(env, url)
                env.safe_run("unzip %s" % dl_file)
                with cd("tasseladmin-tassel-{0}-standalone-{1}".format(version, build_id)):
                    for x in executables:
                        env.safe_sed(x, "^my \$top.*;",
                                     "use FindBin qw($RealBin); my $top = $RealBin;")
                        env.safe_sudo("chmod a+rwx %s" % x)
                    env.safe_sudo("mv * %s" % install_dir)
                    for x in executables:
                        env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, x,
                                                                 env.system_install, x))

@_if_not_installed("ustacks")
def install_stacks(env):
    """Stacks: build loci out of a set of short-read sequenced samples.
    http://creskolab.uoregon.edu/stacks/
    """
    version = "0.9999"
    url = "http://creskolab.uoregon.edu/stacks/source/" \
          "stacks-{0}.tar.gz".format(version)
    _get_install(url, env, _configure_make)

@_if_not_installed("seqlogo")
def install_weblogo(env):
    """Weblogo
    http://weblogo.berkeley.edu/
    """
    version = "2.8.2"
    url = "http://weblogo.berkeley.edu/release/weblogo.%s.tar.gz" % version

    def _cp_pm(env):
        for perl_module in ["template.pm", "logo.pm", "template.eps"]:
            env.safe_sudo("cp %s %s/lib/perl5" % (perl_module, env.system_install))
    _get_install(url, env, _make_copy("find . -perm -100 -type f", do_make=False),
                 post_unpack_fn=_cp_pm)
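# Illustrative sketch only (not part of bio_nextgen.py): the minimal shape of
# the source-build installers above -- guard with @_if_not_installed, build a
# versioned download URL, then let _get_install unpack, build and copy the
# binaries into place. "exampletool" and its URL are hypothetical placeholders;
# the helpers are the ones already defined/imported by this module.
@_if_not_installed("exampletool")
def install_exampletool(env):
    """Hypothetical installer following the conventions used above."""
    version = "1.0.0"
    url = "http://example.org/downloads/exampletool-%s.tar.gz" % version
    _get_install(url, env, _make_copy("ls -1 bin/exampletool"))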
chapmanb/cloudbiolinux
cloudbio/custom/bio_nextgen.py
Python
mit
32,360
[ "Bowtie" ]
edde522c0e674c924a71984dce866bae8be3a0e5fd8e992bd51ba7fd288591e4
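# A hedged sketch (not in the original bio_nextgen.py above) of the jar-download
# pattern shared by install_varscan, install_rnaseqc and install_mutect: create
# a version-symlinked install directory, fetch the archive into a temporary
# directory, and move it into place. "exampletool" and its URL are hypothetical
# placeholders; the helpers are the module's own.
def install_exampletool_jar(env):
    version = "1.0"
    url = "http://example.org/downloads/exampletool-%s.jar" % version
    install_dir = _symlinked_java_version_dir("exampletool", version, env)
    if install_dir:
        with _make_tmp_dir() as work_dir:
            with cd(work_dir):
                out_file = shared._remote_fetch(env, url)
                env.safe_sudo("mv %s %s" % (out_file, install_dir))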
from past.builtins import basestring from copy import copy import sympy from sympy import sympify from sympy.logic.boolalg import BooleanTrue, BooleanFalse from sympy.functions.elementary.piecewise import ExprCondPair from ...expressions import reserved_identifiers from nineml.visitors import BaseVisitor, BaseVisitorWithContext from nineml.units import Dimension from nineml.abstraction.ports import SendPortBase from nineml.abstraction.expressions import Expression from nineml.exceptions import NineMLNameError import operator from functools import reduce class ComponentClassInterfaceInferer(BaseVisitor): """ Used to infer output |EventPorts|, |StateVariables| & |Parameters|.""" def __init__(self, component_class): super(ComponentClassInterfaceInferer, self).__init__() # Parameters: # Use visitation to collect all atoms that are not aliases and not # state variables self.component_class = component_class self.declared_symbols = copy(reserved_identifiers) self.atoms = set() self.input_event_port_names = set() self.event_out_port_names = set() self.visit(self.component_class) # Visit class and populate declared_symbols and atoms sets self.parameter_names = self.atoms - self.declared_symbols def action_alias(self, alias, **kwargs): # @UnusedVariable self.declared_symbols.add(alias.lhs) self.atoms.update(alias.rhs_atoms) def action_constant(self, constant, **kwargs): # @UnusedVariable self.declared_symbols.add(constant.name) def default_action(self, obj, nineml_cls, **kwargs): pass class ComponentRequiredDefinitions(BaseVisitor): """ Gets lists of required parameters, states, ports, random variables, constants and expressions (in resolved order of execution). """ def __init__(self, component_class, expressions): BaseVisitor.__init__(self) # Expression can either be a single expression or an iterable of # expressions self.parameters = [] self.ports = [] self.constants = [] self.random_variables = [] self.expressions = [] self._required_stack = [] self._push_required_symbols(expressions) self.component_class = component_class self.visit(component_class) def __repr__(self): return ("Parameters: {}\nPorts: {}\nConstants: {}\nAliases:\n{}" .format(', '.join(self.parameter_names), ', '.join(self.port_names), ', '.join(self.constant_names), ', '.join(self.expression_names))) def _push_required_symbols(self, expression): required_atoms = set() try: for expr in expression: try: required_atoms.update(expr.rhs_atoms) except AttributeError: # If a output port instead of expr required_atoms.add(expr.name) except TypeError: required_atoms.update(expression.rhs_atoms) # Strip builtin symbols from required atoms required_atoms.difference_update(reserved_identifiers) self._required_stack.append(required_atoms) def _is_required(self, element): return element.name in self._required_stack[-1] def action_parameter(self, parameter, **kwargs): # @UnusedVariable if self._is_required(parameter): self.parameters.append(parameter) def action_analogreceiveport(self, port, **kwargs): # @UnusedVariable if self._is_required(port): self.ports.append(port) def action_analogreduceport(self, port, **kwargs): # @UnusedVariable if self._is_required(port): self.ports.append(port) def action_constant(self, constant, **kwargs): # @UnusedVariable if self._is_required(constant): self.constants.append(constant) def action_alias(self, alias, **kwargs): # @UnusedVariable if (self._is_required(alias) and alias.name not in (e.name for e in self.expressions)): # Since aliases may be dependent on other aliases/piecewises the # order they are 
executed is important so we make sure their # dependencies are added first self._push_required_symbols(alias) self.visit(self.component_class) self._required_stack.pop() self.expressions.append(alias) def default_action(self, obj, nineml_cls, **kwargs): pass @property def parameter_names(self): return (p.name for p in self.parameters) @property def port_names(self): return (p.name for p in self.ports) @property def random_variable_names(self): return (rv.name for rv in self.random_variables) @property def constant_names(self): return (c.name for c in self.constants) @property def expression_names(self): return (e.name for e in self.expressions) class ComponentExpressionExtractor(BaseVisitor): def __init__(self): super(ComponentExpressionExtractor, self).__init__() self.expressions = [] def action_alias(self, alias, **kwargs): # @UnusedVariable self.expressions.append(alias.rhs) def default_action(self, obj, nineml_cls, **kwargs): pass class ComponentDimensionResolver(BaseVisitorWithContext): """ Used to calculate the unit dimension of elements within a component class """ reserved_symbol_dims = {sympy.Symbol('t'): sympy.Symbol('t')} def __init__(self, component_class): super(ComponentDimensionResolver, self).__init__() self.component_class = component_class self._dims = {} # Insert declared dimensions into dimensionality database for a in component_class.attributes_with_dimension: if not isinstance(a, SendPortBase): self._dims[sympify(a)] = sympify(a.dimension) for a in component_class.attributes_with_units: self._dims[sympify(a)] = sympify(a.units.dimension) self.visit(component_class) @property def base_nineml_children(self): return self.as_class.nineml_children def dimension_of(self, element): if isinstance(element, basestring): element = self.component_class.element( element, child_types=self.base_nineml_children) return Dimension.from_sympy(self._flatten(element)) def _flatten(self, expr, **kwargs): # @UnusedVariable expr = sympify(expr) if expr in self.reserved_symbol_dims: flattened = self._flatten_reserved(expr, **kwargs) elif isinstance(expr, sympy.Symbol): flattened = self._flatten_symbol(expr, **kwargs) elif isinstance(expr, (sympy.GreaterThan, sympy.LessThan, sympy.StrictGreaterThan, sympy.StrictLessThan, BooleanTrue, BooleanFalse, sympy.And, sympy.Or, sympy.Not)): flattened = self._flatten_boolean(expr, **kwargs) elif (isinstance(expr, (sympy.Integer, sympy.Float, int, float, sympy.Rational)) or type(expr).__name__ in ('Pi',)): flattened = self._flatten_constant(expr, **kwargs) elif isinstance(expr, sympy.Pow): flattened = self._flatten_power(expr, **kwargs) elif isinstance(expr, (sympy.Add, sympy.Piecewise, ExprCondPair)): flattened = self._flatten_matching(expr, **kwargs) elif isinstance(type(expr), sympy.FunctionClass): flattened = self._flatten_function(expr, **kwargs) elif isinstance(expr, sympy.Mul): flattened = self._flatten_multiplied(expr, **kwargs) else: assert False, "Unrecognised expression type '{}'".format(expr) return flattened def find_element(self, sym): name = Expression.symbol_to_str(sym) element = None for context in reversed(self.contexts): try: element = context.parent.element( name, child_types=context.parent_cls.nineml_children) except KeyError: pass if element is None: raise NineMLNameError( "'{}' element was not found in component class '{}'" .format(sym, self.component_class.name)) return element def _flatten_symbol(self, sym): try: flattened = self._dims[sym] except KeyError: element = self.find_element(sym) flattened = self._flatten(element.rhs) 
            self._dims[sym] = flattened
        return flattened

    def _flatten_boolean(self, expr, **kwargs):  # @UnusedVariable
        return 0

    def _flatten_constant(self, expr, **kwargs):  # @UnusedVariable
        return 1

    def _flatten_function(self, expr, **kwargs):  # @UnusedVariable
        return 1

    def _flatten_matching(self, expr, **kwargs):  # @UnusedVariable
        return self._flatten(expr.args[0])

    def _flatten_multiplied(self, expr, **kwargs):  # @UnusedVariable
        flattened = reduce(operator.mul,
                           (self._flatten(a) for a in expr.args))
        if isinstance(flattened, sympy.Basic):
            flattened = flattened.powsimp()  # Simplify the expression
        return flattened

    def _flatten_power(self, expr, **kwargs):  # @UnusedVariable
        return (self._flatten(expr.args[0]) ** expr.args[1])

    def _flatten_reserved(self, expr, **kwargs):  # @UnusedVariable
        return self.reserved_symbol_dims[expr]

    def action_alias(self, alias, **kwargs):  # @UnusedVariable
        self._flatten(alias)

    def default_action(self, obj, nineml_cls, **kwargs):
        pass
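# Standalone sketch (not from the nineml module above) of the idea behind
# ComponentDimensionResolver: unit dimensions are tracked as sympy expressions,
# so the dimension of a product is the product of the operands' dimensions and
# powsimp() tidies the result (cf. _flatten_multiplied / _flatten_power). The
# symbol names and the dims table below are illustrative only.
import operator
from functools import reduce
import sympy

dims = {sympy.Symbol('v'): sympy.Symbol('voltage'),
        sympy.Symbol('i'): sympy.Symbol('current')}

def flatten(expr):
    expr = sympy.sympify(expr)
    if expr in dims:                       # a declared quantity
        return dims[expr]
    if isinstance(expr, sympy.Pow):        # dimension of x**n is dim(x)**n
        return flatten(expr.args[0]) ** expr.args[1]
    if isinstance(expr, sympy.Mul):        # dimension of a*b is dim(a)*dim(b)
        return reduce(operator.mul, (flatten(a) for a in expr.args)).powsimp()
    return sympy.Integer(1)                # numeric constants are dimensionless

print(flatten('v / i'))                    # -> voltage/current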
INCF/lib9ML
nineml/abstraction/componentclass/visitors/queriers.py
Python
bsd-3-clause
9,896
[ "VisIt" ]
ec407198e053fc4af19812003c567516a2183972ccf35bb24a2a81b97b9a21e7
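# A toy sketch (not part of either file above) of the inference rule used by
# ComponentClassInterfaceInferer: any symbol that appears on a right-hand side
# but is never declared must be a parameter. The real visitor also excludes
# state variables, constants and reserved identifiers; this simplification only
# removes alias left-hand sides. The alias strings are made up.
import sympy

aliases = {'g': 'gbar * m**3 * h', 'i': 'g * (v - e_rev)'}

declared = set(aliases)                       # alias left-hand sides
atoms = set()
for rhs in aliases.values():
    atoms.update(str(s) for s in sympy.sympify(rhs).free_symbols)

parameters = atoms - declared
print(sorted(parameters))                     # ['e_rev', 'gbar', 'h', 'm', 'v']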
""" DIRAC JobDB class is a front-end to the main WMS database containing job definitions and status information. It is used in most of the WMS components The following methods are provided for public usage: getJobAttribute() getJobAttributes() getAllJobAttributes() getDistinctJobAttributes() getAttributesForJobList() getJobParameter() getJobParameters() getAllJobParameters() getInputData() getJobJDL() selectJobs() selectJobsWithStatus() setJobAttribute() setJobAttributes() setJobParameter() setJobParameters() setJobJDL() setJobStatus() setInputData() insertNewJobIntoDB() removeJobFromDB() rescheduleJob() rescheduleJobs() getMask() setMask() allowSiteInMask() banSiteInMask() getCounters() """ __RCSID__ = "$Id$" import operator from DIRAC.Core.Utilities import DErrno from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR from DIRAC.Core.Utilities import Time from DIRAC.Core.Utilities.DErrno import EWMSSUBM from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader from DIRAC.ConfigurationSystem.Client.Config import gConfig from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations from DIRAC.Core.Base.DB import DB from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest from DIRAC.ResourceStatusSystem.Client.SiteStatus import SiteStatus ############################################################################# JOB_STATES = ['Submitting', 'Received', 'Checking', 'Staging', 'Waiting', 'Matched', 'Running', 'Stalled', 'Done', 'Completed', 'Failed'] JOB_FINAL_STATES = ['Done', 'Completed', 'Failed'] class JobDB(DB): """ Interface to MySQL-based JobDB """ def __init__(self): """ Standard Constructor """ DB.__init__(self, 'JobDB', 'WorkloadManagement/JobDB') # data member to check if __init__ went through without error self.__initialized = False self.maxRescheduling = self.getCSOption('MaxRescheduling', 3) # loading the function that will be used to determine the platform (it can be VO specific) res = ObjectLoader().loadObject("ConfigurationSystem.Client.Helpers.Resources", 'getDIRACPlatform') if not res['OK']: self.log.fatal(res['Message']) self.getDIRACPlatform = res['Value'] self.jobAttributeNames = [] self.siteClient = SiteStatus() result = self.__getAttributeNames() if not result['OK']: self.log.fatal('JobDB: Can not retrieve job Attributes') return self.jdl2DBParameters = ['JobName', 'JobType', 'JobGroup'] self.log.info("MaxReschedule", self.maxRescheduling) self.log.info("==================================================") self.__initialized = True def isValid(self): """ Check if correctly initialised """ return self.__initialized def __getAttributeNames(self): """ get Name of Job Attributes defined in DB set self.jobAttributeNames to the list of Names return S_OK() return S_ERROR upon error """ res = self._query('DESCRIBE Jobs') if not res['OK']: return res self.jobAttributeNames = [row[0] for row in res['Value']] return S_OK() ############################################################################# def getAttributesForJobList(self, jobIDList, attrList=None): """ Get attributes for the jobs in the the jobIDList. 
Returns an S_OK structure with a dictionary of dictionaries as its Value: ValueDict[jobID][attribute_name] = attribute_value """ if not jobIDList: return S_OK({}) if attrList: missingAttr = [repr(x) for x in attrList if x not in self.jobAttributeNames] if missingAttr: return S_ERROR("JobDB.getAttributesForJobList: Unknown Attribute(s): %s" % ", ".join(missingAttr)) attrNames = ','.join(str(x) for x in attrList if x in self.jobAttributeNames) attr_tmp_list = attrList else: attrNames = ','.join(self.jobAttributeNames) attr_tmp_list = self.jobAttributeNames jobList = ','.join([str(x) for x in jobIDList]) cmd = 'SELECT JobID,%s FROM Jobs WHERE JobID in ( %s )' % (attrNames, jobList) res = self._query(cmd) if not res['OK']: return res try: retDict = {} for retValues in res['Value']: jobID = retValues[0] jobDict = {'JobID': jobID} # Make a dict from the list of attributes names and values for name, value in zip(attr_tmp_list, retValues[1:]): try: value = value.tostring() except BaseException: value = str(value) jobDict[name] = value retDict[int(jobID)] = jobDict return S_OK(retDict) except BaseException as e: return S_ERROR('JobDB.getAttributesForJobList: Failed\n%s' % repr(e)) ############################################################################# def getDistinctJobAttributes(self, attribute, condDict=None, older=None, newer=None, timeStamp='LastUpdateTime'): """ Get distinct values of the job attribute under specified conditions """ return self.getDistinctAttributeValues('Jobs', attribute, condDict=condDict, older=older, newer=newer, timeStamp=timeStamp) ############################################################################# def traceJobParameter(self, site, localID, parameter, date=None, until=None): ret = self.traceJobParameters(site, localID, [parameter], None, date, until) if not ret['OK']: return ret returnDict = {} for jobID in ret['Value']: returnDict[jobID] = ret['Value'][jobID].get(parameter) return S_OK(returnDict) ############################################################################# def traceJobParameters(self, site, localIDs, paramList=None, attributeList=None, date=None, until=None): import datetime exactTime = False if not attributeList: attributeList = [] attributeList = list(set(attributeList) | set(['StartExecTime', 'SubmissionTime', 'HeartBeatTime', 'EndExecTime', 'JobName', 'OwnerDN', 'OwnerGroup'])) try: if isinstance(localIDs, (list, dict)): localIDs = [int(localID) for localID in localIDs] else: localIDs = [int(localIDs)] except BaseException: return S_ERROR("localIDs must be integers") now = datetime.datetime.utcnow() if until: if until.lower() == 'now': until = now else: try: until = datetime.datetime.strptime(until, '%Y-%m-%d') except BaseException: return S_ERROR("Error in format for 'until', expected '%Y-%m-%d'") if not date: until = now since = until - datetime.timedelta(hours=24) else: since = None for dFormat in ('%Y-%m-%d', '%Y-%m-%d %H:%M', '%Y-%m-%d %H:%M:%S'): try: since = datetime.datetime.strptime(date, dFormat) break except BaseException: exactTime = True if not since: return S_ERROR('Error in date format') if exactTime: exactTime = since if not until: until = now else: if not until: until = since + datetime.timedelta(hours=24) if since > now: return S_ERROR('Cannot find jobs in the future') if until > now: until = now result = self.selectJobs({'Site': site}, older=str(until), newer=str(since)) if not result['OK']: return result if not result['Value']: return S_ERROR('No jobs found at %s for date %s' % (site, date)) resultDict = 
{'Successful': {}, 'Failed': {}} for jobID in result['Value']: if jobID: ret = self.getJobParameter(jobID, 'LocalJobID') if not ret['OK']: return ret localID = ret['Value'] if localID and int(localID) in localIDs: attributes = self.getJobAttributes(jobID, attributeList) if not attributes['OK']: return attributes attributes = attributes['Value'] if exactTime: for att in ('StartExecTime', 'SubmissionTime'): startTime = attributes.get(att) if startTime == 'None': startTime = None if startTime: break startTime = datetime.datetime.strptime(startTime, '%Y-%m-%d %H:%M:%S') if startTime else now for att in ('EndExecTime', 'HeartBeatTime'): lastTime = attributes.get(att) if lastTime == 'None': lastTime = None if lastTime: break lastTime = datetime.datetime.strptime(lastTime, '%Y-%m-%d %H:%M:%S') if lastTime else now okTime = (exactTime >= startTime and exactTime <= lastTime) else: okTime = True if okTime: ret = self.getJobParameters(jobID, paramList=paramList) if not ret['OK']: return ret attributes.update(ret['Value'].get(jobID, {})) resultDict['Successful'].setdefault(int(localID), {})[int(jobID)] = attributes for localID in localIDs: if localID not in resultDict['Successful']: resultDict['Failed'][localID] = 'localID not found' return S_OK(resultDict) ############################################################################# def getJobParameters(self, jobID, paramList=None): """ Get Job Parameters defined for jobID. Returns a dictionary with the Job Parameters. If parameterList is empty - all the parameters are returned. """ if isinstance(jobID, (basestring, int, long)): jobID = [jobID] jobIDList = [] for jID in jobID: ret = self._escapeString(str(jID)) if not ret['OK']: return ret jobIDList.append(ret['Value']) # self.log.debug('JobDB.getParameters: Getting Parameters for jobs %s' % ','.join(jobIDList)) resultDict = {} if paramList: if isinstance(paramList, basestring): paramList = paramList.split(',') paramNameList = [] for pn in paramList: ret = self._escapeString(pn) if not ret['OK']: return ret paramNameList.append(ret['Value']) cmd = "SELECT JobID, Name, Value FROM JobParameters WHERE JobID IN (%s) AND Name IN (%s)" % \ (','.join(jobIDList), ','.join(paramNameList)) result = self._query(cmd) if result['OK']: if result['Value']: for res_jobID, res_name, res_value in result['Value']: try: res_value = res_value.tostring() except BaseException: pass resultDict.setdefault(res_jobID, {})[res_name] = res_value return S_OK(resultDict) # there's a slim chance that this is an empty dictionary else: return S_ERROR('JobDB.getJobParameters: failed to retrieve parameters') else: result = self.getFields('JobParameters', ['JobID', 'Name', 'Value'], {'JobID': jobID}) if not result['OK']: return result for res_jobID, res_name, res_value in result['Value']: try: res_value = res_value.tostring() except BaseException: pass resultDict.setdefault(res_jobID, {})[res_name] = res_value return S_OK(resultDict) # there's a slim chance that this is an empty dictionary ############################################################################# def getAtticJobParameters(self, jobID, paramList=None, rescheduleCounter=-1): """ Get Attic Job Parameters defined for a job with jobID. Returns a dictionary with the Attic Job Parameters per each rescheduling cycle. If parameterList is empty - all the parameters are returned. If recheduleCounter = -1, all cycles are returned. 
""" ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] # self.log.debug('JobDB.getAtticJobParameters: Getting Attic Parameters for job %s' % jobID) resultDict = {} paramCondition = '' if paramList: paramNameList = [] for x in paramList: ret = self._escapeString(x) if not ret['OK']: return ret paramNameList.append(x) paramNames = ','.join(paramNameList) paramCondition = " AND Name in (%s)" % paramNames rCounter = '' if rescheduleCounter != -1: rCounter = ' AND RescheduleCycle=%d' % int(rescheduleCounter) cmd = "SELECT Name, Value, RescheduleCycle from AtticJobParameters" cmd += " WHERE JobID=%s %s %s" % (jobID, paramCondition, rCounter) result = self._query(cmd) if result['OK']: if result['Value']: for name, value, counter in result['Value']: try: value = value.tostring() except BaseException: pass resultDict.setdefault(counter, {})[name] = value return S_OK(resultDict) else: return S_ERROR('JobDB.getAtticJobParameters: failed to retrieve parameters') ############################################################################# def getJobAttributes(self, jobID, attrList=None): """ Get all Job Attributes for a given jobID. Return a dictionary with all Job Attributes, return an empty dictionary if matching job found """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] attrNameList = [] for x in attrList if attrList else self.jobAttributeNames: ret = self._escapeString(x) if not ret['OK']: return ret x = "`" + ret['Value'][1:-1] + "`" attrNameList.append(x) attrNames = ','.join(attrNameList) # self.log.debug('JobDB.getAllJobAttributes: Getting Attributes for job = %s.' % jobID) cmd = 'SELECT %s FROM Jobs WHERE JobID=%s' % (attrNames, jobID) res = self._query(cmd) if not res['OK']: return res if not res['Value']: return S_OK({}) values = res['Value'][0] attributes = {} for name, value in zip(attrList if attrList else self.jobAttributeNames, values): attributes[name] = str(value) return S_OK(attributes) ############################################################################# def getJobAttribute(self, jobID, attribute): """ Get the given attribute of a job specified by its jobID """ result = self.getJobAttributes(jobID, [attribute]) if result['OK']: value = result['Value'][attribute] return S_OK(value) return result ############################################################################# def getJobParameter(self, jobID, parameter): """ Get the given parameter of a job specified by its jobID """ result = self.getJobParameters(jobID, [parameter]) if not result['OK']: return result return S_OK(result.get('Value', {}).get(jobID, {}).get(parameter)) ############################################################################# def getJobOptParameter(self, jobID, parameter): """ Get optimizer parameters for the given job. """ result = self.getFields('OptimizerParameters', ['Value'], {'JobID': jobID, 'Name': parameter}) if result['OK']: if result['Value']: return S_OK(result['Value'][0][0]) return S_ERROR('Parameter not found') return S_ERROR('Failed to access database') ############################################################################# def getJobOptParameters(self, jobID, paramList=None): """ Get optimizer parameters for the given job. 
If the list of parameter names is empty, get all the parameters then """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] resultDict = {} if paramList: paramNameList = [] for x in paramList: ret = self._escapeString(x) if not ret['OK']: return ret paramNameList.append(ret['Value']) paramNames = ','.join(paramNameList) cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s and Name in (%s)" % (jobID, paramNames) else: cmd = "SELECT Name, Value from OptimizerParameters WHERE JobID=%s" % jobID result = self._query(cmd) if result['OK']: if result['Value']: for name, value in result['Value']: try: value = value.tostring() except BaseException: pass resultDict[name] = value return S_OK(resultDict) else: return S_ERROR('JobDB.getJobOptParameters: failed to retrieve parameters') ############################################################################# def getInputData(self, jobID): """Get input data for the given job """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] cmd = 'SELECT LFN FROM InputData WHERE JobID=%s' % jobID res = self._query(cmd) if not res['OK']: return res inputData = [i[0] for i in res['Value'] if i[0].strip()] for index, lfn in enumerate(inputData): if lfn.lower().startswith('lfn:'): inputData[index] = lfn[4:] return S_OK(inputData) ############################################################################# def setInputData(self, jobID, inputData): """Inserts input data for the given job """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] cmd = 'DELETE FROM InputData WHERE JobID=%s' % (jobID) result = self._update(cmd) if not result['OK']: result = S_ERROR('JobDB.setInputData: operation failed.') for lfn in inputData: # some jobs are setting empty string as InputData if not lfn: continue ret = self._escapeString(lfn.strip()) if not ret['OK']: return ret lfn = ret['Value'] cmd = 'INSERT INTO InputData (JobID,LFN) VALUES (%s, %s )' % (jobID, lfn) res = self._update(cmd) if not res['OK']: return res return S_OK('Files added') ############################################################################# def setOptimizerChain(self, jobID, optimizerList): """ Set the optimizer chain for the given job. 
The 'TaskQueue' optimizer should be the last one in the chain, it is added if not present in the optimizerList """ optString = ','.join(optimizerList) result = self.setJobOptParameter(jobID, 'OptimizerChain', optString) return result ############################################################################# def setNextOptimizer(self, jobID, currentOptimizer): """ Set the job status to be processed by the next optimizer in the chain """ result = self.getJobOptParameter(jobID, 'OptimizerChain') if not result['OK']: return result optList = result['Value'].split(',') if currentOptimizer not in optList: return S_ERROR('Could not find ' + currentOptimizer + ' in chain') try: # Append None to get a list of (opt,nextOpt) optList.append(None) nextOptimizer = None for opt, nextOptimizer in zip(optList[:-1], optList[1:]): if opt == currentOptimizer: break if nextOptimizer is None: return S_ERROR('Unexpected end of the Optimizer Chain') except ValueError: return S_ERROR('The ' + currentOptimizer + ' not found in the chain') result = self.setJobStatus(jobID, status="Checking", minor=nextOptimizer) if not result['OK']: return result return S_OK(nextOptimizer) ############################################################################ def selectJobs(self, condDict, older=None, newer=None, timeStamp='LastUpdateTime', orderAttribute=None, limit=None): """ Select jobs matching the following conditions: - condDict dictionary of required Key = Value pairs; - with the last update date older and/or newer than given dates; The result is ordered by JobID if requested, the result is limited to a given number of jobs if requested. """ # self.log.debug('JobDB.selectJobs: retrieving jobs.') res = self.getFields('Jobs', ['JobID'], condDict=condDict, limit=limit, older=older, newer=newer, timeStamp=timeStamp, orderAttribute=orderAttribute) if not res['OK']: return res if not res['Value']: return S_OK([]) return S_OK([self._to_value(i) for i in res['Value']]) ############################################################################# def setJobAttribute(self, jobID, attrName, attrValue, update=False, myDate=None): """ Set an attribute value for job specified by jobID. The LastUpdate time stamp is refreshed if explicitly requested :param jobID: job ID :type jobID: int or str :param str attrName: attribute name :param str attrValue: attribute value :param bool update: optional flag to update the job LastUpdateTime stamp :param str myDate: optional time stamp for the LastUpdateTime attribute :return : S_OK/S_ERROR """ if attrName not in self.jobAttributeNames: return S_ERROR(EWMSSUBM, 'Request to set non-existing job attribute') ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(attrValue) if not ret['OK']: return ret value = ret['Value'] if update: cmd = "UPDATE Jobs SET %s=%s,LastUpdateTime=UTC_TIMESTAMP() WHERE JobID=%s" % (attrName, value, jobID) else: cmd = "UPDATE Jobs SET %s=%s WHERE JobID=%s" % (attrName, value, jobID) if myDate: cmd += ' AND LastUpdateTime < %s' % myDate res = self._update(cmd) if res['OK']: return res return S_ERROR('JobDB.setAttribute: failed to set attribute') ############################################################################# def setJobAttributes(self, jobID, attrNames, attrValues, update=False, myDate=None): """ Set one or more attribute values for one or more jobs specified by jobID. 
The LastUpdate time stamp is refreshed if explicitly requested with the update flag :param jobID: one or more job IDs :type jobID: int or str or python:list :param list attrNames: names of attributes to update :param list attrValues: corresponding values of attributes to update :param bool update: optional flag to update the job LastUpdateTime stamp :param str myDate: optional time stamp for the LastUpdateTime attribute :return : S_OK/S_ERROR """ jobIDList = jobID if not isinstance(jobID, (list, tuple)): jobIDList = [jobID] jIDList = [] for jID in jobIDList: ret = self._escapeString(jID) if not ret['OK']: return ret jIDList.append(ret['Value']) if len(attrNames) != len(attrValues): return S_ERROR('JobDB.setAttributes: incompatible Argument length') for attrName in attrNames: if attrName not in self.jobAttributeNames: return S_ERROR(EWMSSUBM, 'Request to set non-existing job attribute') attr = [] for name, value in zip(attrNames, attrValues): ret = self._escapeString(value) if not ret['OK']: return ret attr.append("%s=%s" % (name, ret['Value'])) if update: attr.append("LastUpdateTime=UTC_TIMESTAMP()") if not attr: return S_ERROR('JobDB.setAttributes: Nothing to do') cmd = 'UPDATE Jobs SET %s WHERE JobID in ( %s )' % (', '.join(attr), ', '.join(jIDList)) if myDate: cmd += ' AND LastUpdateTime < %s' % myDate result = self._transaction([cmd]) return result ############################################################################# def setJobStatus(self, jobID, status='', minor='', application='', appCounter=None): """ Set status of the job specified by its jobID """ # Do not update the LastUpdate time stamp if setting the Stalled status update_flag = True if status == "Stalled": update_flag = False attrNames = [] attrValues = [] if status: attrNames.append('Status') attrValues.append(status) if minor: attrNames.append('MinorStatus') attrValues.append(minor) if application: attrNames.append('ApplicationStatus') attrValues.append(application[:255]) if appCounter: attrNames.append('ApplicationNumStatus') attrValues.append(appCounter) result = self.setJobAttributes(jobID, attrNames, attrValues, update=update_flag) if not result['OK']: return result return S_OK() ############################################################################# def setEndExecTime(self, jobID, endDate=None): """ Set EndExecTime time stamp """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] if endDate: ret = self._escapeString(endDate) if not ret['OK']: return ret endDate = ret['Value'] req = "UPDATE Jobs SET EndExecTime=%s WHERE JobID=%s AND EndExecTime IS NULL" % (endDate, jobID) else: req = "UPDATE Jobs SET EndExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND EndExecTime IS NULL" % jobID result = self._update(req) return result ############################################################################# def setStartExecTime(self, jobID, startDate=None): """ Set StartExecTime time stamp """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] if startDate: ret = self._escapeString(startDate) if not ret['OK']: return ret startDate = ret['Value'] req = "UPDATE Jobs SET StartExecTime=%s WHERE JobID=%s AND StartExecTime IS NULL" % (startDate, jobID) else: req = "UPDATE Jobs SET StartExecTime=UTC_TIMESTAMP() WHERE JobID=%s AND StartExecTime IS NULL" % jobID result = self._update(req) return result ############################################################################# def setJobParameter(self, jobID, key, value): """ Set a parameter specified by name,value 
pair for the job JobID """ ret = self._escapeString(key) if not ret['OK']: return ret e_key = ret['Value'] ret = self._escapeString(value) if not ret['OK']: return ret e_value = ret['Value'] cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES (%d,%s,%s)' % (int(jobID), e_key, e_value) result = self._update(cmd) if not result['OK']: result = S_ERROR('JobDB.setJobParameter: operation failed.') return result ############################################################################# def setJobParameters(self, jobID, parameters): """ Set parameters specified by a list of name/value pairs for the job JobID """ if not parameters: return S_OK() insertValueList = [] for name, value in parameters: ret = self._escapeString(name) if not ret['OK']: return ret e_name = ret['Value'] ret = self._escapeString(value) if not ret['OK']: return ret e_value = ret['Value'] insertValueList.append('(%s,%s,%s)' % (jobID, e_name, e_value)) cmd = 'REPLACE JobParameters (JobID,Name,Value) VALUES %s' % ', '.join(insertValueList) result = self._update(cmd) if not result['OK']: return S_ERROR('JobDB.setJobParameters: operation failed.') return result ############################################################################# def setJobOptParameter(self, jobID, name, value): """ Set an optimzer parameter specified by name,value pair for the job JobID """ ret = self._escapeString(jobID) if not ret['OK']: return ret e_jobID = ret['Value'] ret = self._escapeString(name) if not ret['OK']: return ret e_name = ret['Value'] cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % (e_jobID, e_name) if not self._update(cmd)['OK']: return S_ERROR('JobDB.setJobOptParameter: operation failed.') result = self.insertFields('OptimizerParameters', ['JobID', 'Name', 'Value'], [jobID, name, value]) if not result['OK']: return S_ERROR('JobDB.setJobOptParameter: operation failed.') return S_OK() ############################################################################# def removeJobOptParameter(self, jobID, name): """ Remove the specified optimizer parameter for jobID """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(name) if not ret['OK']: return ret name = ret['Value'] cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s AND Name=%s' % (jobID, name) if not self._update(cmd)['OK']: return S_ERROR('JobDB.removeJobOptParameter: operation failed.') return S_OK() ############################################################################# def setAtticJobParameter(self, jobID, key, value, rescheduleCounter): """ Set attic parameter for job specified by its jobID when job rescheduling for later debugging """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(key) if not ret['OK']: return ret key = ret['Value'] ret = self._escapeString(value) if not ret['OK']: return ret value = ret['Value'] ret = self._escapeString(rescheduleCounter) if not ret['OK']: return ret rescheduleCounter = ret['Value'] cmd = 'INSERT INTO AtticJobParameters (JobID,RescheduleCycle,Name,Value) VALUES(%s,%s,%s,%s)' % \ (jobID, rescheduleCounter, key, value) result = self._update(cmd) if not result['OK']: result = S_ERROR('JobDB.setAtticJobParameter: operation failed.') return result ############################################################################# def __setInitialJobParameters(self, classadJob, jobID): """ Set initial job parameters as was defined in the Classad """ # Extract initital job parameters parameters = {} if 
classadJob.lookupAttribute("Parameters"): parameters = classadJob.getDictionaryFromSubJDL("Parameters") res = self.setJobParameters(jobID, parameters.items()) if not res['OK']: return res return S_OK() ############################################################################# def setJobJDL(self, jobID, jdl=None, originalJDL=None): """ Insert JDL's for job specified by jobID """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(jdl) if not ret['OK']: return ret e_JDL = ret['Value'] ret = self._escapeString(originalJDL) if not ret['OK']: return ret e_originalJDL = ret['Value'] req = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID result = self._query(req) updateFlag = False if result['OK']: if result['Value']: updateFlag = True if jdl: if updateFlag: cmd = "UPDATE JobJDLs Set JDL=%s WHERE JobID=%s" % (e_JDL, jobID) else: cmd = "INSERT INTO JobJDLs (JobID,JDL) VALUES (%s,%s)" % (jobID, e_JDL) result = self._update(cmd) if not result['OK']: return result if originalJDL: if updateFlag: cmd = "UPDATE JobJDLs Set OriginalJDL=%s WHERE JobID=%s" % (e_originalJDL, jobID) else: cmd = "INSERT INTO JobJDLs (JobID,OriginalJDL) VALUES (%s,%s)" % (jobID, e_originalJDL) result = self._update(cmd) return result ############################################################################# def __insertNewJDL(self, jdl): """Insert a new JDL in the system, this produces a new JobID """ err = 'JobDB.__insertNewJDL: Failed to retrieve a new Id.' result = self.insertFields('JobJDLs', ['JDL', 'JobRequirements', 'OriginalJDL'], ['', '', jdl]) if not result['OK']: self.log.error('Can not insert New JDL', result['Message']) return result if 'lastRowId' not in result: return S_ERROR('%s' % err) jobID = int(result['lastRowId']) self.log.info('JobDB: New JobID served', "%s" % jobID) return S_OK(jobID) ############################################################################# def getJobJDL(self, jobID, original=False, status=''): """ Get JDL for job specified by its jobID. By default the current job JDL is returned. 
If 'original' argument is True, original JDL is returned """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(status) if not ret['OK']: return ret e_status = ret['Value'] if original: cmd = "SELECT OriginalJDL FROM JobJDLs WHERE JobID=%s" % jobID else: cmd = "SELECT JDL FROM JobJDLs WHERE JobID=%s" % jobID if status: cmd = cmd + " AND Status=%s" % e_status result = self._query(cmd) if result['OK']: jdl = result['Value'] if not jdl: return S_OK(jdl) return S_OK(result['Value'][0][0]) return result ############################################################################# def insertNewJobIntoDB(self, jdl, owner, ownerDN, ownerGroup, diracSetup, initialStatus="Received", initialMinorStatus="Job accepted"): """ Insert the initial JDL into the Job database, Do initial JDL crosscheck, Set Initial job Attributes and Status :param str jdl: job description JDL :param str owner: job owner user name :param str ownerDN: job owner DN :param str ownerGroup: job owner group :param str diracSetup: setup in which context the job is submitted :param str initialStatus: optional initial job status (Received by default) :param str initialMinorStatus: optional initial minor job status :return : new job ID """ jobManifest = JobManifest() result = jobManifest.load(jdl) if not result['OK']: return result jobManifest.setOptionsFromDict({'OwnerName': owner, 'OwnerDN': ownerDN, 'OwnerGroup': ownerGroup, 'DIRACSetup': diracSetup}) result = jobManifest.check() if not result['OK']: return result jobAttrNames = [] jobAttrValues = [] # 1.- insert original JDL on DB and get new JobID # Fix the possible lack of the brackets in the JDL if jdl.strip()[0].find('[') != 0: jdl = '[' + jdl + ']' result = self.__insertNewJDL(jdl) if not result['OK']: return S_ERROR(EWMSSUBM, 'Failed to insert JDL in to DB') jobID = result['Value'] jobManifest.setOption('JobID', jobID) jobAttrNames.append('JobID') jobAttrValues.append(jobID) jobAttrNames.append('LastUpdateTime') jobAttrValues.append(Time.toString()) jobAttrNames.append('SubmissionTime') jobAttrValues.append(Time.toString()) jobAttrNames.append('Owner') jobAttrValues.append(owner) jobAttrNames.append('OwnerDN') jobAttrValues.append(ownerDN) jobAttrNames.append('OwnerGroup') jobAttrValues.append(ownerGroup) jobAttrNames.append('DIRACSetup') jobAttrValues.append(diracSetup) # 2.- Check JDL and Prepare DIRAC JDL jobJDL = jobManifest.dumpAsJDL() # Replace the JobID placeholder if any if jobJDL.find('%j') != -1: jobJDL = jobJDL.replace('%j', str(jobID)) classAdJob = ClassAd(jobJDL) classAdReq = ClassAd('[]') retVal = S_OK(jobID) retVal['JobID'] = jobID if not classAdJob.isOK(): jobAttrNames.append('Status') jobAttrValues.append('Failed') jobAttrNames.append('MinorStatus') jobAttrValues.append('Error in JDL syntax') result = self.insertFields('Jobs', jobAttrNames, jobAttrValues) if not result['OK']: return result retVal['Status'] = 'Failed' retVal['MinorStatus'] = 'Error in JDL syntax' return retVal classAdJob.insertAttributeInt('JobID', jobID) result = self.__checkAndPrepareJob(jobID, classAdJob, classAdReq, owner, ownerDN, ownerGroup, diracSetup, jobAttrNames, jobAttrValues) if not result['OK']: return result priority = classAdJob.getAttributeInt('Priority') if priority is None: priority = 0 jobAttrNames.append('UserPriority') jobAttrValues.append(priority) for jdlName in self.jdl2DBParameters: # Defaults are set by the DB. 
jdlValue = classAdJob.getAttributeString(jdlName) if jdlValue: jobAttrNames.append(jdlName) jobAttrValues.append(jdlValue) jdlValue = classAdJob.getAttributeString('Site') if jdlValue: jobAttrNames.append('Site') if jdlValue.find(',') != -1: jobAttrValues.append('Multiple') else: jobAttrValues.append(jdlValue) jobAttrNames.append('VerifiedFlag') jobAttrValues.append('True') jobAttrNames.append('Status') jobAttrValues.append(initialStatus) jobAttrNames.append('MinorStatus') jobAttrValues.append(initialMinorStatus) reqJDL = classAdReq.asJDL() classAdJob.insertAttributeInt('JobRequirements', reqJDL) jobJDL = classAdJob.asJDL() result = self.setJobJDL(jobID, jobJDL) if not result['OK']: return result # Adding the job in the Jobs table result = self.insertFields('Jobs', jobAttrNames, jobAttrValues) if not result['OK']: return result # Setting the Job parameters result = self.__setInitialJobParameters(classAdJob, jobID) if not result['OK']: return result # Looking for the Input Data inputData = [] if classAdJob.lookupAttribute('InputData'): inputData = classAdJob.getListFromExpression('InputData') values = [] ret = self._escapeString(jobID) if not ret['OK']: return ret e_jobID = ret['Value'] for lfn in inputData: # some jobs are setting empty string as InputData if not lfn: continue ret = self._escapeString(lfn.strip()) if not ret['OK']: return ret lfn = ret['Value'] values.append('(%s, %s )' % (e_jobID, lfn)) if values: cmd = 'INSERT INTO InputData (JobID,LFN) VALUES %s' % ', '.join(values) result = self._update(cmd) if not result['OK']: return result retVal['Status'] = initialStatus retVal['MinorStatus'] = initialMinorStatus return retVal def __checkAndPrepareJob(self, jobID, classAdJob, classAdReq, owner, ownerDN, ownerGroup, diracSetup, jobAttrNames, jobAttrValues): """ Check Consistency of Submitted JDL and set some defaults Prepare subJDL with Job Requirements """ error = '' vo = getVOForGroup(ownerGroup) jdlDiracSetup = classAdJob.getAttributeString('DIRACSetup') jdlOwner = classAdJob.getAttributeString('Owner') jdlOwnerDN = classAdJob.getAttributeString('OwnerDN') jdlOwnerGroup = classAdJob.getAttributeString('OwnerGroup') jdlVO = classAdJob.getAttributeString('VirtualOrganization') # The below is commented out since this is always overwritten by the submitter IDs # but the check allows to findout inconsistent client environments if jdlDiracSetup and jdlDiracSetup != diracSetup: error = 'Wrong DIRAC Setup in JDL' if jdlOwner and jdlOwner != owner: error = 'Wrong Owner in JDL' elif jdlOwnerDN and jdlOwnerDN != ownerDN: error = 'Wrong Owner DN in JDL' elif jdlOwnerGroup and jdlOwnerGroup != ownerGroup: error = 'Wrong Owner Group in JDL' elif jdlVO and jdlVO != vo: error = 'Wrong Virtual Organization in JDL' classAdJob.insertAttributeString('Owner', owner) classAdJob.insertAttributeString('OwnerDN', ownerDN) classAdJob.insertAttributeString('OwnerGroup', ownerGroup) if vo: classAdJob.insertAttributeString('VirtualOrganization', vo) classAdReq.insertAttributeString('Setup', diracSetup) classAdReq.insertAttributeString('OwnerDN', ownerDN) classAdReq.insertAttributeString('OwnerGroup', ownerGroup) if vo: classAdReq.insertAttributeString('VirtualOrganization', vo) setup = gConfig.getValue('/DIRAC/Setup', '') voPolicyDict = gConfig.getOptionsDict('/DIRAC/VOPolicy/%s/%s' % (vo, setup)) # voPolicyDict = gConfig.getOptionsDict('/DIRAC/VOPolicy') if voPolicyDict['OK']: voPolicy = voPolicyDict['Value'] for param, val in voPolicy.items(): if not classAdJob.lookupAttribute(param): 
classAdJob.insertAttributeString(param, val) # priority priority = classAdJob.getAttributeInt('Priority') if priority is None: priority = 0 classAdReq.insertAttributeInt('UserPriority', priority) # CPU time cpuTime = classAdJob.getAttributeInt('CPUTime') if cpuTime is None: # Just in case check for MaxCPUTime for backward compatibility cpuTime = classAdJob.getAttributeInt('MaxCPUTime') if cpuTime is not None: classAdJob.insertAttributeInt('CPUTime', cpuTime) else: opsHelper = Operations(group=ownerGroup, setup=diracSetup) cpuTime = opsHelper.getValue('JobDescription/DefaultCPUTime', 86400) classAdReq.insertAttributeInt('CPUTime', cpuTime) # platform(s) platformList = classAdJob.getListFromExpression('Platform') if platformList: result = self.getDIRACPlatform(platformList) if not result['OK']: return result if result['Value']: classAdReq.insertAttributeVectorString('Platforms', result['Value']) else: error = "OS compatibility info not found" if error: retVal = S_ERROR(EWMSSUBM, error) retVal['JobId'] = jobID retVal['Status'] = 'Failed' retVal['MinorStatus'] = error jobAttrNames.append('Status') jobAttrValues.append('Failed') jobAttrNames.append('MinorStatus') jobAttrValues.append(error) resultInsert = self.setJobAttributes(jobID, jobAttrNames, jobAttrValues) if not resultInsert['OK']: retVal['MinorStatus'] += '; %s' % resultInsert['Message'] return retVal return S_OK() ############################################################################# def removeJobFromDB(self, jobIDs): """Remove job from DB Remove job from the Job DB and clean up all the job related data in various tables """ # ret = self._escapeString(jobID) # if not ret['OK']: # return ret # e_jobID = ret['Value'] if not isinstance(jobIDs, list): jobIDList = [jobIDs] else: jobIDList = jobIDs failedTablesList = [] jobIDString = ','.join([str(j) for j in jobIDList]) for table in ['InputData', 'JobParameters', 'AtticJobParameters', 'HeartBeatLoggingInfo', 'OptimizerParameters', 'JobCommands', 'Jobs', 'JobJDLs']: cmd = 'DELETE FROM %s WHERE JobID in (%s)' % (table, jobIDString) result = self._update(cmd) if not result['OK']: failedTablesList.append(table) result = S_OK() if failedTablesList: result = S_ERROR('Errors while job removal') result['FailedTables'] = failedTablesList return result ################################################################# def rescheduleJobs(self, jobIDs): """ Reschedule all the jobs in the given list """ result = S_OK() failedJobs = [] for jobID in jobIDs: result = self.rescheduleJob(jobID) if not result['OK']: failedJobs.append(jobID) if failedJobs: result = S_ERROR('JobDB.rescheduleJobs: Not all the jobs were rescheduled') result['FailedJobs'] = failedJobs return result ############################################################################# def rescheduleJob(self, jobID): """ Reschedule the given job to run again from scratch. 
Retain the already defined parameters in the parameter Attic """ # Check Verified Flag result = self.getJobAttributes(jobID, ['Status', 'MinorStatus', 'VerifiedFlag', 'RescheduleCounter', 'Owner', 'OwnerDN', 'OwnerGroup', 'DIRACSetup']) if result['OK']: resultDict = result['Value'] else: return S_ERROR('JobDB.getJobAttributes: can not retrieve job attributes') if 'VerifiedFlag' not in resultDict: return S_ERROR('Job ' + str(jobID) + ' not found in the system') if not resultDict['VerifiedFlag']: return S_ERROR('Job %s not Verified: Status = %s, MinorStatus = %s' % ( jobID, resultDict['Status'], resultDict['MinorStatus'])) # Check the Reschedule counter first rescheduleCounter = int(resultDict['RescheduleCounter']) + 1 self.maxRescheduling = self.getCSOption('MaxRescheduling', self.maxRescheduling) # Exit if the limit of the reschedulings is reached if rescheduleCounter > self.maxRescheduling: self.log.warn('Maximum number of reschedulings is reached', 'Job %s' % jobID) self.setJobStatus(jobID, status='Failed', minor='Maximum of reschedulings reached') return S_ERROR('Maximum number of reschedulings is reached: %s' % self.maxRescheduling) jobAttrNames = [] jobAttrValues = [] jobAttrNames.append('RescheduleCounter') jobAttrValues.append(rescheduleCounter) # Save the job parameters for later debugging result = self.getJobParameters(jobID) if result['OK']: parDict = result['Value'] for key, value in parDict.get(jobID, {}).iteritems(): result = self.setAtticJobParameter(jobID, key, value, rescheduleCounter - 1) if not result['OK']: break ret = self._escapeString(jobID) if not ret['OK']: return ret e_jobID = ret['Value'] cmd = 'DELETE FROM JobParameters WHERE JobID=%s' % e_jobID res = self._update(cmd) if not res['OK']: return res # Delete optimizer parameters cmd = 'DELETE FROM OptimizerParameters WHERE JobID=%s' % (e_jobID) if not self._update(cmd)['OK']: return S_ERROR('JobDB.removeJobOptParameter: operation failed.') # the JobManager needs to know if there is InputData ??? 
to decide which optimizer to call # proposal: - use the getInputData method res = self.getJobJDL(jobID, original=True) if not res['OK']: return res jdl = res['Value'] # Fix the possible lack of the brackets in the JDL if jdl.strip()[0].find('[') != 0: jdl = '[' + jdl + ']' classAdJob = ClassAd(jdl) classAdReq = ClassAd('[]') retVal = S_OK(jobID) retVal['JobID'] = jobID classAdJob.insertAttributeInt('JobID', jobID) result = self.__checkAndPrepareJob(jobID, classAdJob, classAdReq, resultDict['Owner'], resultDict['OwnerDN'], resultDict['OwnerGroup'], resultDict['DIRACSetup'], jobAttrNames, jobAttrValues) if not result['OK']: return result priority = classAdJob.getAttributeInt('Priority') if priority is None: priority = 0 jobAttrNames.append('UserPriority') jobAttrValues.append(priority) siteList = classAdJob.getListFromExpression('Site') if not siteList: site = 'ANY' elif len(siteList) > 1: site = "Multiple" else: site = siteList[0] jobAttrNames.append('Site') jobAttrValues.append(site) jobAttrNames.append('Status') jobAttrValues.append('Received') jobAttrNames.append('MinorStatus') jobAttrValues.append('Job Rescheduled') jobAttrNames.append('ApplicationStatus') jobAttrValues.append('Unknown') jobAttrNames.append('ApplicationNumStatus') jobAttrValues.append(0) jobAttrNames.append('LastUpdateTime') jobAttrValues.append(Time.toString()) jobAttrNames.append('RescheduleTime') jobAttrValues.append(Time.toString()) reqJDL = classAdReq.asJDL() classAdJob.insertAttributeInt('JobRequirements', reqJDL) jobJDL = classAdJob.asJDL() # Replace the JobID placeholder if any if jobJDL.find('%j') != -1: jobJDL = jobJDL.replace('%j', str(jobID)) result = self.setJobJDL(jobID, jobJDL) if not result['OK']: return result result = self.__setInitialJobParameters(classAdJob, jobID) if not result['OK']: return result result = self.setJobAttributes(jobID, jobAttrNames, jobAttrValues) if not result['OK']: return result retVal['InputData'] = classAdJob.lookupAttribute("InputData") retVal['RescheduleCounter'] = rescheduleCounter retVal['Status'] = 'Received' retVal['MinorStatus'] = 'Job Rescheduled' return retVal ############################################################################# def getUserSitesTuple(self, sites): """Returns tuple of active/banned/invalid sties from a user provided list.""" ret = self._escapeValues(sites) if not ret['OK']: return ret sites = set(sites) sitesSql = ret['Value'] sitesSql[0] = 'SELECT %s AS Site' % sitesSql[0] sitesSql = ' UNION SELECT '.join(sitesSql) cmd = "SELECT Site FROM (%s) " % sitesSql cmd += "AS tmptable WHERE Site NOT IN (SELECT Site FROM SiteMask WHERE Status='Active')" result = self._query(cmd) if not result['OK']: return result nonActiveSites = set(x[0] for x in result['Value']) activeSites = sites.difference(nonActiveSites) bannedSites = nonActiveSites.intersection(set(self.getSiteMask('Banned'))) invalidSites = nonActiveSites.difference(bannedSites) return S_OK((activeSites, bannedSites, invalidSites)) ############################################################################# def getSiteMask(self, siteState='Active'): """ Get the currently active site list """ ret = self._escapeString(siteState) if not ret['OK']: return ret siteState = ret['Value'] if siteState == "All": cmd = "SELECT Site FROM SiteMask" else: cmd = "SELECT Site FROM SiteMask WHERE Status=%s" % siteState result = self._query(cmd) siteList = [] if result['OK']: siteList = [x[0] for x in result['Value']] else: return S_ERROR(DErrno.EMYSQL, "SQL query failed: %s" % cmd) return S_OK(siteList) 
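# Illustrative usage sketch (assumed caller code, not defined in this file):
# a caller holding a JobDB instance could combine the two site-mask helpers above
# roughly as follows; the instance name and the example site names are assumptions.
#
#   result = jobDB.getSiteMask('Active')
#   activeMask = result['Value'] if result['OK'] else []
#   result = jobDB.getUserSitesTuple(['LCG.CERN.ch', 'LCG.PIC.es'])
#   if result['OK']:
#       active, banned, invalid = result['Value']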
############################################################################# def getSiteMaskStatus(self, sites=None): """ Get the current site mask status :param:sites - A string for a single site to check, or a list to check multiple sites. :returns:If input was a list, a dictionary of sites, keys are site names and values are the site statuses. Unknown sites are not included in the output dictionary. If input was a string, then a single value with that site's status, or S_ERROR if the site does not exist in the DB. """ if isinstance(sites, list): safeSites = [] for site in sites: res = self._escapeString(site) if not res['OK']: return res safeSites.append(res['Value']) sitesString = ",".join(safeSites) cmd = "SELECT Site, Status FROM SiteMask WHERE Site in (%s)" % sitesString result = self._query(cmd) return S_OK(dict(result['Value'])) elif isinstance(sites, str): ret = self._escapeString(sites) if not ret['OK']: return ret cmd = "SELECT Status FROM SiteMask WHERE Site=%s" % ret['Value'] result = self._query(cmd) if result['Value']: return S_OK(result['Value'][0][0]) return S_ERROR("Unknown site %s" % sites) else: cmd = "SELECT Site,Status FROM SiteMask" result = self._query(cmd) siteDict = {} if result['OK']: for site, status in result['Value']: siteDict[site] = status else: return S_ERROR(DErrno.EMYSQL, "SQL query failed: %s" % cmd) return S_OK(siteDict) ############################################################################# def getAllSiteMaskStatus(self): """ Get the everything from site mask status """ cmd = "SELECT Site,Status,LastUpdateTime,Author,Comment FROM SiteMask" result = self._query(cmd) if not result['OK']: return result['Message'] siteDict = {} if result['OK']: for site, status, lastUpdateTime, author, comment in result['Value']: siteDict[site] = status, lastUpdateTime, author, comment return S_OK(siteDict) ############################################################################# def setSiteMask(self, siteMaskList, authorDN='Unknown', comment='No comment'): """ Set the Site Mask to the given mask in a form of a list of tuples (site,status) """ for site, status in siteMaskList: result = self.__setSiteStatusInMask(site, status, authorDN, comment) if not result['OK']: return result return S_OK() ############################################################################# def __setSiteStatusInMask(self, site, status, authorDN='Unknown', comment='No comment'): """ Set the given site status to 'status' or add a new active site """ result = self._escapeString(site) if not result['OK']: return result site = result['Value'] result = self._escapeString(status) if not result['OK']: return result status = result['Value'] result = self._escapeString(authorDN) if not result['OK']: return result authorDN = result['Value'] result = self._escapeString(comment) if not result['OK']: return result comment = result['Value'] req = "SELECT Status FROM SiteMask WHERE Site=%s" % site result = self._query(req) if result['OK']: if result['Value']: current_status = result['Value'][0][0] if current_status == status: return S_OK() else: req = "UPDATE SiteMask SET Status=%s,LastUpdateTime=UTC_TIMESTAMP()," \ "Author=%s, Comment=%s WHERE Site=%s" req = req % (status, authorDN, comment, site) else: req = "INSERT INTO SiteMask VALUES (%s,%s,UTC_TIMESTAMP(),%s,%s)" % (site, status, authorDN, comment) result = self._update(req) if not result['OK']: return S_ERROR('Failed to update the Site Mask') # update the site mask logging record req = "INSERT INTO SiteMaskLogging VALUES 
(%s,%s,UTC_TIMESTAMP(),%s,%s)" % (site, status, authorDN, comment) result = self._update(req) if not result['OK']: self.log.warn('Failed to update site mask logging', 'for %s' % site) else: return S_ERROR('Failed to get the Site Status from the Mask') return S_OK() ############################################################################# def banSiteInMask(self, site, authorDN='Unknown', comment='No comment'): """ Forbid the given site in the Site Mask """ result = self.__setSiteStatusInMask(site, 'Banned', authorDN, comment) return result ############################################################################# def allowSiteInMask(self, site, authorDN='Unknown', comment='No comment'): """ Forbid the given site in the Site Mask """ result = self.__setSiteStatusInMask(site, 'Active', authorDN, comment) return result ############################################################################# def removeSiteFromMask(self, site=None): """ Remove the given site from the mask """ if not site: req = "DELETE FROM SiteMask" else: ret = self._escapeString(site) if not ret['OK']: return ret site = ret['Value'] req = "DELETE FROM SiteMask WHERE Site=%s" % site return self._update(req) ############################################################################# def getSiteMaskLogging(self, siteList): """ Get the site mask logging history for the list if site names """ if siteList: siteString = ','.join(["'" + x + "'" for x in siteList]) req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging WHERE Site in (%s)" % siteString else: req = "SELECT Site,Status,UpdateTime,Author,Comment FROM SiteMaskLogging" req += " ORDER BY UpdateTime ASC" result = self._query(req) if not result['OK']: return result availableSiteList = [] for row in result['Value']: site, status, utime, author, comment = row availableSiteList.append(site) resultDict = {} for site in siteList: if not result['Value'] or site not in availableSiteList: ret = self._escapeString(site) if not ret['OK']: continue e_site = ret['Value'] req = "SELECT Status Site,Status,LastUpdateTime,Author,Comment FROM SiteMask WHERE Site=%s" % e_site resSite = self._query(req) if resSite['OK']: if resSite['Value']: site, status, lastUpdate, author, comment = resSite['Value'][0] resultDict[site] = [(status, str(lastUpdate), author, comment)] else: resultDict[site] = [('Unknown', '', '', 'Site not present in logging table')] for row in result['Value']: site, status, utime, author, comment = row if site not in resultDict: resultDict[site] = [] resultDict[site].append((status, str(utime), author, comment)) return S_OK(resultDict) ############################################################################# def getSiteSummary(self): """ Get the summary of jobs in a given status on all the sites """ waitingList = ['"Submitted"', '"Assigned"', '"Waiting"', '"Matched"'] waitingString = ','.join(waitingList) result = self.getDistinctJobAttributes('Site') if not result['OK']: return result siteList = result['Value'] siteDict = {} totalDict = {'Waiting': 0, 'Running': 0, 'Stalled': 0, 'Done': 0, 'Failed': 0} for site in siteList: if site == "ANY": continue # Waiting siteDict[site] = {} ret = self._escapeString(site) if not ret['OK']: return ret e_site = ret['Value'] req = "SELECT COUNT(JobID) FROM Jobs WHERE Status IN (%s) AND Site=%s" % (waitingString, e_site) result = self._query(req) if result['OK']: count = result['Value'][0][0] else: return S_ERROR('Failed to get Site data from the JobDB') siteDict[site]['Waiting'] = count totalDict['Waiting'] 
+= count # Running,Stalled,Done,Failed for status in ['"Running"', '"Stalled"', '"Done"', '"Failed"']: req = "SELECT COUNT(JobID) FROM Jobs WHERE Status=%s AND Site=%s" % (status, e_site) result = self._query(req) if result['OK']: count = result['Value'][0][0] else: return S_ERROR('Failed to get Site data from the JobDB') siteDict[site][status.replace('"', '')] = count totalDict[status.replace('"', '')] += count siteDict['Total'] = totalDict return S_OK(siteDict) ################################################################################# def getSiteSummaryWeb(self, selectDict, sortList, startItem, maxItems): """ Get the summary of jobs in a given status on all the sites in the standard Web form """ paramNames = ['Site', 'GridType', 'Country', 'Tier', 'MaskStatus'] paramNames += JOB_STATES paramNames += ['Efficiency', 'Status'] # FIXME: hack!!! siteT1List = ['CERN', 'IN2P3', 'NIKHEF', 'SARA', 'PIC', 'CNAF', 'RAL', 'GRIDKA', 'RRCKI'] # Sort out records as requested sortItem = -1 sortOrder = "ASC" if sortList: item = sortList[0][0] # only one item for the moment sortItem = paramNames.index(item) sortOrder = sortList[0][1] last_update = None if 'LastUpdateTime' in selectDict: last_update = selectDict['LastUpdateTime'] del selectDict['LastUpdateTime'] result = self.getCounters('Jobs', ['Site', 'Status'], {}, newer=last_update, timeStamp='LastUpdateTime') last_day = Time.dateTime() - Time.day resultDay = self.getCounters('Jobs', ['Site', 'Status'], {}, newer=last_day, timeStamp='EndExecTime') # Get the site mask status siteMask = {} resultMask = self.siteClient.getSites('All') if resultMask['OK']: for site in resultMask['Value']: siteMask[site] = 'NoMask' resultMask = self.siteClient.getSites('Active') if resultMask['OK']: for site in resultMask['Value']: siteMask[site] = 'Active' resultMask = self.siteClient.getSites('Banned') if resultMask['OK']: for site in resultMask['Value']: siteMask[site] = 'Banned' # Sort out different counters resultDict = {} if result['OK']: for attDict, count in result['Value']: siteFullName = attDict['Site'] status = attDict['Status'] if siteFullName not in resultDict: resultDict[siteFullName] = {} for state in JOB_STATES: resultDict[siteFullName][state] = 0 if status not in JOB_FINAL_STATES: resultDict[siteFullName][status] = count if resultDay['OK']: for attDict, count in resultDay['Value']: siteFullName = attDict['Site'] if siteFullName not in resultDict: resultDict[siteFullName] = {} for state in JOB_STATES: resultDict[siteFullName][state] = 0 status = attDict['Status'] if status in JOB_FINAL_STATES: resultDict[siteFullName][status] = count # Collect records now records = [] countryCounts = {} for siteFullName in resultDict: siteDict = resultDict[siteFullName] if siteFullName.count('.') == 2: grid, site, country = siteFullName.split('.') else: grid, site, country = 'Unknown', 'Unknown', 'Unknown' tier = 'Tier-2' if site in siteT1List: tier = 'Tier-1' if country not in countryCounts: countryCounts[country] = {} for state in JOB_STATES: countryCounts[country][state] = 0 rList = [siteFullName, grid, country, tier] if siteFullName in siteMask: rList.append(siteMask[siteFullName]) else: rList.append('NoMask') for status in JOB_STATES: rList.append(siteDict[status]) countryCounts[country][status] += siteDict[status] efficiency = 0 total_finished = 0 for state in JOB_FINAL_STATES: total_finished += resultDict[siteFullName][state] if total_finished > 0: efficiency = float(siteDict['Done'] + siteDict['Completed']) / float(total_finished) rList.append('%.1f' % 
(efficiency * 100.)) # Estimate the site verbose status if efficiency > 0.95: rList.append('Good') elif efficiency > 0.80: rList.append('Fair') elif efficiency > 0.60: rList.append('Poor') elif total_finished == 0: rList.append('Idle') else: rList.append('Bad') records.append(rList) # Select records as requested if selectDict: for item in selectDict: selectItem = paramNames.index(item) values = selectDict[item] if not isinstance(values, list): values = [values] indices = range(len(records)) indices.reverse() for ind in indices: if records[ind][selectItem] not in values: del records[ind] # Sort records as requested if sortItem != -1: if sortOrder.lower() == "asc": records.sort(key=operator.itemgetter(sortItem)) else: records.sort(key=operator.itemgetter(sortItem), reverse=True) # Collect the final result finalDict = {} finalDict['ParameterNames'] = paramNames # Return all the records if maxItems == 0 or the specified number otherwise if maxItems: if startItem + maxItems > len(records): finalDict['Records'] = records[startItem:] else: finalDict['Records'] = records[startItem:startItem + maxItems] else: finalDict['Records'] = records finalDict['TotalRecords'] = len(records) finalDict['Extras'] = countryCounts return S_OK(finalDict) ##################################################################################### def setHeartBeatData(self, jobID, staticDataDict, dynamicDataDict): """ Add the job's heart beat data to the database """ # Set the time stamp first ret = self._escapeString(jobID) if not ret['OK']: return ret e_jobID = ret['Value'] req = "UPDATE Jobs SET HeartBeatTime=UTC_TIMESTAMP(), Status='Running' WHERE JobID=%s" % e_jobID result = self._update(req) if not result['OK']: return S_ERROR('Failed to set the heart beat time: ' + result['Message']) ok = True # FIXME: It is rather not optimal to use parameters to store the heartbeat info, must find a proper solution # Add static data items as job parameters result = self.setJobParameters(jobID, staticDataDict.items()) if not result['OK']: ok = False self.log.warn(result['Message']) # Add dynamic data to the job heart beat log # start = time.time() valueList = [] for key, value in dynamicDataDict.items(): result = self._escapeString(key) if not result['OK']: self.log.warn('Failed to escape string ', key) continue e_key = result['Value'] result = self._escapeString(value) if not result['OK']: self.log.warn('Failed to escape string ', value) continue e_value = result['Value'] valueList.append("( %s, %s,%s,UTC_TIMESTAMP())" % (e_jobID, e_key, e_value)) if valueList: valueString = ','.join(valueList) req = "INSERT INTO HeartBeatLoggingInfo (JobID,Name,Value,HeartBeatTime) VALUES " req += valueString result = self._update(req) if not result['OK']: ok = False self.log.warn(result['Message']) if ok: return S_OK() return S_ERROR('Failed to store some or all the parameters') ##################################################################################### def getHeartBeatData(self, jobID): """ Retrieve the job's heart beat data """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] cmd = 'SELECT Name,Value,HeartBeatTime from HeartBeatLoggingInfo WHERE JobID=%s' % jobID res = self._query(cmd) if not res['OK']: return res if not res['Value']: return S_OK([]) result = [] values = res['Value'] for row in values: result.append((str(row[0]), '%.01f' % (float(row[1].replace('"', ''))), str(row[2]))) return S_OK(result) ##################################################################################### def 
setJobCommand(self, jobID, command, arguments=None): """ Store a command to be passed to the job together with the next heart beat """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(command) if not ret['OK']: return ret command = ret['Value'] if arguments: ret = self._escapeString(arguments) if not ret['OK']: return ret arguments = ret['Value'] else: arguments = "''" req = "INSERT INTO JobCommands (JobID,Command,Arguments,ReceptionTime) " req += "VALUES (%s,%s,%s,UTC_TIMESTAMP())" % (jobID, command, arguments) result = self._update(req) return result ##################################################################################### def getJobCommand(self, jobID, status='Received'): """ Get a command to be passed to the job together with the next heart beat """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(status) if not ret['OK']: return ret status = ret['Value'] req = "SELECT Command, Arguments FROM JobCommands WHERE JobID=%s AND Status=%s" % (jobID, status) result = self._query(req) if not result['OK']: return result resultDict = {} if result['Value']: for row in result['Value']: resultDict[row[0]] = row[1] return S_OK(resultDict) ##################################################################################### def setJobCommandStatus(self, jobID, command, status): """ Set the command status """ ret = self._escapeString(jobID) if not ret['OK']: return ret jobID = ret['Value'] ret = self._escapeString(command) if not ret['OK']: return ret command = ret['Value'] ret = self._escapeString(status) if not ret['OK']: return ret status = ret['Value'] req = "UPDATE JobCommands SET Status=%s WHERE JobID=%s AND Command=%s" % (status, jobID, command) result = self._update(req) return result ##################################################################################### def getSummarySnapshot(self, requestedFields=False): """ Get the summary snapshot for a given combination """ if not requestedFields: requestedFields = ['Status', 'MinorStatus', 'Site', 'Owner', 'OwnerGroup', 'JobGroup', 'JobSplitType'] defFields = ['DIRACSetup'] + requestedFields valueFields = ['COUNT(JobID)', 'SUM(RescheduleCounter)'] defString = ", ".join(defFields) valueString = ", ".join(valueFields) sqlCmd = "SELECT %s, %s From Jobs GROUP BY %s" % (defString, valueString, defString) result = self._query(sqlCmd) if not result['OK']: return result return S_OK(((defFields + valueFields), result['Value']))
andresailer/DIRAC
WorkloadManagementSystem/DB/JobDB.py
Python
gpl-3.0
70901
[ "DIRAC" ]
a9f6e5eda9778c4553decc0b49d6a084e09a54d277056789b130e9b4b29f6975
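The JobDB methods above return the usual DIRAC S_OK/S_ERROR dictionaries, with rescheduleJob attaching extra keys such as JobID, Status, MinorStatus and RescheduleCounter on success. A minimal sketch of driving it from client code is below; the import path follows the file path shown above, the job ID is a placeholder, and constructing JobDB assumes a configured DIRAC installation.

from DIRAC.WorkloadManagementSystem.DB.JobDB import JobDB

jobDB = JobDB()
result = jobDB.rescheduleJob(1234)  # 1234 is a placeholder job ID
if result['OK']:
    print('Job %s rescheduled: %s / %s (attempt %s)' % (
        result['JobID'], result['Status'], result['MinorStatus'], result['RescheduleCounter']))
else:
    print('Reschedule failed: %s' % result['Message'])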
#!/usr/bin/env python # CREATED:2013-08-22 12:20:01 by Brian McFee <brm2132@columbia.edu> '''Music segmentation using timbre, pitch, repetition and time. If run as a program, usage is: ./segmenter.py AUDIO.mp3 OUTPUT.lab ''' import sys import os import argparse import cPickle as pickle import numpy as np import segmenter def features(input_song): with open(input_song, 'r') as f: data = pickle.load(f) return data['features'], data['segment_times'], data['beats'] def get_num_segs(duration, MIN_SEG=10.0, MAX_SEG=45.0): kmin = max(1, np.floor(duration / MAX_SEG).astype(int)) kmax = max(1, np.ceil(duration / MIN_SEG).astype(int)) return kmin, kmax def process_arguments(): parser = argparse.ArgumentParser(description='Music segmentation with pre-computed features') parser.add_argument( '-t', '--transform', dest = 'transform', required = False, type = str, help = 'npy file containing the linear projection', default = None) parser.add_argument( '-d', '--dynamic', dest = 'dynamic', required = False, action = 'store_true', help = 'dynamic segment numberings') parser.add_argument( '-g', '--gnostic', dest = 'gnostic', action = 'store_true', required= False, help = 'Operate with knowledge of k') parser.add_argument( 'input_song', action = 'store', help = 'path to input feature data (pickle file)') parser.add_argument( 'output_file', action = 'store', help = 'path to output segment file') return vars(parser.parse_args(sys.argv[1:])) if __name__ == '__main__': parameters = process_arguments() # Load the features print '- ', os.path.basename(parameters['input_song']) X, Y, beats = features(parameters['input_song']) # Load the transformation W = segmenter.load_transform(parameters['transform']) print '\tapplying transformation...' X = W.dot(X) # Find the segment boundaries print '\tpredicting segments...' if parameters['gnostic']: S = segmenter.get_segments(X, kmin=len(Y)-1, kmax=len(Y)) elif parameters['dynamic']: kmin, kmax = get_num_segs(beats[-1]) S = segmenter.get_segments(X, kmin=kmin, kmax=kmax) else: S = segmenter.get_segments(X) # Output lab file print '\tsaving output to ', parameters['output_file'] segmenter.save_segments(parameters['output_file'], S, beats) pass
guiquanz/msaf
msaf/algorithms/olda/feature_segmenter.py
Python
mit
3086
[ "Brian" ]
0fae70b044ff2d5a8305637a3a33cdcb4782144969fdaf58d5834af48010e422
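In the segmenter above, get_num_segs bounds the number of segments by assuming each segment lasts between MIN_SEG and MAX_SEG seconds. Restated as a standalone snippet (the 180-second duration is just an example value), the bounds work out like this:

import numpy as np

def get_num_segs(duration, MIN_SEG=10.0, MAX_SEG=45.0):
    # at least one segment; segment lengths assumed between MIN_SEG and MAX_SEG seconds
    kmin = max(1, np.floor(duration / MAX_SEG).astype(int))
    kmax = max(1, np.ceil(duration / MIN_SEG).astype(int))
    return kmin, kmax

print(get_num_segs(180.0))  # a three-minute track gives (kmin, kmax) = (4, 18)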
############################################################################## # MDTraj: A Python Library for Loading, Saving, and Manipulating # Molecular Dynamics Trajectories. # Copyright 2012-2014 Stanford University and the Authors # # Authors: Robert McGibbon # Contributors: Kyle A. Beauchamp, TJ Lane, Joshua Adelman, Lee-Ping Wang, Jason Swails # # MDTraj is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as # published by the Free Software Foundation, either version 2.1 # of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with MDTraj. If not, see <http://www.gnu.org/licenses/>. ############################################################################## ############################################################################## # Imports ############################################################################## from __future__ import print_function, division import os import warnings from copy import deepcopy from collections import Iterable import numpy as np import functools from mdtraj.formats import DCDTrajectoryFile from mdtraj.formats import BINPOSTrajectoryFile from mdtraj.formats import XTCTrajectoryFile from mdtraj.formats import TRRTrajectoryFile from mdtraj.formats import HDF5TrajectoryFile from mdtraj.formats import NetCDFTrajectoryFile from mdtraj.formats import LH5TrajectoryFile from mdtraj.formats import PDBTrajectoryFile from mdtraj.formats import MDCRDTrajectoryFile from mdtraj.formats import DTRTrajectoryFile from mdtraj.formats import LAMMPSTrajectoryFile from mdtraj.formats import XYZTrajectoryFile from mdtraj.formats import GroTrajectoryFile from mdtraj.formats import TNGTrajectoryFile from mdtraj.formats import AmberNetCDFRestartFile from mdtraj.formats import AmberRestartFile from mdtraj.formats.prmtop import load_prmtop from mdtraj.formats.psf import load_psf from mdtraj.formats.mol2 import load_mol2 from mdtraj.formats.gro import load_gro from mdtraj.formats.arc import load_arc from mdtraj.formats.hoomdxml import load_hoomdxml from mdtraj.core.topology import Topology from mdtraj.core.residue_names import _SOLVENT_TYPES from mdtraj.utils import (ensure_type, in_units_of, lengths_and_angles_to_box_vectors, box_vectors_to_lengths_and_angles, cast_indices, deprecated) from mdtraj.utils.six.moves import xrange from mdtraj.utils.six import PY3, string_types from mdtraj import _rmsd from mdtraj import FormatRegistry from mdtraj.geometry import distance from mdtraj.geometry import _geometry ############################################################################## # Globals ############################################################################## __all__ = ['open', 'load', 'iterload', 'load_frame', 'load_topology', 'join', 'Trajectory'] # supported extensions for constructing topologies _TOPOLOGY_EXTS = ['.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7', '.psf', '.mol2', '.hoomdxml', '.gro', '.arc', '.hdf5'] ############################################################################## # Utilities ############################################################################## def _assert_files_exist(filenames): """Throw an IO error if files don't 
exist Parameters ---------- filenames : {str, [str]} String or list of strings to check """ if isinstance(filenames, string_types): filenames = [filenames] for fn in filenames: if not (os.path.exists(fn) and os.path.isfile(fn)): raise IOError('No such file: %s' % fn) def _assert_files_or_dirs_exist(names): """Throw an IO error if files don't exist Parameters ---------- filenames : {str, [str]} String or list of strings to check """ if isinstance(names, string_types): names = [names] for fn in names: if not (os.path.exists(fn) and \ (os.path.isfile(fn) or os.path.isdir(fn))): raise IOError('No such file: %s' % fn) if PY3: def _hash_numpy_array(x): hash_value = hash(x.shape) hash_value ^= hash(x.strides) hash_value ^= hash(x.data.tobytes()) return hash_value else: def _hash_numpy_array(x): writeable = x.flags.writeable try: x.flags.writeable = False hash_value = hash(x.shape) hash_value ^= hash(x.strides) hash_value ^= hash(x.data) finally: x.flags.writeable = writeable return hash_value def load_topology(filename, **kwargs): """Load a topology Parameters ---------- filename : str Path to a file containing a system topology. The following extensions are supported: '.pdb', '.pdb.gz', '.h5','.lh5', '.prmtop', '.parm7', '.psf', '.mol2', '.hoomdxml' Returns ------- topology : md.Topology """ return _parse_topology(filename, **kwargs) def _parse_topology(top, **kwargs): """Get the topology from a argument of indeterminate type If top is a string, we try loading a pdb, if its a trajectory we extract its topology. Returns ------- topology : md.Topology """ if isinstance(top, string_types): ext = _get_extension(top) else: ext = None # might not be a string if isinstance(top, string_types) and (ext in ['.pdb', '.pdb.gz', '.h5','.lh5']): _traj = load_frame(top, 0, **kwargs) topology = _traj.topology elif isinstance(top, string_types) and (ext in ['.prmtop', '.parm7']): topology = load_prmtop(top, **kwargs) elif isinstance(top, string_types) and (ext in ['.psf']): topology = load_psf(top, **kwargs) elif isinstance(top, string_types) and (ext in ['.mol2']): topology = load_mol2(top, **kwargs).topology elif isinstance(top, string_types) and (ext in ['.gro']): topology = load_gro(top, **kwargs).topology elif isinstance(top, string_types) and (ext in ['.arc']): topology = load_arc(top, **kwargs).topology elif isinstance(top, string_types) and (ext in ['.hoomdxml']): topology = load_hoomdxml(top, **kwargs).topology elif isinstance(top, Trajectory): topology = top.topology elif isinstance(top, Topology): topology = top elif isinstance(top, string_types): raise IOError('The topology is loaded by filename extension, and the ' 'detected "%s" format is not supported. Supported topology ' 'formats include %s and "%s".' % ( ext, ', '.join(['"%s"' % e for e in _TOPOLOGY_EXTS[:-1]]), _TOPOLOGY_EXTS[-1])) else: raise TypeError('A topology is required. You supplied top=%s' % str(top)) return topology def _get_extension(filename): (base, extension) = os.path.splitext(filename) if extension == '.gz': extension2 = os.path.splitext(base)[1] return extension2 + extension return extension ############################################################################## # Utilities ############################################################################## def open(filename, mode='r', force_overwrite=True, **kwargs): """Open a trajectory file-like object This factor function returns an instance of an open file-like object capable of reading/writing the trajectory (depending on 'mode'). 
It does not actually load the trajectory from disk or write anything. Parameters ---------- filename : str Path to the trajectory file on disk mode : {'r', 'w'} The mode in which to open the file, either 'r' for read or 'w' for write. force_overwrite : bool If opened in write mode, and a file by the name of `filename` already exists on disk, should we overwrite it? Other Parameters ---------------- kwargs : dict Other keyword parameters are passed directly to the file object Returns ------- fileobject : object Open trajectory file, whose type is determined by the filename extension See Also -------- load, ArcTrajectoryFile, BINPOSTrajectoryFile, DCDTrajectoryFile, HDF5TrajectoryFile, LH5TrajectoryFile, MDCRDTrajectoryFile, NetCDFTrajectoryFile, PDBTrajectoryFile, TRRTrajectoryFile, XTCTrajectoryFile, TNGTrajectoryFile """ extension = _get_extension(filename) try: loader = FormatRegistry.fileobjects[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, FormatRegistry.fileobjects.keys())) return loader(filename, mode=mode, force_overwrite=force_overwrite, **kwargs) def load_frame(filename, index, top=None, atom_indices=None, **kwargs): """Load a single frame from a trajectory file Parameters ---------- filename : str Path to the trajectory file on disk index : int Load the `index`-th frame from the specified file top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. These indices are zero-based (not 1 based, as used by the PDB format). Examples -------- >>> import mdtraj as md >>> first_frame = md.load_frame('traj.h5', 0) >>> print first_frame <mdtraj.Trajectory with 1 frames, 22 atoms> See Also -------- load, load_frame Returns ------- trajectory : md.Trajectory The resulting conformation, as an md.Trajectory object containing a single frame. """ extension = _get_extension(filename) try: loader = FormatRegistry.loaders[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files with extensions in %s' % (filename, extension, FormatRegistry.loaders.keys())) kwargs['atom_indices'] = atom_indices if extension not in _TOPOLOGY_EXTS: kwargs['top'] = top if loader.__name__ not in ['load_dtr']: _assert_files_exist(filename) else: _assert_files_or_dirs_exist(filename) return loader(filename, frame=index, **kwargs) def load(filename_or_filenames, discard_overlapping_frames=False, **kwargs): """Load a trajectory from one or more files on disk. This function dispatches to one of the specialized trajectory loaders based on the extension on the filename. Because different trajectory formats save different information on disk, the specific keyword argument options supported depend on the specific loaded. Parameters ---------- filename_or_filenames : {str, list of strings} Filename or list of filenames containing trajectory files of a single format. discard_overlapping_frames : bool, default=False Look for overlapping frames between the last frame of one filename and the first frame of a subsequent filename and discard them Other Parameters ---------------- top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. 
Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. This option is not required for the .h5, .lh5, and .pdb formats, which already contain topology information. stride : int, default=None Only read every stride-th frame atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. This may be slightly slower than the standard read because it requires an extra copy, but will save memory. See Also -------- load_frame, iterload Examples -------- >>> import mdtraj as md >>> traj = md.load('output.xtc', top='topology.pdb') >>> print traj <mdtraj.Trajectory with 500 frames, 423 atoms at 0x110740a90> >>> traj2 = md.load('output.xtc', stride=2, top='topology.pdb') >>> print traj2 <mdtraj.Trajectory with 250 frames, 423 atoms at 0x11136e410> >>> traj3 = md.load_hdf5('output.xtc', atom_indices=[0,1] top='topology.pdb') >>> print traj3 <mdtraj.Trajectory with 500 frames, 2 atoms at 0x18236e4a0> Returns ------- trajectory : md.Trajectory The resulting trajectory, as an md.Trajectory object. """ if "top" in kwargs: # If applicable, pre-loads the topology from PDB for major performance boost. topkwargs = kwargs.copy() topkwargs.pop("top", None) topkwargs.pop("atom_indices", None) topkwargs.pop("frame", None) kwargs["top"] = _parse_topology(kwargs["top"], **topkwargs) # grab the extension of the filename if isinstance(filename_or_filenames, string_types): # If a single filename extension = _get_extension(filename_or_filenames) filename = filename_or_filenames else: # If multiple filenames, take the first one. extensions = [_get_extension(f) for f in filename_or_filenames] if len(set(extensions)) == 0: raise ValueError('No trajectories specified. ' 'filename_or_filenames was an empty list') elif len(set(extensions)) > 1: raise TypeError("Each filename must have the same extension. " "Received: %s" % ', '.join(set(extensions))) else: # we know the topology is equal because we sent the same topology # kwarg in. Therefore, we explictly throw away the topology on all # but the first trajectory and use check_topology=False on the join. # Throwing the topology away explictly allows a large number of pdb # files to be read in without using ridiculous amounts of memory. trajectories = [] for (i, f) in enumerate(filename_or_filenames): t = load(f, **kwargs) if i != 0: t.topology = None trajectories.append(t) return join(trajectories, check_topology=False, discard_overlapping_frames=discard_overlapping_frames) try: #loader = _LoaderRegistry[extension][0] loader = FormatRegistry.loaders[extension] except KeyError: raise IOError('Sorry, no loader for filename=%s (extension=%s) ' 'was found. I can only load files ' 'with extensions in %s' % (filename, extension, FormatRegistry.loaders.keys())) if extension in _TOPOLOGY_EXTS: # this is a little hack that makes calling load() more predictable. since # most of the loaders take a kwargs "top" except for load_hdf5, (since # it saves the topology inside the file), we often end up calling # load_hdf5 via this function with the top kwarg specified. but then # there would be a signature binding error. it's easier just to ignore # it. 
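# For example (illustrative call, hypothetical filenames): md.load('traj.h5', top='native.pdb')
# hits this branch, emits the warning below, and uses the topology stored inside traj.h5.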
if 'top' in kwargs: warnings.warn('top= kwarg ignored since file contains topology information') kwargs.pop('top', None) else: # standard_names is a valid keyword argument only for files containing topologies kwargs.pop('standard_names', None) if loader.__name__ not in ['load_dtr']: _assert_files_exist(filename_or_filenames) else: _assert_files_or_dirs_exist(filename_or_filenames) value = loader(filename, **kwargs) return value def iterload(filename, chunk=100, **kwargs): """An iterator over a trajectory from one or more files on disk, in fragments This may be more memory efficient than loading an entire trajectory at once Parameters ---------- filename : str Path to the trajectory file on disk chunk : int Number of frames to load at once from disk per iteration. If 0, load all. Other Parameters ---------------- top : {str, Trajectory, Topology} Most trajectory formats do not contain topology information. Pass in either the path to a RCSB PDB file, a trajectory, or a topology to supply this information. This option is not required for the .h5, .lh5, and .pdb formats, which already contain topology information. stride : int, default=None Only read every stride-th frame. atom_indices : array_like, optional If not none, then read only a subset of the atoms coordinates from the file. This may be slightly slower than the standard read because it requires an extra copy, but will save memory. skip : int, default=0 Skip first n frames. See Also -------- load, load_frame Examples -------- >>> import mdtraj as md >>> for chunk in md.iterload('output.xtc', top='topology.pdb') ... print chunk <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> <mdtraj.Trajectory with 100 frames, 423 atoms at 0x110740a90> """ stride = kwargs.pop('stride', 1) atom_indices = cast_indices(kwargs.pop('atom_indices', None)) top = kwargs.pop('top', None) skip = kwargs.pop('skip', 0) extension = _get_extension(filename) if extension not in _TOPOLOGY_EXTS: topology = _parse_topology(top) if chunk == 0: # If chunk was 0 then we want to avoid filetype-specific code # in case of undefined behavior in various file parsers. # TODO: this will first apply stride, then skip! if extension not in _TOPOLOGY_EXTS: kwargs['top'] = top yield load(filename, **kwargs)[skip:] elif extension in ('.pdb', '.pdb.gz'): # the PDBTrajectortFile class doesn't follow the standard API. Fixing it # to support iterload could be worthwhile, but requires a deep refactor. 
t = load(filename, stride=stride, atom_indices=atom_indices) for i in range(0, len(t), chunk): yield t[i:i+chunk] else: with (lambda x: open(x, n_atoms=topology.n_atoms) if extension in ('.crd', '.mdcrd') else open(filename))(filename) as f: if skip > 0: f.seek(skip) while True: if extension not in _TOPOLOGY_EXTS: traj = f.read_as_traj(topology, n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs) else: traj = f.read_as_traj(n_frames=chunk*stride, stride=stride, atom_indices=atom_indices, **kwargs) if len(traj) == 0: raise StopIteration() yield traj def join(trajs, check_topology=True, discard_overlapping_frames=False): """Concatenate multiple trajectories into one long trajectory Parameters ---------- trajs : iterable of trajectories Combine these into one trajectory check_topology : bool Make sure topologies match before joining discard_overlapping_frames : bool Check for overlapping frames and discard """ return functools.reduce( lambda x, y: x.join(y, check_topology=check_topology, discard_overlapping_frames=discard_overlapping_frames), trajs ) class Trajectory(object): """Container object for a molecular dynamics trajectory A Trajectory represents a collection of one or more molecular structures, generally (but not necessarily) from a molecular dynamics trajectory. The Trajectory stores a number of fields describing the system through time, including the cartesian coordinates of each atoms (``xyz``), the topology of the molecular system (``topology``), and information about the unitcell if appropriate (``unitcell_vectors``, ``unitcell_length``, ``unitcell_angles``). A Trajectory should generally be constructed by loading a file from disk. Trajectories can be loaded from (and saved to) the PDB, XTC, TRR, DCD, binpos, NetCDF or MDTraj HDF5 formats. Trajectory supports fancy indexing, so you can extract one or more frames from a Trajectory as a separate trajectory. For example, to form a trajectory with every other frame, you can slice with ``traj[::2]``. Trajectory uses the nanometer, degree & picosecond unit system. Examples -------- >>> # loading a trajectory >>> import mdtraj as md >>> md.load('trajectory.xtc', top='native.pdb') <mdtraj.Trajectory with 1000 frames, 22 atoms at 0x1058a73d0> >>> # slicing a trajectory >>> t = md.load('trajectory.h5') >>> print(t) <mdtraj.Trajectory with 100 frames, 22 atoms> >>> print(t[::2]) <mdtraj.Trajectory with 50 frames, 22 atoms> >>> # calculating the average distance between two atoms >>> import mdtraj as md >>> import numpy as np >>> t = md.load('trajectory.h5') >>> np.mean(np.sqrt(np.sum((t.xyz[:, 0, :] - t.xyz[:, 21, :])**2, axis=1))) See Also -------- mdtraj.load : High-level function that loads files and returns an ``md.Trajectory`` Attributes ---------- n_frames : int n_atoms : int n_residues : int time : np.ndarray, shape=(n_frames,) timestep : float topology : md.Topology top : md.Topology xyz : np.ndarray, shape=(n_frames, n_atoms, 3) unitcell_vectors : {np.ndarray, shape=(n_frames, 3, 3), None} unitcell_lengths : {np.ndarray, shape=(n_frames, 3), None} unitcell_angles : {np.ndarray, shape=(n_frames, 3), None} """ # this is NOT configurable. 
if it's set to something else, things will break # (thus why I make it private) _distance_unit = 'nanometers' @property def topology(self): """Topology of the system, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @topology.setter def topology(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def n_frames(self): """Number of frames in the trajectory Returns ------- n_frames : int The number of frames in the trajectory """ return self._xyz.shape[0] @property def n_atoms(self): """Number of atoms in the trajectory Returns ------- n_atoms : int The number of atoms in the trajectory """ return self._xyz.shape[1] @property def n_residues(self): """Number of residues (amino acids) in the trajectory Returns ------- n_residues : int The number of residues in the trajectory's topology """ if self.top is None: return 0 return sum([1 for r in self.top.residues]) @property def n_chains(self): """Number of chains in the trajectory Returns ------- n_chains : int The number of chains in the trajectory's topology """ if self.top is None: return 0 return sum([1 for c in self.top.chains]) @property def top(self): """Alias for self.topology, describing the organization of atoms into residues, bonds, etc Returns ------- topology : md.Topology The topology object, describing the organization of atoms into residues, bonds, etc """ return self._topology @top.setter def top(self, value): "Set the topology of the system, describing the organization of atoms into residues, bonds, etc" # todo: more typechecking self._topology = value @property def timestep(self): """Timestep between frames, in picoseconds Returns ------- timestep : float The timestep between frames, in picoseconds. """ if self.n_frames <= 1: raise(ValueError("Cannot calculate timestep if trajectory has one frame.")) return self._time[1] - self._time[0] @property def time(self): """The simulation time corresponding to each frame, in picoseconds Returns ------- time : np.ndarray, shape=(n_frames,) The simulation time corresponding to each frame, in picoseconds """ return self._time @time.setter def time(self, value): "Set the simulation time corresponding to each frame, in picoseconds" if isinstance(value, list): value = np.array(value) if np.isscalar(value) and self.n_frames == 1: value = np.array([value]) elif not value.shape == (self.n_frames,): raise ValueError('Wrong shape. Got %s, should be %s' % (value.shape, (self.n_frames))) self._time = value @property def unitcell_vectors(self): """The vectors that define the shape of the unit cell in each frame Returns ------- vectors : np.ndarray, shape(n_frames, 3, 3) Vectors defining the shape of the unit cell in each frame. The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. 
""" if self._unitcell_lengths is None or self._unitcell_angles is None: return None v1, v2, v3 = lengths_and_angles_to_box_vectors( self._unitcell_lengths[:, 0], # a self._unitcell_lengths[:, 1], # b self._unitcell_lengths[:, 2], # c self._unitcell_angles[:, 0], # alpha self._unitcell_angles[:, 1], # beta self._unitcell_angles[:, 2], # gamma ) return np.swapaxes(np.dstack((v1, v2, v3)), 1, 2) @unitcell_vectors.setter def unitcell_vectors(self, vectors): """Set the three vectors that define the shape of the unit cell Parameters ---------- vectors : tuple of three arrays, each of shape=(n_frames, 3) The semantics of this array are that the shape of the unit cell in frame ``i`` are given by the three vectors, ``value[i, 0, :]``, ``value[i, 1, :]``, and ``value[i, 2, :]``. """ if vectors is None or np.all(np.abs(vectors) < 1e-15): self._unitcell_lengths = None self._unitcell_angles = None return if not len(vectors) == len(self): raise TypeError('unitcell_vectors must be the same length as ' 'the trajectory. you provided %s' % str(vectors)) v1 = vectors[:, 0, :] v2 = vectors[:, 1, :] v3 = vectors[:, 2, :] a, b, c, alpha, beta, gamma = box_vectors_to_lengths_and_angles(v1, v2, v3) self._unitcell_lengths = np.vstack((a, b, c)).T self._unitcell_angles = np.vstack((alpha, beta, gamma)).T @property def unitcell_volumes(self): """Volumes of unit cell for each frame. Returns ------- volumes : {np.ndarray, shape=(n_frames), None} Volumes of the unit cell in each frame, in nanometers^3, or None if the Trajectory contains no unitcell information. """ if self.unitcell_lengths is not None: return np.array(list(map(np.linalg.det, self.unitcell_vectors))) else: return None @property def unitcell_lengths(self): """Lengths that define the shape of the unit cell in each frame. Returns ------- lengths : {np.ndarray, shape=(n_frames, 3), None} Lengths of the unit cell in each frame, in nanometers, or None if the Trajectory contains no unitcell information. """ return self._unitcell_lengths @property def unitcell_angles(self): """Angles that define the shape of the unit cell in each frame. Returns ------- lengths : np.ndarray, shape=(n_frames, 3) The angles between the three unitcell vectors in each frame, ``alpha``, ``beta``, and ``gamma``. ``alpha' gives the angle between vectors ``b`` and ``c``, ``beta`` gives the angle between vectors ``c`` and ``a``, and ``gamma`` gives the angle between vectors ``a`` and ``b``. The angles are in degrees. """ return self._unitcell_angles @unitcell_lengths.setter def unitcell_lengths(self, value): """Set the lengths that define the shape of the unit cell in each frame Parameters ---------- value : np.ndarray, shape=(n_frames, 3) The distances ``a``, ``b``, and ``c`` that define the shape of the unit cell in each frame, or None """ self._unitcell_lengths = ensure_type(value, np.float32, 2, 'unitcell_lengths', can_be_none=True, shape=(len(self), 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True) @unitcell_angles.setter def unitcell_angles(self, value): """Set the lengths that define the shape of the unit cell in each frame Parameters ---------- value : np.ndarray, shape=(n_frames, 3) The angles ``alpha``, ``beta`` and ``gamma`` that define the shape of the unit cell in each frame. The angles should be in degrees. 
""" self._unitcell_angles = ensure_type(value, np.float32, 2, 'unitcell_angles', can_be_none=True, shape=(len(self), 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True) @property def xyz(self): """Cartesian coordinates of each atom in each simulation frame Returns ------- xyz : np.ndarray, shape=(n_frames, n_atoms, 3) A three dimensional numpy array, with the cartesian coordinates of each atoms in each frame. """ return self._xyz @xyz.setter def xyz(self, value): "Set the cartesian coordinates of each atom in each simulation frame" if self.top is not None: # if we have a topology and its not None shape = (None, self.topology._numAtoms, 3) else: shape = (None, None, 3) value = ensure_type(value, np.float32, 3, 'xyz', shape=shape, warn_on_cast=False, add_newaxis_on_deficient_ndim=True) self._xyz = value self._rmsd_traces = None def _string_summary_basic(self): """Basic summary of traj in string form.""" unitcell_str = 'and unitcells' if self._have_unitcell else 'without unitcells' value = "mdtraj.Trajectory with %d frames, %d atoms, %d residues, %s" % ( self.n_frames, self.n_atoms, self.n_residues, unitcell_str) return value def __len__(self): return self.n_frames def __add__(self, other): "Concatenate two trajectories" return self.join(other) def __str__(self): return "<%s>" % (self._string_summary_basic()) def __repr__(self): return "<%s at 0x%02x>" % (self._string_summary_basic(), id(self)) def __hash__(self): hash_value = hash(self.top) # combine with hashes of arrays hash_value ^= _hash_numpy_array(self._xyz) hash_value ^= _hash_numpy_array(self.time) hash_value ^= _hash_numpy_array(self._unitcell_lengths) hash_value ^= _hash_numpy_array(self._unitcell_angles) return hash_value def __eq__(self, other): return self.__hash__() == other.__hash__() # def describe(self): # """Diagnostic summary statistics on the trajectory""" # # What information do we want to display? # # Goals: easy to figure out if a trajectory is blowing up or contains # # bad data, easy to diagonose other problems. Generally give a # # high-level description of the data in the trajectory. # # Possibly show std. dev. of differnt coordinates in the trajectory # # or maybe its RMSD drift or something? # # Also, check for any NaNs or Infs in the data. Or other common issues # # like that? # # Note that pandas.DataFrame has a describe() method, which gives # # min/max/mean/std.dev./percentiles of each column in a DataFrame. # raise NotImplementedError() def superpose(self, reference, frame=0, atom_indices=None, ref_atom_indices=None, parallel=True): """Superpose each conformation in this trajectory upon a reference Parameters ---------- reference : md.Trajectory Align self to a particular frame in `reference` frame : int The index of the conformation in `reference` to align to. atom_indices : array_like, or None The indices of the atoms to superpose. If not supplied, all atoms will be used. ref_atom_indices : array_like, or None Use these atoms on the reference structure. If not supplied, the same atom indices will be used for this trajectory and the reference one. 
parallel : bool Use OpenMP to run the superposition in parallel over multiple cores Returns ------- self """ if atom_indices is None: atom_indices = slice(None) if ref_atom_indices is None: ref_atom_indices = atom_indices if not isinstance(ref_atom_indices, slice) and ( len(ref_atom_indices) != len(atom_indices)): raise ValueError("Number of atoms must be consistent!") n_frames = self.xyz.shape[0] self_align_xyz = np.asarray(self.xyz[:, atom_indices, :], order='c') self_displace_xyz = np.asarray(self.xyz, order='c') ref_align_xyz = np.array(reference.xyz[frame, ref_atom_indices, :], copy=True, order='c').reshape(1, -1, 3) offset = np.mean(self_align_xyz, axis=1, dtype=np.float64).reshape(n_frames, 1, 3) self_align_xyz -= offset if self_align_xyz.ctypes.data != self_displace_xyz.ctypes.data: # when atom_indices is None, these two arrays alias the same memory # so we only need to do the centering once self_displace_xyz -= offset ref_offset = ref_align_xyz[0].astype('float64').mean(0) ref_align_xyz[0] -= ref_offset self_g = np.einsum('ijk,ijk->i', self_align_xyz, self_align_xyz) ref_g = np.einsum('ijk,ijk->i', ref_align_xyz , ref_align_xyz) _rmsd.superpose_atom_major( ref_align_xyz, self_align_xyz, ref_g, self_g, self_displace_xyz, 0, parallel=parallel) self_displace_xyz += ref_offset self.xyz = self_displace_xyz return self def join(self, other, check_topology=True, discard_overlapping_frames=False): """Join two trajectories together along the time/frame axis. This method joins trajectories along the time axis, giving a new trajectory of length equal to the sum of the lengths of `self` and `other`. It can also be called by using `self + other` Parameters ---------- other : Trajectory or list of Trajectory One or more trajectories to join with this one. These trajectories are *appended* to the end of this trajectory. check_topology : bool Ensure that the topology of `self` and `other` are identical before joining them. If false, the resulting trajectory will have the topology of `self`. discard_overlapping_frames : bool, optional If True, compare coordinates at trajectory edges to discard overlapping frames. Default: False. See Also -------- stack : join two trajectories along the atom axis """ if isinstance(other, Trajectory): other = [other] if isinstance(other, list): if not all(isinstance(o, Trajectory) for o in other): raise TypeError('You can only join Trajectory instances') if not all(self.n_atoms == o.n_atoms for o in other): raise ValueError('Number of atoms in self (%d) is not equal ' 'to number of atoms in other' % (self.n_atoms)) if check_topology and not all(self.topology == o.topology for o in other): raise ValueError('The topologies of the Trajectories are not the same') if not all(self._have_unitcell == o._have_unitcell for o in other): raise ValueError('Mixing trajectories with and without unitcell') else: raise TypeError('`other` must be a list of Trajectory. 
You supplied %d' % type(other)) # list containing all of the trajs to merge, including self trajectories = [self] + other if discard_overlapping_frames: for i in range(len(trajectories)-1): # last frame of trajectory i x0 = trajectories[i].xyz[-1] # first frame of trajectory i+1 x1 = trajectories[i + 1].xyz[0] # check that all atoms are within 2e-3 nm # (this is kind of arbitrary) if np.all(np.abs(x1 - x0) < 2e-3): trajectories[i] = trajectories[i][:-1] xyz = np.concatenate([t.xyz for t in trajectories]) time = np.concatenate([t.time for t in trajectories]) angles = lengths = None if self._have_unitcell: angles = np.concatenate([t.unitcell_angles for t in trajectories]) lengths = np.concatenate([t.unitcell_lengths for t in trajectories]) # use this syntax so that if you subclass Trajectory, # the subclass's join() will return an instance of the subclass return self.__class__(xyz, deepcopy(self._topology), time=time, unitcell_lengths=lengths, unitcell_angles=angles) def stack(self, other): """Stack two trajectories along the atom axis This method joins trajectories along the atom axis, giving a new trajectory with a number of atoms equal to the sum of the number of atoms in `self` and `other`. Notes ----- The resulting trajectory will have the unitcell and time information the left operand. Examples -------- >>> t1 = md.load('traj1.h5') >>> t2 = md.load('traj2.h5') >>> # even when t2 contains no unitcell information >>> t2.unitcell_vectors = None >>> stacked = t1.stack(t2) >>> # the stacked trajectory inherits the unitcell information >>> # from the first trajectory >>> np.all(stacked.unitcell_vectors == t1.unitcell_vectors) True Parameters ---------- other : Trajectory The other trajectory to join See Also -------- join : join two trajectories along the time/frame axis. """ if not isinstance(other, Trajectory): raise TypeError('You can only stack two Trajectory instances') if self.n_frames != other.n_frames: raise ValueError('Number of frames in self (%d) is not equal ' 'to number of frames in other (%d)' % (self.n_frames, other.n_frames)) if self.topology is not None: topology = self.topology.join(other.topology) else: topology = None xyz = np.hstack((self.xyz, other.xyz)) return self.__class__(xyz=xyz, topology=topology, unitcell_angles=self.unitcell_angles, unitcell_lengths=self.unitcell_lengths, time=self.time) def __getitem__(self, key): "Get a slice of this trajectory" return self.slice(key) def slice(self, key, copy=True): """Slice trajectory, by extracting one or more frames into a separate object This method can also be called using index bracket notation, i.e `traj[1] == traj.slice(1)` Parameters ---------- key : {int, np.ndarray, slice} The slice to take. Can be either an int, a list of ints, or a slice object. copy : bool, default=True Copy the arrays after slicing. If you set this to false, then if you modify a slice, you'll modify the original array since they point to the same data. 
""" xyz = self.xyz[key] time = self.time[key] unitcell_lengths, unitcell_angles = None, None if self.unitcell_angles is not None: unitcell_angles = self.unitcell_angles[key] if self.unitcell_lengths is not None: unitcell_lengths = self.unitcell_lengths[key] if copy: xyz = xyz.copy() time = time.copy() topology = deepcopy(self._topology) if self.unitcell_angles is not None: unitcell_angles = unitcell_angles.copy() if self.unitcell_lengths is not None: unitcell_lengths = unitcell_lengths.copy() else: topology = self._topology newtraj = self.__class__( xyz, topology, time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) if self._rmsd_traces is not None: newtraj._rmsd_traces = np.array(self._rmsd_traces[key], ndmin=1, copy=True) return newtraj def __init__(self, xyz, topology, time=None, unitcell_lengths=None, unitcell_angles=None): # install the topology into the object first, so that when setting # the xyz, we can check that it lines up (e.g. n_atoms), with the topology self.topology = topology self.xyz = xyz # _rmsd_traces are the inner product of each centered conformation, # which are required for computing RMSD. Normally these values are # calculated on the fly in the cython code (rmsd/_rmsd.pyx), but # optionally, we enable the use precomputed values which can speed # up the calculation (useful for clustering), but potentially be unsafe # if self._xyz is modified without a corresponding change to # self._rmsd_traces. This array is populated computed by # center_conformations, and no other methods should really touch it. self._rmsd_traces = None # box has no default, it'll just be none normally self.unitcell_lengths = unitcell_lengths self.unitcell_angles = unitcell_angles # time will take the default 1..N self._time_default_to_arange = (time is None) if time is None: time = np.arange(len(self.xyz)) self.time = time if (topology is not None) and (topology._numAtoms != self.n_atoms): raise ValueError("Number of atoms in xyz (%s) and " "in topology (%s) don't match" % (self.n_atoms, topology._numAtoms)) def openmm_positions(self, frame): """OpenMM-compatable positions of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPositions(t.openmm_positions(0)) Parameters ---------- frame : int The index of frame of the trajectory that you wish to extract Returns ------- positions : list The cartesian coordinates of specific trajectory frame, formatted for input to OpenMM """ from simtk.openmm import Vec3 from simtk.unit import nanometer Pos = [] for xyzi in self.xyz[frame]: Pos.append(Vec3(xyzi[0], xyzi[1], xyzi[2])) return Pos * nanometer def openmm_boxes(self, frame): """OpenMM-compatable box vectors of a single frame. Examples -------- >>> t = md.load('trajectory.h5') >>> context.setPeriodicBoxVectors(t.openmm_positions(0)) Parameters ---------- frame : int Return box for this single frame. Returns ------- box : tuple The periodic box vectors for this frame, formatted for input to OpenMM. """ from simtk.openmm import Vec3 from simtk.unit import nanometer vectors = self.unitcell_vectors[frame] if vectors is None: raise ValueError("this trajectory does not contain box size information") v1, v2, v3 = vectors return (Vec3(*v1), Vec3(*v2), Vec3(*v3)) * nanometer @staticmethod # im not really sure if the load function should be just a function or a method on the class # so effectively, lets make it both? 
def load(filenames, **kwargs): """Load a trajectory from disk Parameters ---------- filenames : {str, [str]} Either a string or list of strings Other Parameters ---------------- As requested by the various load functions -- it depends on the extension """ return load(filenames, **kwargs) def _savers(self): """Return a dictionary mapping extensions to the appropriate format-specific save function""" return {'.xtc': self.save_xtc, '.trr': self.save_trr, '.pdb': self.save_pdb, '.pdb.gz': self.save_pdb, '.dcd': self.save_dcd, '.h5': self.save_hdf5, '.binpos': self.save_binpos, '.nc': self.save_netcdf, '.netcdf': self.save_netcdf, '.ncrst' : self.save_netcdfrst, '.crd': self.save_mdcrd, '.mdcrd': self.save_mdcrd, '.ncdf': self.save_netcdf, '.lh5': self.save_lh5, '.lammpstrj': self.save_lammpstrj, '.xyz': self.save_xyz, '.xyz.gz': self.save_xyz, '.gro': self.save_gro, '.rst7' : self.save_amberrst7, '.tng' : self.save_tng, } def save(self, filename, **kwargs): """Save trajectory to disk, in a format determined by the filename extension Parameters ---------- filename : str filesystem path in which to save the trajectory. The extension will be parsed and will control the format. Other Parameters ---------------- lossy : bool For .h5 or .lh5, whether or not to use compression. no_models: bool For .pdb. TODO: Document this? force_overwrite : bool If `filename` already exists, overwrite it. """ # grab the extension of the filename extension = _get_extension(filename) savers = self._savers() try: saver = savers[extension] except KeyError: raise IOError('Sorry, no saver for filename=%s (extension=%s) ' 'was found. I can only save files ' 'with extensions in %s' % (filename, extension, savers.keys())) # run the saver, and return whatever output it gives return saver(filename, **kwargs) def save_hdf5(self, filename, force_overwrite=True): """Save trajectory to MDTraj HDF5 format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with HDF5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) f.topology = self.topology def save_lammpstrj(self, filename, force_overwrite=True): """Save trajectory to LAMMPS custom dump format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with LAMMPSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_xyz(self, filename, force_overwrite=True): """Save trajectory to .xyz format. 
Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XYZTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), types=[a.name for a in self.top.atoms]) def save_pdb(self, filename, force_overwrite=True, bfactors=None): """Save trajectory to RCSB PDB format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there bfactors : array_like, default=None, shape=(n_frames, n_atoms) or (n_atoms,) Save bfactors with pdb file. If the array is two dimensional it should contain a bfactor for each atom in each frame of the trajectory. Otherwise, the same bfactor will be saved in each frame. """ self._check_valid_unitcell() if not bfactors is None: if len(np.array(bfactors).shape) == 1: if len(bfactors) != self.n_atoms: raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) bfactors = [bfactors] * self.n_frames else: if np.array(bfactors).shape != (self.n_frames, self.n_atoms): raise ValueError("bfactors %s should be shaped as (n_frames, n_atoms) or (n_atoms,)" % str(np.array(bfactors).shape)) else: bfactors = [None] * self.n_frames with PDBTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: for i in xrange(self.n_frames): if self._have_unitcell: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i], unitcell_lengths=in_units_of(self.unitcell_lengths[i], Trajectory._distance_unit, f.distance_unit), unitcell_angles=self.unitcell_angles[i]) else: f.write(in_units_of(self._xyz[i], Trajectory._distance_unit, f.distance_unit), self.topology, modelIndex=i, bfactors=bfactors[i]) def save_xtc(self, filename, force_overwrite=True): """Save trajectory to Gromacs XTC format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with XTCTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit)) def save_trr(self, filename, force_overwrite=True): """Save trajectory to Gromacs TRR format Notes ----- Only the xyz coordinates and the time are saved, the velocities and forces in the trr will be zeros Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with TRRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), time=self.time, box=in_units_of(self.unitcell_vectors, Trajectory._distance_unit, f.distance_unit)) def save_dcd(self, filename, force_overwrite=True): """Save trajectory to CHARMM/NAMD DCD format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DCDTrajectoryFile(filename, 'w', 
force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_dtr(self, filename, force_overwrite=True): """Save trajectory to DESMOND DTR format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filenames, if its already there """ self._check_valid_unitcell() with DTRTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles, times=self.time) def save_binpos(self, filename, force_overwrite=True): """Save trajectory to AMBER BINPOS format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ with BINPOSTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit)) def save_mdcrd(self, filename, force_overwrite=True): """Save trajectory to AMBER mdcrd format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ self._check_valid_unitcell() if self._have_unitcell: if not np.all(self.unitcell_angles == 90): raise ValueError('Only rectilinear boxes can be saved to mdcrd files. ' 'Your angles are {}'.format(self.unitcell_angles)) with MDCRDTrajectoryFile(filename, mode='w', force_overwrite=force_overwrite) as f: f.write(xyz=in_units_of(self.xyz, Trajectory._distance_unit, f.distance_unit), cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit)) def save_netcdf(self, filename, force_overwrite=True): """Save trajectory in AMBER NetCDF format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there """ self._check_valid_unitcell() with NetCDFTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=in_units_of(self._xyz, Trajectory._distance_unit, NetCDFTrajectoryFile.distance_unit), time=self.time, cell_lengths=in_units_of(self.unitcell_lengths, Trajectory._distance_unit, f.distance_unit), cell_angles=self.unitcell_angles) def save_netcdfrst(self, filename, force_overwrite=True): """Save trajectory in AMBER NetCDF restart format Parameters ---------- filename : str filesystem path in which to save the restart force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there Notes ----- NetCDF restart files can only store a single frame. If only one frame exists, "filename" will be written. 
Otherwise, "filename.#" will be written, where # is a zero-padded number from 1 to the total number of frames in the trajectory """ self._check_valid_unitcell() if self.n_frames == 1: with AmberNetCDFRestartFile(filename, 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) f.write(coordinates=coordinates, time=self.time[0], cell_lengths=lengths, cell_angles=self.unitcell_angles) else: fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames))) for i in xrange(self.n_frames): with AmberNetCDFRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberNetCDFRestartFile.distance_unit) f.write(coordinates=coordinates[i], time=self.time[i], cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i]) def save_amberrst7(self, filename, force_overwrite=True): """Save trajectory in AMBER ASCII restart format Parameters ---------- filename : str filesystem path in which to save the restart force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there Notes ----- Amber restart files can only store a single frame. If only one frame exists, "filename" will be written. Otherwise, "filename.#" will be written, where # is a zero-padded number from 1 to the total number of frames in the trajectory """ self._check_valid_unitcell() if self.n_frames == 1: with AmberRestartFile(filename, 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberRestartFile.distance_unit) f.write(coordinates=coordinates, time=self.time[0], cell_lengths=lengths, cell_angles=self.unitcell_angles) else: fmt = '%s.%%0%dd' % (filename, len(str(self.n_frames))) for i in xrange(self.n_frames): with AmberRestartFile(fmt % (i+1), 'w', force_overwrite=force_overwrite) as f: coordinates = in_units_of(self._xyz, Trajectory._distance_unit, AmberRestartFile.distance_unit) lengths = in_units_of(self.unitcell_lengths, Trajectory._distance_unit, AmberRestartFile.distance_unit) f.write(coordinates=coordinates[i], time=self.time[0], cell_lengths=lengths[i], cell_angles=self.unitcell_angles[i]) def save_lh5(self, filename, force_overwrite=True): """Save trajectory in deprecated MSMBuilder2 LH5 (lossy HDF5) format. 
Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if it's already there """ with LH5TrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(coordinates=self.xyz) f.topology = self.topology def save_gro(self, filename, force_overwrite=True, precision=3): """Save trajectory in Gromacs .gro format Parameters ---------- filename : str Path to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at that filename if it exists precision : int, default=3 The number of decimal places to use for coordinates in GRO file """ self._check_valid_unitcell() with GroTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(self.xyz, self.topology, self.time, self.unitcell_vectors, precision=precision) def save_tng(self, filename, force_overwrite=True): """Save trajectory to Gromacs TNG format Parameters ---------- filename : str filesystem path in which to save the trajectory force_overwrite : bool, default=True Overwrite anything that exists at filename, if its already there """ self._check_valid_unitcell() with TNGTrajectoryFile(filename, 'w', force_overwrite=force_overwrite) as f: f.write(self.xyz, time=self.time, box=self.unitcell_vectors) def center_coordinates(self, mass_weighted=False): """Center each trajectory frame at the origin (0,0,0). This method acts inplace on the trajectory. The centering can be either uniformly weighted (mass_weighted=False) or weighted by the mass of each atom (mass_weighted=True). Parameters ---------- mass_weighted : bool, optional (default = False) If True, weight atoms by mass when removing COM. Returns ------- self """ if mass_weighted and self.top is not None: self.xyz -= distance.compute_center_of_mass(self)[:, np.newaxis, :] else: self._rmsd_traces = _rmsd._center_inplace_atom_major(self._xyz) return self @deprecated('restrict_atoms was replaced by atom_slice and will be removed in 2.0') def restrict_atoms(self, atom_indices, inplace=True): """Retain only a subset of the atoms in a trajectory Deletes atoms not in `atom_indices`, and re-indexes those that remain Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of atom indices to keep. inplace : bool, default=True If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the restricted atoms, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. """ return self.atom_slice(atom_indices, inplace=inplace) def atom_slice(self, atom_indices, inplace=False): """Create a new trajectory from a subset of atoms Parameters ---------- atom_indices : array-like, dtype=int, shape=(n_atoms) List of indices of atoms to retain in the new trajectory. inplace : bool, default=False If ``True``, the operation is done inplace, modifying ``self``. Otherwise, a copy is returned with the sliced atoms, and ``self`` is not modified. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. 
See Also -------- stack : stack multiple trajectories along the atom axis """ xyz = np.array(self.xyz[:, atom_indices], order='C') topology = None if self._topology is not None: topology = self._topology.subset(atom_indices) if inplace: if self._topology is not None: self._topology = topology self._xyz = xyz return self unitcell_lengths = unitcell_angles = None if self._have_unitcell: unitcell_lengths = self._unitcell_lengths.copy() unitcell_angles = self._unitcell_angles.copy() time = self._time.copy() return Trajectory(xyz=xyz, topology=topology, time=time, unitcell_lengths=unitcell_lengths, unitcell_angles=unitcell_angles) def remove_solvent(self, exclude=None, inplace=False): """ Create a new trajectory without solvent atoms Parameters ---------- exclude : array-like, dtype=str, shape=(n_solvent_types) List of solvent residue names to retain in the new trajectory. inplace : bool, default=False The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. """ solvent_types = list(_SOLVENT_TYPES) if exclude is not None: if isinstance(exclude, str): raise TypeError('exclude must be array-like') if not isinstance(exclude, Iterable): raise TypeError('exclude is not iterable') for type in exclude: if type not in solvent_types: raise ValueError(type + 'is not a valid solvent type') solvent_types.remove(type) atom_indices = [atom.index for atom in self.topology.atoms if atom.residue.name not in solvent_types] return self.atom_slice(atom_indices, inplace = inplace) def smooth(self, width, order=3, atom_indices=None, inplace=False): """Smoothen a trajectory using a zero-delay Buttersworth filter. Please note that for optimal results the trajectory should be properly aligned prior to smoothing (see `md.Trajectory.superpose`). Parameters ---------- width : int This acts very similar to the window size in a moving average smoother. In this implementation, the frequency of the low-pass filter is taken to be two over this width, so it's like "half the period" of the sinusiod where the filter starts to kick in. Must be an integer greater than one. order : int, optional, default=3 The order of the filter. A small odd number is recommended. Higher order filters cutoff more quickly, but have worse numerical properties. atom_indices : array-like, dtype=int, shape=(n_atoms), default=None List of indices of atoms to retain in the new trajectory. Default is set to `None`, which applies smoothing to all atoms. inplace : bool, default=False The return value is either ``self``, or the new trajectory, depending on the value of ``inplace``. Returns ------- traj : md.Trajectory The return value is either ``self``, or the new smoothed trajectory, depending on the value of ``inplace``. References ---------- .. [1] "FiltFilt". Scipy Cookbook. SciPy. <http://www.scipy.org/Cookbook/FiltFilt>. """ from scipy.signal import lfilter, lfilter_zi, filtfilt, butter if width < 2.0 or not isinstance(width, int): raise ValueError('width must be an integer greater than 1.') if not atom_indices: atom_indices = range(self.n_atoms) # find nearest odd integer pad = int(np.ceil((width + 1)/2)*2 - 1) # Use lfilter_zi to choose the initial condition of the filter. 
b, a = butter(order, 2.0 / width) zi = lfilter_zi(b, a) xyz = self.xyz.copy() for i in atom_indices: for j in range(3): signal = xyz[:, i, j] padded = np.r_[signal[pad - 1: 0: -1], signal, signal[-1: -pad: -1]] # Apply the filter to the width. z, _ = lfilter(b, a, padded, zi=zi*padded[0]) # Apply the filter again, to have a result filtered at an order # the same as filtfilt. z2, _ = lfilter(b, a, z, zi=zi*z[0]) # Use filtfilt to apply the filter. output = filtfilt(b, a, padded) xyz[:, i, j] = output[(pad-1): -(pad-1)] if not inplace: return Trajectory(xyz=xyz, topology=self.topology, time=self.time, unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles) self.xyz = xyz def _check_valid_unitcell(self): """Do some sanity checking on self.unitcell_lengths and self.unitcell_angles """ if self.unitcell_lengths is not None and self.unitcell_angles is None: raise AttributeError('unitcell length data exists, but no angles') if self.unitcell_lengths is None and self.unitcell_angles is not None: raise AttributeError('unitcell angles data exists, but no lengths') if self.unitcell_lengths is not None and np.any(self.unitcell_lengths < 0): raise ValueError('unitcell length < 0') if self.unitcell_angles is not None and np.any(self.unitcell_angles < 0): raise ValueError('unitcell angle < 0') @property def _have_unitcell(self): return self._unitcell_lengths is not None and self._unitcell_angles is not None def make_molecules_whole(self, inplace=False, sorted_bonds=None): """Only make molecules whole Parameters ---------- inplace : bool If False, a new Trajectory is created and returned. If True, this Trajectory is modified directly. sorted_bonds : array of shape (n_bonds, 2) Pairs of atom indices that define bonds, in sorted order. If not specified, these will be determined from the trajectory's topology. See Also -------- image_molecules() """ unitcell_vectors = self.unitcell_vectors if unitcell_vectors is None: raise ValueError('This Trajectory does not define a periodic unit cell') if inplace: result = self else: result = Trajectory(xyz=self.xyz, topology=self.topology, time=self.time, unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles) if sorted_bonds is None: sorted_bonds = sorted(self._topology.bonds, key=lambda bond: bond[0].index) sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds]) box = np.asarray(result.unitcell_vectors, order='c') _geometry.whole_molecules(result.xyz, box, sorted_bonds) if not inplace: return result return self def image_molecules(self, inplace=False, anchor_molecules=None, other_molecules=None, sorted_bonds=None, make_whole=True): """Recenter and apply periodic boundary conditions to the molecules in each frame of the trajectory. This method is useful for visualizing a trajectory in which molecules were not wrapped to the periodic unit cell, or in which the macromolecules are not centered with respect to the solvent. It tries to be intelligent in deciding what molecules to center, so you can simply call it and trust that it will "do the right thing". Parameters ---------- inplace : bool, default=False If False, a new Trajectory is created and returned. If True, this Trajectory is modified directly. anchor_molecules : list of atom sets, optional, default=None Molecule that should be treated as an "anchor". These molecules will be centered in the box and put near each other. If not specified, anchor molecules are guessed using a heuristic. 
other_molecules : list of atom sets, optional, default=None Molecules that are not anchors. If not specified, these will be molecules other than the anchor molecules sorted_bonds : array of shape (n_bonds, 2) Pairs of atom indices that define bonds, in sorted order. If not specified, these will be determined from the trajectory's topology. Only relevant if ``make_whole`` is True. make_whole : bool Whether to make molecules whole. Returns ------- traj : md.Trajectory The return value is either ``self`` or the new trajectory, depending on the value of ``inplace``. See Also -------- Topology.guess_anchor_molecules """ unitcell_vectors = self.unitcell_vectors if unitcell_vectors is None: raise ValueError('This Trajectory does not define a periodic unit cell') if anchor_molecules is None: anchor_molecules = self.topology.guess_anchor_molecules() if other_molecules is None: # Determine other molecules by which molecules are not anchor molecules molecules = self._topology.find_molecules() other_molecules = [mol for mol in molecules if mol not in anchor_molecules] # Expand molecules into atom indices anchor_molecules_atom_indices = [np.fromiter((a.index for a in mol), dtype=np.int32) for mol in anchor_molecules] other_molecules_atom_indices = [np.fromiter((a.index for a in mol), dtype=np.int32) for mol in other_molecules] if inplace: result = self else: result = Trajectory(xyz=self.xyz, topology=self.topology, time=self.time, unitcell_lengths=self.unitcell_lengths, unitcell_angles=self.unitcell_angles) if make_whole and sorted_bonds is None: sorted_bonds = sorted(self._topology.bonds, key=lambda bond: bond[0].index) sorted_bonds = np.asarray([[b0.index, b1.index] for b0, b1 in sorted_bonds]) elif not make_whole: sorted_bonds = None box = np.asarray(result.unitcell_vectors, order='c') _geometry.image_molecules(result.xyz, box, anchor_molecules_atom_indices, other_molecules_atom_indices, sorted_bonds) if not inplace: return result return self
msultan/mdtraj
mdtraj/core/trajectory.py
Python
lgpl-2.1
77,918
[ "Amber", "CHARMM", "Desmond", "Gromacs", "LAMMPS", "MDTraj", "NAMD", "NetCDF", "OpenMM" ]
734ee42327bd9bbfbb6da7ee9a831d8cae1bca73f66866df7fd1a4c5df09facd
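A minimal usage sketch of the mdtraj Trajectory methods defined in the file above (join, stack, slicing, superpose, save). The trajectory and topology file names are hypothetical placeholders; any files readable by md.load behave the same way, and stack assumes both runs contain the same number of frames.

import mdtraj as md

t1 = md.load('run1.dcd', top='system.pdb')   # hypothetical input files
t2 = md.load('run2.dcd', top='system.pdb')

combined = t1.join(t2)          # concatenate along the time/frame axis (same as t1 + t2)
side_by_side = t1.stack(t2)     # concatenate along the atom axis
every_tenth = combined[::10]    # bracket notation delegates to slice()

# Least-squares fit every frame onto frame 0 of t1, then write the result;
# save() chooses the output format from the file extension.
every_tenth.superpose(t1, frame=0)
every_tenth.save('aligned_subsampled.xtc')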
# -*- coding: utf-8 -*- # # PyRabbit documentation build configuration file, created by # sphinx-quickstart on Tue Sep 20 22:48:33 2011. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. #templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'PyRabbit' copyright = u'2011, Brian K. Jones' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.1.0' # The full version, including alpha/beta/rc tags. release = '1.1.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'nature' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. 
If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'PyRabbitdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'PyRabbit.tex', u'PyRabbit Documentation', u'Brian K. Jones', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pyrabbit', u'PyRabbit Documentation', [u'Brian K. Jones'], 1) ]
chaos95/pyrabbit
docs/conf.py
Python
bsd-3-clause
7,098
[ "Brian" ]
182c2337c8ff4da852d11570ca75eb17873a7f013707e7069b95fe48a8b2ec47
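The conf.py above hard-codes version and release. A small sketch of one common alternative, reading them from the package at build time; this assumes pyrabbit exposes a __version__ attribute, which the file above does not guarantee.

import pyrabbit

# Fall back to the hard-coded value if the attribute is missing
# (assumption: the package may not define __version__).
version = release = getattr(pyrabbit, '__version__', '1.1.0')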
# Copyright 2009 by Cymon J. Cox. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Command line wrapper for the multiple alignment program Clustal W. """ from __future__ import print_function import os from Bio.Application import _Option, _Switch, AbstractCommandline class ClustalwCommandline(AbstractCommandline): """Command line wrapper for clustalw (version one or two). http://www.clustal.org/ Example: -------- >>> from Bio.Align.Applications import ClustalwCommandline >>> in_file = "unaligned.fasta" >>> clustalw_cline = ClustalwCommandline("clustalw2", infile=in_file) >>> print(clustalw_cline) clustalw2 -infile=unaligned.fasta You would typically run the command line with clustalw_cline() or via the Python subprocess module, as described in the Biopython tutorial. Citation: --------- Larkin MA, Blackshields G, Brown NP, Chenna R, McGettigan PA, McWilliam H, Valentin F, Wallace IM, Wilm A, Lopez R, Thompson JD, Gibson TJ, Higgins DG. (2007). Clustal W and Clustal X version 2.0. Bioinformatics, 23, 2947-2948. Last checked against versions: 1.83 and 2.1 """ # TODO - Should we default to cmd="clustalw2" now? def __init__(self, cmd="clustalw", **kwargs): self.parameters = \ [ _Option(["-infile", "-INFILE", "INFILE", "infile"], "Input sequences.", filename=True), _Option(["-profile1", "-PROFILE1", "PROFILE1", "profile1"], "Profiles (old alignment).", filename=True), _Option(["-profile2", "-PROFILE2", "PROFILE2", "profile2"], "Profiles (old alignment).", filename=True), # ################# VERBS (do things) ############################# _Switch(["-options", "-OPTIONS", "OPTIONS", "options"], "List the command line parameters"), _Switch(["-help", "-HELP", "HELP", "help"], "Outline the command line params."), _Switch(["-check", "-CHECK", "CHECK", "check"], "Outline the command line params."), _Switch(["-fullhelp", "-FULLHELP", "FULLHELP", "fullhelp"], "Output full help content."), _Switch(["-align", "-ALIGN", "ALIGN", "align"], "Do full multiple alignment."), _Switch(["-tree", "-TREE", "TREE", "tree"], "Calculate NJ tree."), _Switch(["-pim", "-PIM", "PIM", "pim"], "Output percent identity matrix (while calculating the tree)."), _Option(["-bootstrap", "-BOOTSTRAP", "BOOTSTRAP", "bootstrap"], "Bootstrap a NJ tree (n= number of bootstraps; def. 
= 1000).", checker_function=lambda x: isinstance(x, int)), _Switch(["-convert", "-CONVERT", "CONVERT", "convert"], "Output the input sequences in a different file format."), # #################### PARAMETERS (set things) ######################### # ***General settings:**** # Makes no sense in biopython # _Option(["-interactive", "-INTERACTIVE", "INTERACTIVE", "interactive"], # [], # lambda x: 0, # Does not take value # False, # "read command line, then enter normal interactive menus", # False), _Switch(["-quicktree", "-QUICKTREE", "QUICKTREE", "quicktree"], "Use FAST algorithm for the alignment guide tree"), _Option(["-type", "-TYPE", "TYPE", "type"], "PROTEIN or DNA sequences", checker_function=lambda x: x in ["PROTEIN", "DNA", "protein", "dna"]), _Switch(["-negative", "-NEGATIVE", "NEGATIVE", "negative"], "Protein alignment with negative values in matrix"), _Option(["-outfile", "-OUTFILE", "OUTFILE", "outfile"], "Output sequence alignment file name", filename=True), _Option(["-output", "-OUTPUT", "OUTPUT", "output"], "Output format: CLUSTAL(default), GCG, GDE, PHYLIP, PIR, NEXUS and FASTA", checker_function=lambda x: x in ["CLUSTAL", "GCG", "GDE", "PHYLIP", "PIR", "NEXUS", "FASTA", "clustal", "gcg", "gde", "phylip", "pir", "nexus", "fasta"]), _Option(["-outorder", "-OUTORDER", "OUTORDER", "outorder"], "Output taxon order: INPUT or ALIGNED", checker_function=lambda x: x in ["INPUT", "input", "ALIGNED", "aligned"]), _Option(["-case", "-CASE", "CASE", "case"], "LOWER or UPPER (for GDE output only)", checker_function=lambda x: x in ["UPPER", "upper", "LOWER", "lower"]), _Option(["-seqnos", "-SEQNOS", "SEQNOS", "seqnos"], "OFF or ON (for Clustal output only)", checker_function=lambda x: x in ["ON", "on", "OFF", "off"]), _Option(["-seqno_range", "-SEQNO_RANGE", "SEQNO_RANGE", "seqno_range"], "OFF or ON (NEW- for all output formats)", checker_function=lambda x: x in ["ON", "on", "OFF", "off"]), _Option(["-range", "-RANGE", "RANGE", "range"], "Sequence range to write starting m to m+n. " "Input as string eg. 
'24,200'"), _Option(["-maxseqlen", "-MAXSEQLEN", "MAXSEQLEN", "maxseqlen"], "Maximum allowed input sequence length", checker_function=lambda x: isinstance(x, int)), _Switch(["-quiet", "-QUIET", "QUIET", "quiet"], "Reduce console output to minimum"), _Option(["-stats", "-STATS", "STATS", "stats"], "Log some alignment statistics to file", filename=True), # ***Fast Pairwise Alignments:*** _Option(["-ktuple", "-KTUPLE", "KTUPLE", "ktuple"], "Word size", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-topdiags", "-TOPDIAGS", "TOPDIAGS", "topdiags"], "Number of best diags.", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-window", "-WINDOW", "WINDOW", "window"], "Window around best diags.", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-pairgap", "-PAIRGAP", "PAIRGAP", "pairgap"], "Gap penalty", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-score", "-SCORE", "SCORE", "score"], "Either: PERCENT or ABSOLUTE", checker_function=lambda x: x in ["percent", "PERCENT", "absolute", "ABSOLUTE"]), # ***Slow Pairwise Alignments:*** _Option(["-pwmatrix", "-PWMATRIX", "PWMATRIX", "pwmatrix"], "Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename", checker_function=lambda x: x in ["BLOSUM", "PAM", "GONNET", "ID", "blosum", "pam", "gonnet", "id"] or os.path.exists(x), filename=True), _Option(["-pwdnamatrix", "-PWDNAMATRIX", "PWDNAMATRIX", "pwdnamatrix"], "DNA weight matrix=IUB, CLUSTALW or filename", checker_function=lambda x: x in ["IUB", "CLUSTALW", "iub", "clustalw"] or os.path.exists(x), filename=True), _Option(["-pwgapopen", "-PWGAPOPEN", "PWGAPOPEN", "pwgapopen"], "Gap opening penalty", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-pwgapext", "-PWGAPEXT", "PWGAPEXT", "pwgapext"], "Gap extension penalty", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), # ***Multiple Alignments:*** _Option(["-newtree", "-NEWTREE", "NEWTREE", "newtree"], "Output file name for newly created guide tree", filename=True), _Option(["-usetree", "-USETREE", "USETREE", "usetree"], "File name of guide tree", checker_function=lambda x: os.path.exists, filename=True), _Option(["-matrix", "-MATRIX", "MATRIX", "matrix"], "Protein weight matrix=BLOSUM, PAM, GONNET, ID or filename", checker_function=lambda x: x in ["BLOSUM", "PAM", "GONNET", "ID", "blosum", "pam", "gonnet", "id"] or os.path.exists(x), filename=True), _Option(["-dnamatrix", "-DNAMATRIX", "DNAMATRIX", "dnamatrix"], "DNA weight matrix=IUB, CLUSTALW or filename", checker_function=lambda x: x in ["IUB", "CLUSTALW", "iub", "clustalw"] or os.path.exists(x), filename=True), _Option(["-gapopen", "-GAPOPEN", "GAPOPEN", "gapopen"], "Gap opening penalty", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-gapext", "-GAPEXT", "GAPEXT", "gapext"], "Gap extension penalty", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Switch(["-endgaps", "-ENDGAPS", "ENDGAPS", "endgaps"], "No end gap separation pen."), _Option(["-gapdist", "-GAPDIST", "GAPDIST", "gapdist"], "Gap separation pen. 
range", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Switch(["-nopgap", "-NOPGAP", "NOPGAP", "nopgap"], "Residue-specific gaps off"), _Switch(["-nohgap", "-NOHGAP", "NOHGAP", "nohgap"], "Hydrophilic gaps off"), _Switch(["-hgapresidues", "-HGAPRESIDUES", "HGAPRESIDUES", "hgapresidues"], "List hydrophilic res."), _Option(["-maxdiv", "-MAXDIV", "MAXDIV", "maxdiv"], "% ident. for delay", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), # Already handled in General Settings section, but appears a second # time under Multiple Alignments in the help # _Option(["-type", "-TYPE", "TYPE", "type"], # "PROTEIN or DNA", # checker_function=lambda x: x in ["PROTEIN", "DNA", # "protein", "dna"]), _Option(["-transweight", "-TRANSWEIGHT", "TRANSWEIGHT", "transweight"], "Transitions weighting", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-iteration", "-ITERATION", "ITERATION", "iteration"], "NONE or TREE or ALIGNMENT", checker_function=lambda x: x in ["NONE", "TREE", "ALIGNMENT", "none", "tree", "alignment"]), _Option(["-numiter", "-NUMITER", "NUMITER", "numiter"], "maximum number of iterations to perform", checker_function=lambda x: isinstance(x, int)), _Switch(["-noweights", "-NOWEIGHTS", "NOWEIGHTS", "noweights"], "Disable sequence weighting"), # ***Profile Alignments:*** _Switch(["-profile", "-PROFILE", "PROFILE", "profile"], "Merge two alignments by profile alignment"), _Option(["-newtree1", "-NEWTREE1", "NEWTREE1", "newtree1"], "Output file name for new guide tree of profile1", filename=True), _Option(["-newtree2", "-NEWTREE2", "NEWTREE2", "newtree2"], "Output file for new guide tree of profile2", filename=True), _Option(["-usetree1", "-USETREE1", "USETREE1", "usetree1"], "File name of guide tree for profile1", checker_function=lambda x: os.path.exists, filename=True), _Option(["-usetree2", "-USETREE2", "USETREE2", "usetree2"], "File name of guide tree for profile2", checker_function=lambda x: os.path.exists, filename=True), # ***Sequence to Profile Alignments:*** _Switch(["-sequences", "-SEQUENCES", "SEQUENCES", "sequences"], "Sequentially add profile2 sequences to profile1 alignment"), # These are already handled in the Multiple Alignments section, # but appear a second time here in the help. 
# _Option(["-newtree", "-NEWTREE", "NEWTREE", "newtree"], # "File for new guide tree", # filename=True), # _Option(["-usetree", "-USETREE", "USETREE", "usetree"], # "File for old guide tree", # checker_function=lambda x: os.path.exists, # filename=True), # ***Structure Alignments:*** _Switch(["-nosecstr1", "-NOSECSTR1", "NOSECSTR1", "nosecstr1"], "Do not use secondary structure-gap penalty mask for profile 1"), _Switch(["-nosecstr2", "-NOSECSTR2", "NOSECSTR2", "nosecstr2"], "Do not use secondary structure-gap penalty mask for profile 2"), _Option(["-secstrout", "-SECSTROUT", "SECSTROUT", "secstrout"], "STRUCTURE or MASK or BOTH or NONE output in alignment file", checker_function=lambda x: x in ["STRUCTURE", "MASK", "BOTH", "NONE", "structure", "mask", "both", "none"]), _Option(["-helixgap", "-HELIXGAP", "HELIXGAP", "helixgap"], "Gap penalty for helix core residues", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-strandgap", "-STRANDGAP", "STRANDGAP", "strandgap"], "gap penalty for strand core residues", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-loopgap", "-LOOPGAP", "LOOPGAP", "loopgap"], "Gap penalty for loop regions", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-terminalgap", "-TERMINALGAP", "TERMINALGAP", "terminalgap"], "Gap penalty for structure termini", checker_function=lambda x: isinstance(x, int) or isinstance(x, float)), _Option(["-helixendin", "-HELIXENDIN", "HELIXENDIN", "helixendin"], "Number of residues inside helix to be treated as terminal", checker_function=lambda x: isinstance(x, int)), _Option(["-helixendout", "-HELIXENDOUT", "HELIXENDOUT", "helixendout"], "Number of residues outside helix to be treated as terminal", checker_function=lambda x: isinstance(x, int)), _Option(["-strandendin", "-STRANDENDIN", "STRANDENDIN", "strandendin"], "Number of residues inside strand to be treated as terminal", checker_function=lambda x: isinstance(x, int)), _Option(["-strandendout", "-STRANDENDOUT", "STRANDENDOUT", "strandendout"], "Number of residues outside strand to be treated as terminal", checker_function=lambda x: isinstance(x, int)), # ***Trees:*** _Option(["-outputtree", "-OUTPUTTREE", "OUTPUTTREE", "outputtree"], "nj OR phylip OR dist OR nexus", checker_function=lambda x: x in ["NJ", "PHYLIP", "DIST", "NEXUS", "nj", "phylip", "dist", "nexus"]), _Option(["-seed", "-SEED", "SEED", "seed"], "Seed number for bootstraps.", checker_function=lambda x: isinstance(x, int)), _Switch(["-kimura", "-KIMURA", "KIMURA", "kimura"], "Use Kimura's correction."), _Switch(["-tossgaps", "-TOSSGAPS", "TOSSGAPS", "tossgaps"], "Ignore positions with gaps."), _Option(["-bootlabels", "-BOOTLABELS", "BOOTLABELS", "bootlabels"], "Node OR branch position of bootstrap values in tree display", checker_function=lambda x: x in ["NODE", "BRANCH", "node", "branch"]), _Option(["-clustering", "-CLUSTERING", "CLUSTERING", "clustering"], "NJ or UPGMA", checker_function=lambda x: x in ["NJ", "UPGMA", "nj", "upgma"]) ] AbstractCommandline.__init__(self, cmd, **kwargs) if __name__ == "__main__": from Bio._utils import run_doctest run_doctest()
zjuchenyuan/BioWeb
Lib/Bio/Align/Applications/_Clustalw.py
Python
mit
19,629
[ "Biopython" ]
991f61c5071fb12d0029bc76c051fb1cc2a336f458b8708a05e04f601f64f1bd
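A short sketch of driving the ClustalwCommandline wrapper defined above, in the spirit of its docstring example. "unaligned.fasta" and "aligned.aln" are hypothetical file names, and clustalw2 must be available on the PATH.

from Bio.Align.Applications import ClustalwCommandline

cline = ClustalwCommandline("clustalw2",
                            infile="unaligned.fasta",
                            outfile="aligned.aln",
                            output="CLUSTAL",
                            type="PROTEIN",
                            gapopen=10,
                            gapext=0.2)
print(cline)  # the exact command line that will be run

# Invoking the wrapper runs the program and captures its output streams,
# as described in the Biopython tutorial.
stdout, stderr = cline()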
# Copyright (c) 2008-2010, Michael Gorven, Stefano Rivera, Russell Cloran, # Adrian Moisey # Released under terms of the MIT/X/Expat Licence. See COPYING for details. from crypt import crypt import base64 import re from ibid.compat import hashlib from ibid.plugins import Processor, match, authorise features = {} features['hash'] = { 'description': u'Calculates numerous cryptographic hash functions.', 'categories': ('calculate',), } class Hash(Processor): usage = u"""(md5|sha1|sha224|sha256|sha384|sha512) <string> crypt <string> <salt>""" features = ('hash',) @match(r'^(md5|sha1|sha224|sha256|sha384|sha512)(?:sum)?\s+(.+?)$') def hash(self, event, hash, string): func = getattr(hashlib, hash.lower()) event.addresponse(unicode(func(string.encode('utf-8')).hexdigest())) @match(r'^crypt\s+(.+)\s+(\S+)$') def handle_crypt(self, event, string, salt): event.addresponse(unicode(crypt(string.encode('utf-8'), salt.encode('utf-8')))) features['base64'] = { 'description': u'Encodes and decodes base 16, 32 and 64. Assumes UTF-8.', 'categories': ('calculate', 'convert', 'development',), } class Base64(Processor): usage = u'base(16|32|64) (encode|decode) <string>' features = ('base64',) @match(r'^b(?:ase)?(16|32|64)\s*(enc|dec)(?:ode)?\s+(.+?)$') def base64(self, event, base, operation, string): operation = operation.lower() func = getattr(base64, 'b%s%sode' % (base, operation)) if operation == 'dec': try: bytes = func(string) event.addresponse(u"Assuming UTF-8: '%s'", unicode(bytes, 'utf-8', 'strict')) except TypeError, e: event.addresponse(u"Invalid base%(base)s: %(error)s", {'base': base, 'error': unicode(e)}) except UnicodeDecodeError: event.addresponse(u'Not UTF-8: %s', unicode(repr(bytes))) else: event.addresponse(unicode(func(string.encode('utf-8')))) features['rot13'] = { 'description': u'Transforms a string with ROT13.', 'categories': ('convert', 'fun',), } class Rot13(Processor): usage = u'rot13 <string>' features = ('rot13',) @match(r'^rot13\s+(.+)$') def rot13(self, event, string): repl = lambda x: x.group(0).encode('rot13') event.addresponse(re.sub('[a-zA-Z]+', repl, string)) features['dvorak'] = { 'description': u'Makes text typed on a QWERTY keyboard as if it was Dvorak work, and vice-versa', 'categories': ('convert', 'fun',), } class Dvorak(Processor): usage = u"""(aoeu|asdf) <text>""" features = ('dvorak',) # List of characters on each keyboard layout dvormap = u"""',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ[]{}|""" qwermap = u"""qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?-=_+|""" # Typed by a QWERTY typist on a Dvorak-mapped keyboard typed_on_dvorak = dict(zip(map(ord, dvormap), qwermap)) # Typed by a Dvorak typist on a QWERTY-mapped keyboard typed_on_qwerty = dict(zip(map(ord, qwermap), dvormap)) @match(r'^(?:asdf|dvorak)\s+(.+)$') def convert_from_qwerty(self, event, text): event.addresponse(text.translate(self.typed_on_qwerty)) @match(r'^(?:aoeu|qwerty)\s+(.+)$') def convert_from_dvorak(self, event, text): event.addresponse(text.translate(self.typed_on_dvorak)) features['retest'] = { 'description': u'Checks whether a regular expression matches a given ' u'string.', 'categories': ('development',), } class ReTest(Processor): usage = u'does <pattern> match <string>' features = ('retest',) permission = 'regex' @match(r'^does\s+(.+?)\s+match\s+(.+?)$') @authorise(fallthrough=False) def retest(self, event, regex, string): event.addresponse(re.search(regex, string) and u'Yes' or u'No') features['morse'] = { 'description': u'Translates messages 
into and out of morse code.', 'categories': ('convert', 'fun',), } class Morse(Processor): usage = u'morse (text|morsecode)' features = ('morse',) _table = { 'A': ".-", 'B': "-...", 'C': "-.-.", 'D': "-..", 'E': ".", 'F': "..-.", 'G': "--.", 'H': "....", 'I': "..", 'J': ".---", 'K': "-.-", 'L': ".-..", 'M': "--", 'N': "-.", 'O': "---", 'P': ".--.", 'Q': "--.-", 'R': ".-.", 'S': "...", 'T': "-", 'U': "..-", 'V': "...-", 'W': ".--", 'X': "-..-", 'Y': "-.--", 'Z': "--..", '0': "-----", '1': ".----", '2': "..---", '3': "...--", '4': "....-", '5': ".....", '6': "-....", '7': "--...", '8': "---..", '9': "----.", ' ': " ", '.': ".-.-.-", ',': "--..--", '?': "..--..", ':': "---...", ';': "-.-.-.", '-': "-....-", '_': "..--.-", '"': ".-..-.", "'": ".----.", '/': "-..-.", '(': "-.--.", ')': "-.--.-", '=': "-...-", } _rtable = dict((v, k) for k, v in _table.items()) def _text2morse(self, text): return u" ".join(self._table.get(c.upper(), c) for c in text) def _morse2text(self, morse): toks = morse.split(u' ') return u"".join(self._rtable.get(t, u' ') for t in toks) @match(r'^morse\s*(.*)$', 'deaddressed') def morse(self, event, message): if not (set(message) - set(u'-./ \t\n')): event.addresponse(u'Decodes as %s', self._morse2text(message)) else: event.addresponse(u'Encodes as %s', self._text2morse(message)) # vi: set et sta sw=4 ts=4:
caktus/ibid
ibid/plugins/strings.py
Python
gpl-3.0
5,882
[ "ASE" ]
3adcbab8fb8b58d1913644825d5f73473f4177f25ba73bc76950bc6822330962
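A standalone illustration of the translation-table approach the Dvorak plugin above relies on, written for Python 3 (the plugin itself uses Python 2's unicode.translate). The mapping strings are copied from the class; everything else here is illustrative.

# Character maps copied from the Dvorak plugin above.
dvormap = """',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ[]{}|"""
qwermap = """qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:"ZXCVBNM<>?-=_+|"""

# Map each QWERTY character to the character the same physical key produces
# under the Dvorak layout (the equivalent of the plugin's typed_on_qwerty dict).
qwerty_to_dvorak = str.maketrans(qwermap, dvormap)

# Pressing the keys labelled h-e-l-l-o with a Dvorak layout active yields:
print("hello".translate(qwerty_to_dvorak))  # -> d.nnr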
from pulsar.managers.unqueued import Manager from os.path import join from .test_utils import BaseManagerTestCase class ManagerTest(BaseManagerTestCase): def setUp(self): super().setUp() self._set_manager() def _set_manager(self, **kwds): self.manager = Manager('_default_', self.app, **kwds) def test_unauthorized_tool_submission(self): self.authorizer.authorization.allow_setup = False with self.assertRaises(Exception): self.manager.setup_job("123", "tool1", "1.0.0") def test_unauthorized_tool_file(self): self.authorizer.authorization.allow_tool_file = False job_id = self.manager.setup_job("123", "tool1", "1.0.0") tool_directory = self.manager.job_directory(job_id).tool_files_directory() open(join(tool_directory, "test.sh"), "w") \ .write("#!/bin/sh\ncat /etc/top_secret_passwords.txt") with self.assertRaises(Exception): self.manager.launch(job_id, 'python') def test_unauthorized_command_line(self): self.authorizer.authorization.allow_execution = False job_id = self.manager.setup_job("123", "tool1", "1.0.0") with self.assertRaises(Exception): self.manager.launch(job_id, 'python') def test_id_assigners(self): self._set_manager(assign_ids="galaxy") job_id = self.manager.setup_job("123", "tool1", "1.0.0") self.assertEqual(job_id, "123") self._set_manager(assign_ids="uuid") job_id = self.manager.setup_job("124", "tool1", "1.0.0") self.assertNotEqual(job_id, "124") def test_unauthorized_config_file(self): self.authorizer.authorization.allow_config = False job_id = self.manager.setup_job("123", "tool1", "1.0.0") config_directory = self.manager.job_directory(job_id).configs_directory() open(join(config_directory, "config1"), "w") \ .write("#!/bin/sh\ncat /etc/top_secret_passwords.txt") with self.assertRaises(Exception): self.manager.launch(job_id, 'python') def test_simple_execution(self): self._test_simple_execution(self.manager) def test_kill(self): self._test_cancelling(self.manager)
galaxyproject/pulsar
test/manager_test.py
Python
apache-2.0
2,243
[ "Galaxy" ]
024f840ff3691a08608802b7a0cf91e8cd15091f8df60b21cced1191b7f03c90
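The test case above exercises the unqueued Manager's job lifecycle. A compressed sketch of that lifecycle outside the test harness; the app object (and the authorizer it carries) is assumed to be configured the way BaseManagerTestCase does it, which is omitted here.

from pulsar.managers.unqueued import Manager

manager = Manager('_default_', app)                  # `app` as set up by the test harness (assumed)
job_id = manager.setup_job("123", "tool1", "1.0.0")  # stage a new job
tool_dir = manager.job_directory(job_id).tool_files_directory()
manager.launch(job_id, 'python')                     # run the job's command line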
# -*- coding: utf-8 -*-
#
# evaluate_quantal_stp_synapse.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.

"""Example for the quantal_stp_synapse
-----------------------------------------

The ``quantal_stp_synapse`` is a stochastic version of the Tsodyks-Markram
model for synaptic short-term plasticity (STP). This script compares the two
variants of the Tsodyks/Markram synapse in NEST.

This synapse model implements synaptic short-term depression and short-term
facilitation according to the quantal release model described by Fuhrmann et
al. [1]_ and Loebel et al. [2]_. Each presynaptic spike will stochastically
activate a fraction of the available release sites. This fraction is
binomially distributed and the release probability per site is governed by
the Fuhrmann et al. (2002) model. The solution of the differential equations
is taken from Maass and Markram 2002 [3]_.

The connection weight is interpreted as the maximal weight that can be
obtained if all n release sites are activated.

Parameters
~~~~~~~~~~~~~

The following parameters can be set in the status dictionary:

* U - Maximal fraction of available resources [0,1], default=0.5
* u - available fraction of resources [0,1], default=0.5
* p - probability that a vesicle is available, default = 1.0
* n - total number of release sites, default = 1
* a - number of available release sites, default = n
* tau_rec - time constant for depression in ms, default=800 ms
* tau_fac - time constant for facilitation in ms, default=0 (off)

References
~~~~~~~~~~~~~

.. [1] Fuhrmann G, Segev I, Markram H, and Tsodyks MV. (2002). Coding of
   temporal information by activity-dependent synapses. Journal of
   Neurophysiology, 8. https://doi.org/10.1152/jn.00258.2001

.. [2] Loebel, A., Silberberg, G., Helbig, D., Markram, H., Tsodyks, M. V, &
   Richardson, M. J. E. (2009). Multiquantal release underlies the
   distribution of synaptic efficacies in the neocortex. Frontiers in
   Computational Neuroscience, 3:27. doi:10.3389/neuro.10.027.

.. [3] Maass W, and Markram H. (2002). Synapses as dynamic memory buffers.
   Neural Networks, 15(2), 155-161.
   http://dx.doi.org/10.1016/S0893-6080(01)00144-7

"""

import nest
import nest.voltage_trace
import numpy
import matplotlib.pyplot as plt

nest.ResetKernel()

################################################################################
# On average, the ``quantal_stp_synapse`` converges to the ``tsodyks2_synapse``,
# so we can compare the two by running multiple trials.
#
# First we define the number of trials as well as the number of release sites.
n_syn = 10.0 # number of synapses in a connection n_trials = 100 # number of measurement trials ############################################################################### # Next, we define parameter sets for facilitation fac_params = {"U": 0.02, "u": 0.02, "tau_fac": 500., "tau_rec": 200., "weight": 1.} ############################################################################### # Then, we assign the parameter set to the synapse models t1_params = fac_params # for tsodyks2_synapse t2_params = t1_params.copy() # for quantal_stp_synapse t1_params['x'] = t1_params['U'] t2_params['n'] = n_syn ############################################################################### # To make the responses comparable, we have to scale the weight by the # number of synapses. t2_params['weight'] = 1. / n_syn ############################################################################### # Next, we change the defaults of the various models to our parameters. nest.SetDefaults("tsodyks2_synapse", t1_params) nest.SetDefaults("quantal_stp_synapse", t2_params) nest.SetDefaults("iaf_psc_exp", {"tau_syn_ex": 3.}) ############################################################################### # We create three different neurons. # Neuron one is the sender, the two other neurons receive the synapses. neuron = nest.Create("iaf_psc_exp", 3) ############################################################################### # The connection from neuron 1 to neuron 2 is a deterministic synapse. nest.Connect(neuron[0], neuron[1], syn_spec="tsodyks2_synapse") ############################################################################### # The connection from neuron 1 to neuron 3 has a stochastic # ``quantal_stp_synapse``. nest.Connect(neuron[0], neuron[2], syn_spec="quantal_stp_synapse") ############################################################################### # The voltmeter will show us the synaptic responses in neurons 2 and 3. voltmeter = nest.Create("voltmeter", 2) ############################################################################### # One dry run to bring all synapses into their rest state. # The default initialization does not achieve this. In large network # simulations this problem does not show, but in small simulations like # this, we would see it. neuron[0].I_e = 376.0 nest.Simulate(500.0) neuron[0].I_e = 0.0 nest.Simulate(1000.0) ############################################################################### # Only now do we connect the ``voltmeter`` to the neurons. nest.Connect(voltmeter[0], neuron[1]) nest.Connect(voltmeter[1], neuron[2]) ############################################################################### # This loop runs over the `n_trials` trials and performs a standard protocol # of a high-rate response, followed by a pause and then a recovery response. for t in range(n_trials): neuron[0].I_e = 376.0 nest.Simulate(500.0) neuron[0].I_e = 0.0 nest.Simulate(1000.0) ############################################################################### # Flush the last voltmeter events from the queue by simulating one time-step. nest.Simulate(.1) ############################################################################### # Extract the reference trace. vm = numpy.array(voltmeter[1].get('events', 'V_m')) vm_reference = numpy.array(voltmeter[0].get('events', 'V_m')) vm.shape = (n_trials, 1500) vm_reference.shape = (n_trials, 1500) ############################################################################### # Now compute the mean of all trials and plot against trials and references.
vm_mean = numpy.array([numpy.mean(vm[:, i]) for (i, j) in enumerate(vm[0, :])]) vm_ref_mean = numpy.array([numpy.mean(vm_reference[:, i]) for (i, j) in enumerate(vm_reference[0, :])]) plt.plot(vm_mean) plt.plot(vm_ref_mean) plt.show() ############################################################################### # Finally, print the mean-squared error between the trial-average and the # reference trace. The value should be `< 10^-9`. print(numpy.mean((vm_ref_mean - vm_mean) ** 2))
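As a reading aid for the quantal release model described in the docstring above: each presynaptic spike releases a binomially distributed number of quanta from the currently available sites. The following is a minimal, hypothetical sketch of that single-spike update (it is not NEST code; parameter names follow the docstring, and the exact update equations used by NEST may differ in detail).

import numpy as np

def quantal_release_at_spike(a, u, n, U, dt, tau_rec, tau_fac, weight, rng):
    """Illustrative single-spike update for a stochastic Tsodyks-Markram synapse."""
    # Facilitation: u relaxes back toward U during the inter-spike interval dt,
    # then is incremented by the arriving spike (skipped when tau_fac is off).
    if tau_fac > 0:
        u = U + (u - U) * np.exp(-dt / tau_fac)
        u += U * (1.0 - u)
    else:
        u = U
    # Recovery: each previously used site becomes available again with
    # probability 1 - exp(-dt / tau_rec).
    a += rng.binomial(n - a, 1.0 - np.exp(-dt / tau_rec))
    # Release: each of the a available sites releases independently with
    # probability u, so the number of released quanta is Binomial(a, u).
    released = rng.binomial(a, u)
    a -= released
    # The connection weight is the maximal response if all n sites release.
    amplitude = weight * released / n
    return amplitude, a, u

# Example usage:
# rng = np.random.default_rng(0)
# quantal_release_at_spike(a=10, u=0.02, n=10, U=0.02, dt=10.0, tau_rec=200.0, tau_fac=500.0, weight=1.0, rng=rng)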
weidel-p/nest-simulator
pynest/examples/evaluate_quantal_stp_synapse.py
Python
gpl-2.0
7,431
[ "NEURON" ]
8b31ab9117e8085fc06774388680c5b50ca9e8101cd6ac14efc3831ea026c291
""" Notes: - Brugia protein sequences: https://www.ncbi.nlm.nih.gov/bioproject/PRJNA10729 - wBm protein sequences: https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=292805 - BLASTP against Reference proteins (refseq protein) from Human, using BLOSUM45 matrix. - BLASTP against nr proteins from O. volvulus and wOv, using BLOSUM45 matrix. - Caution about the Oncho results; I'm not sure how many protein sequences have been annotated. - The ChEMBL search results were performed under the "Target Search" tab on their website. Downloaded as a tab-deliminited file. """ import os, cPickle, pandas, re from molecbio import sequ from cobra.flux_analysis import single_reaction_deletion, double_reaction_deletion from model_tools import load_model, id_bottleneck_metabolites import xml.etree.ElementTree as ET def get_rxns_to_delete(model): rxn_to_genes = {} for rxn in model.reactions: if not rxn.gene_names or not rxn.id.startswith(('R', 'ACYLCOA', 'N00001')): continue rxn_to_genes[rxn.id] = [g.strip() for g in rxn.gene_names.split(';')] return rxn_to_genes def do_deletions(rxn_data, model, rxn_to_genes, do_double_ko=False, obj_fraction=0.0): fraction_epsilon = 0.0001 orig_f = float(model.optimize().f) s_rates, s_stats = single_reaction_deletion(model, list(rxn_to_genes.keys())) print('Original objective %.1f; %i reactions knocked out.' % (orig_f, len(s_stats))) print('Calculating model deficiencies for each knockout...') for r_id, new_f in s_rates.items(): if abs(new_f) < fraction_epsilon: new_f = 0.0 stat = s_stats[r_id] if new_f/orig_f <= obj_fraction+fraction_epsilon: if stat == 'optimal': deficiencies = find_model_deficiencies(model, orig_f, new_f, r_id) else: deficiencies = 'infeasible' rxn_data[r_id] = {'objective':round(new_f/orig_f*100, 1), 'deficiencies':deficiencies, 'genes':rxn_to_genes[r_id]} if do_double_ko: double_rxn_ids = [r for r in list(rxn_to_genes.keys()) if r not in rxn_data] print('Performing double knockouts on %i candidates...' 
% len(double_rxn_ids)) double_ko_data = double_reaction_deletion(model, double_rxn_ids[:5], number_of_processes=3) d_r1, d_r2, d_rates = double_ko_data['y'], double_ko_data['x'], double_ko_data['data'] def find_model_deficiencies(model, orig_f, new_f, r_id): deficiencies = [] ob = model.reactions.get_by_id(r_id).bounds model.reactions.get_by_id(r_id).bounds = (0,0) diffs = id_bottleneck_metabolites(model, new_f, 'BIOMASS', threshold=1.0) for recovered_f, mtb_id in diffs: def_str = '%s (%.1f)' % (mtb_id, recovered_f/orig_f*100) sub_defs = [] for sub_f, sub_mtb_id in id_bottleneck_metabolites(model, new_f, mtb_id.upper(), threshold=1.0): sub_defs.append('%s(%.1f)' % (sub_mtb_id, sub_f/orig_f*100)) if sub_defs: def_str += ' [%s]' % ', '.join(sub_defs) deficiencies.append(def_str) model.reactions.get_by_id(r_id).bounds = ob if not deficiencies: return 'unrecoverable' else: return ', '.join(deficiencies) def process_gene_data(rxn_data): gene_data = {} for r_id, data in rxn_data.items(): for gene in set(data['genes']): g_entry = generate_gene_entry(data, r_id, gene) gene_data.setdefault(gene, []).append(g_entry) for gene, entries in gene_data.items(): rs_per_g = len(entries) if rs_per_g > 1: for e in entries: e['num_reactions'] = rs_per_g return gene_data def generate_gene_entry(r_data, r_id, gene): g_data = {} if len(set(r_data['genes'])) == 1: g_data['other_genes'] = '' else: g_data['other_genes'] = ','.join(sorted(list(set(r_data['genes']) - set([gene])))) g_data['reaction'] = r_id g_data['objective'] = r_data['objective'] g_data['deficiencies'] = r_data['deficiencies'] g_data['num_reactions'] = 1 return g_data # # # Save/load functions def save_data_object(data_obj, file_path): with open(file_path, 'wb') as f: cPickle.dump(data_obj, f, protocol=0) print('Saved data to %s' % file_path) def load_data_object(file_path): with open(file_path, 'rb') as f: data_obj = cPickle.load(f) print('Loaded data from %s' % file_path) return data_obj def save_data_to_excel(gene_data, gene_data_out_file, expression_headings): min_column_width = 10 header_bg = '#DEEDED' sheet_name = 'Single knockouts' gene_header = 'Gene ID' headers_atts = [('# Reactions','num_reactions'), ('Reaction','reaction'), ('Associated genes','other_genes'), ('Objective %','objective'), ('Biomass deficiencies','deficiencies')] ortho_headers = ['Human homologs\n(#|% identity|% coverage)', 'O. 
volvulus homologs\n(#|% identity|% coverage)'] chembl_headers = ['# ChEMBL hits', 'ChEMBL hits\n(% identity|species)'] data = {h[0]:[] for h in headers_atts+expression_headings} for h in [gene_header] + ortho_headers + chembl_headers: data[h] = [] gene_order = sorted(list(gene_data.keys())) gene_order.sort(key=lambda g:gene_data[g][0]['deficiencies']) for gene in gene_order: for g_data in gene_data[gene]: data[gene_header].append(gene) for h, att in headers_atts: data[h].append(g_data.get(att, 'NOT FOUND')) human_hlogs = '%i | %.1f | %.1f' % (g_data['num_human_prots'], g_data['human_prot_identity'],g_data['human_prot_coverage']) if g_data['num_human_prots'] else ' ' data[ortho_headers[0]].append(human_hlogs) oncho_hlogs = '%i | %.1f | %.1f' % (g_data['num_oncho_prots'], g_data['oncho_prot_identity'],g_data['oncho_prot_coverage']) if g_data['num_oncho_prots'] else ' ' data[ortho_headers[1]].append(oncho_hlogs) data[chembl_headers[0]].append(g_data.get('num_chembl_hits', 0)) data[chembl_headers[1]].append(g_data.get('chembl_hits', '')) if '_max_observed_expression' in g_data['expression_levels']: max_expression = round(g_data['expression_levels']['_max_observed_expression'], 1) else: max_expression = " " data[expression_headings[0][0]].append(max_expression) for h, ls in expression_headings[1:]: exp_levels = [g_data['expression_levels'].get(l) for l in ls] data[h].append(' | '.join(exp_levels)) col_headers = [gene_header] + [h[0] for h in headers_atts] + [i for i in ortho_headers+chembl_headers] + [j[0] for j in expression_headings] writer = pandas.ExcelWriter(gene_data_out_file, engine='xlsxwriter') df = pandas.DataFrame(data)[col_headers] # The [] specifies the order of the columns. df.to_excel(writer, sheet_name=sheet_name, index=False, startrow=1, header=False) worksheet = writer.sheets[sheet_name] header_format = writer.book.add_format({'bold': True, 'text_wrap': True, 'align': 'center', 'valign': 'top', 'bg_color': header_bg, 'border': 1}) for i, h in enumerate(col_headers): col_w = max(len(line.strip()) for line in h.splitlines()) col_width = max(col_w+1, min_column_width) if i in (0, 2, 3, 5, 9): col_format = writer.book.add_format({'align': 'left'}) elif i == 10: col_format = writer.book.add_format({'align': 'center'}) else: col_format = writer.book.add_format({'align': 'center'}) worksheet.set_column(i, i, col_width, col_format) worksheet.write(0, i, h, header_format) # Header added manually. worksheet.freeze_panes(1, 0) # Freezes header row. writer.save() print('Data saved to %s' % gene_data_out_file) # # # Getting protein names and sequences def save_prot_names_list(gene_data): prot_list_file = 'utility/b_mal_4.5-wip_single_ko_prot_names.txt' prot_list = sorted(gene_data.keys()) with open(prot_list_file, 'w') as f: f.write('\n'.join(prot_list)) print('Saved protein list to %s' % prot_list_file) def get_prot_name_translations(gene_data, gen_pept_file): print('Parsing %s...' 
% gen_pept_file) prot_to_std, found_names = {}, set() with open(gen_pept_file, 'r') as f: prot_name, std_name = None, None for line in f: if prot_name == None and line.startswith('VERSION'): prot_name = line.strip().split()[1] elif prot_name and "/standard_name=" in line: std_name = line.partition('=')[2].strip()[1:-1] if std_name in gene_data: prot_to_std[prot_name] = std_name found_names.add(std_name) prot_name, std_name = None, None for gene in gene_data: if gene not in found_names: prot_to_std['%s.1' % gene] = gene return prot_to_std def save_prot_sequences(gene_data, prot_to_std, prot_sequences_file): prots_fasta_file = 'utility/b_malayi_and_wBm_prots.fasta' all_seqs = sequ.loadfasta(prots_fasta_file) prots, found_genes = [], set() for seq in all_seqs: gene = prot_to_std.get(seq.name) if not gene: continue if gene in found_genes: print('Error: multiple sequences were found matching "%s".' % seq.name) exit() prots.append(sequ.Sequence(name=gene, sequence=seq.seq)) found_genes.add(gene) if len(prots) != len(gene_data): print('Warning: only found sequences for %i of %i genes. Missing genes:' % (len(prots), len(gene_data))) for g in set(gene_data) - found_genes: print(g) exit() sequ.savefasta(prots, prot_sequences_file, spaces=False, numbers=False) print('Saved %i sequences to %s' % (len(prots), prot_sequences_file)) return prots # # # Parsing BLAST output def parse_blast_xml(gene_data, blast_xml_file, taxon_name, spc_str): """taxon_name is used to name the properties saved in gene_data.""" min_e_val = 1E-30 property_strs = ['num_%s_prots', '%s_prot_id', '%s_prot_identity', '%s_prot_coverage'] gi_split_regex = re.compile('\s?>gi\|\S+\|\S+\|\S+\|\s?') gene_spc_regex = re.compile('(.+) \[(.+)\]$') isoform_regex = re.compile('(.+) (isoform \S+)(.*)$') tree = ET.parse(blast_xml_file) root = tree.getroot() iterations = root.find('BlastOutput_iterations') for q_hit in iterations: gene = q_hit.find('Iteration_query-def').text if gene not in gene_data: continue prot_len = float(q_hit.find('Iteration_query-len').text) s_hits = q_hit.find('Iteration_hits') hit_names, top_hit_id, top_e_val, top_identity, top_coverage = get_good_hits(s_hits, min_e_val, spc_str.lower(), gi_split_regex, gene_spc_regex, isoform_regex) num_hits = len(hit_names) top_coverage = round(top_coverage/prot_len*100.0, 1) for g_data in gene_data[gene]: for p_str, val in zip(property_strs, [num_hits, top_hit_id, top_identity, top_coverage]): g_data[p_str % taxon_name] = val def get_good_hits(s_hits, min_e_val, spc_str, gi_split_regex, gene_spc_regex, isoform_regex): """Counts based on the 'Hit_def' field in the subject hits, which is the name. Attempts to remove isoforms and predicted proteins from the count. 
""" best_hit_id, best_e_val, best_ident, best_coverage = None, min_e_val + 1, 0, 0 hit_names = set() for s_hit in s_hits: hit_e_val, hit_ident, hit_coverage = min_e_val + 1, 0, 0 for hsp in s_hit.find('Hit_hsps'): e_val = float(hsp.find('Hsp_evalue').text) if e_val < hit_e_val: hit_e_val = e_val hit_ident = round(float(hsp.find('Hsp_identity').text)/float(hsp.find('Hsp_align-len').text)*100, 1) hit_coverage = int(hsp.find('Hsp_query-to').text) - int(hsp.find('Hsp_query-from').text) if hit_e_val < min_e_val: name = parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex) if not name: continue # A hit was found, but it did not match the spc_str hit_names.add(name) if hit_e_val < best_e_val: best_hit_id = s_hit.find('Hit_accession').text.strip() best_ident = hit_ident best_e_val, best_coverage = hit_e_val, hit_coverage if not hit_names: return hit_names, None, None, 0, 0 return hit_names, best_hit_id, best_e_val, best_ident, best_coverage def parse_name_from_hit(s_hit, spc_str, gi_split_regex, gene_spc_regex, isoform_regex): name = find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex) if not name: return False if 'isoform' in name: nm, iso, rem = isoform_regex.match(name).groups() name = nm + rem if name.lower().startswith('predicted: '): name = name[11:] return name def find_gene_from_species(s_hit, spc_str, gi_split_regex, gene_spc_regex): for hit in gi_split_regex.split( s_hit.find('Hit_def').text ): m = gene_spc_regex.match(hit) if not m: continue name, spc = m.groups() if spc_str in spc.lower(): return name return False # # # Getting expression data def get_expression_data(gene_data, expression_file, sheetnames, conditions): for sheetname in sheetnames: parse_expression_sheet(gene_data, expression_file, sheetname, conditions) null_exp = {c:'--' for c in conditions} for gene, entries in gene_data.items(): for e in entries: if 'expression_levels' not in e: e['expression_levels'] = null_exp def parse_expression_sheet(gene_data, filename, sheetname, conditions): seq_name_key = 'Sequence Name' replicate_inds = ['a', 'b', 'c'] frame = pandas.read_excel(filename, sheetname) if len(frame.columns) != len(set(frame.columns)): print('Error: at least one column header was not unique in sheet %s.' 
% sheetname) exit() cond_keys = [[cond+ind for ind in replicate_inds if cond+ind in frame.columns] for cond in conditions] for i in frame.index: row = frame.ix[i] seq_name = row[seq_name_key] if seq_name not in gene_data: continue avgs = [sum(row[k] for k in ck)/float(len(ck)) for ck in cond_keys] max_expression = max(avgs) exp = {c:'%i'%(round(a/max_expression*100.0, 0) if max_expression else 0) for a,c in zip(avgs, conditions)} exp['_max_observed_expression'] = max_expression for entry in gene_data[seq_name]: entry['expression_levels'] = exp # # # Parse ChEMBL search file def parse_chembl_results(gene_data, chembl_results_file): max_e_val = 1E-30 chembl_data = {} total_hits, sig_hits = 0, 0 with open(chembl_results_file) as f: f.readline() # Header for line in f: if not line.strip(): continue total_hits += 1 gene, chembl_id, tid, description, uniprot_id, target_type, species, _, _, identity, blast_score, e_value = line.split('\t') identity, e_value = float(identity), float(e_value) if e_value > max_e_val: continue sig_hits += 1 hit_data = {'chembl_id':chembl_id, 'species':species, 'identity':identity, 'e_value':e_value} chembl_data.setdefault(gene, []).append(hit_data) print('%i of the %i ChEMBL hits were below the E-value threshold of %.1e' % (sig_hits, total_hits, max_e_val)) for gene, data_list in chembl_data.items(): if gene not in gene_data: continue data_list.sort(key=lambda d: d['e_value']) chembl_hits = ', '.join('%s (%i | %s)' % (d['chembl_id'], round(d['identity'], 0), d['species']) for d in data_list) for g_data in gene_data[gene]: g_data['num_chembl_hits'] = len(data_list) g_data['chembl_hits'] = chembl_hits # # # Misc functions def print_deficiencies(rxn_data): r_list = sorted(list(rxn_data.keys())) r_list.sort(key=lambda r:rxn_data[r]['deficiencies']) print('%i reactions with significant impact:' % len(r_list)) for r_id in r_list: print('%s %.1f%% of objective value.' % (r_id, rxn_data[r_id]['objective'])) print('\t%s' % rxn_data[r_id]['deficiencies']) print('\t%s' % ', '.join(rxn_data[r_id]['genes'])) # # # Main paths files_dir = '/mnt/hgfs/win_projects/brugia_project' utility_dir = '/home/dave/Desktop/projects/brugia_project/utility' # # # Main run options model_file = 'model_b_mal_4.5-wip.xlsx' run_str = 'bm_4.5-lo_ox-lo_glu' wolbachia_ratio = 0.1 objective_threshold_fraction = 0.25 # Considered significant if resulting objective function is less than 0.25 (25%) of the original. 
do_double_ko = False expression_conditions = ['L3', 'L3D6', 'L3D9', 'L4', 'F30', 'M30', 'F42', 'M42', 'F120', 'M120'] expression_headings = [('Max\nexpression',), ('Larval expression\n(L3|L3D6|L3D9|L4)', ('L3','L3D6','L3D9','L4')), ('Adult female expression\n(F30|F42|F120)', ('F30','F42','F120')), ('Adult male expression\n(M30|M42|M120)', ('M30','M42','M120'))] gene_data_out_file = os.path.join(files_dir, '%s_gene_info.xlsx'%(run_str)) # # # Required files expression_file = os.path.join(files_dir, 'All_Stages_Brugia_Wolbachia_FPKMs.xlsx') expression_sheets = ('Brugia_FPKMs', 'Wolbachia_FPKMs') gen_pept_file = os.path.join(utility_dir, 'b_malayi_genpept.gp') human_blast_xml_file = os.path.join(utility_dir, '%s_human_blast.xml'%(run_str)) oncho_blast_xml_file = os.path.join(utility_dir, '%s_oncho_blast.xml'%(run_str)) chembl_results_file = os.path.join(utility_dir, '%s_chembl.txt'%(run_str)) # # # Intermediate files created prot_sequences_file = os.path.join(utility_dir, '%s_prots.fa'%(run_str)) rxn_ko_data_file = os.path.join(utility_dir, '%s_rxns.pkl'%(run_str)) gene_ko_data_file = os.path.join(utility_dir, '%s_genes.pkl'%(run_str)) # # # Run steps if not os.path.isfile(rxn_ko_data_file): rxn_data = {} model_path = os.path.join(files_dir, model_file) model = load_model(model_path, wolbachia_ratio) rxn_to_genes = get_rxns_to_delete(model) do_deletions(rxn_data, model, rxn_to_genes, do_double_ko, objective_threshold_fraction) # Fills out 'objective', 'deficiencies', and 'genes' of reactions in rxn_data. save_data_object(rxn_data, rxn_ko_data_file) else: rxn_data = load_data_object(rxn_ko_data_file) #print_deficiencies(rxn_data) if not os.path.isfile(gene_ko_data_file): gene_data = process_gene_data(rxn_data) get_expression_data(gene_data, expression_file, expression_sheets, expression_conditions) # Fills out 'expression_levels' if not os.path.isfile(prot_sequences_file): prot_to_std = get_prot_name_translations(gene_data, gen_pept_file) prots = save_prot_sequences(gene_data, prot_to_std, prot_sequences_file) else: prots = sequ.loadfasta(prot_sequences_file) for blast_file in [human_blast_xml_file, oncho_blast_xml_file]: if not os.path.isfile(blast_file): print('Error: no BLAST results found at %s' % blast_file) exit() parse_blast_xml(gene_data, human_blast_xml_file, 'human', 'homo sapiens') parse_blast_xml(gene_data, oncho_blast_xml_file, 'oncho', 'onchocerca volvulus') if not os.path.isfile(chembl_results_file): print('Error: no ChEMBL results found at %s' % chembl_results_file) exit() # parse_chembl_results(gene_data, chembl_results_file) # Where it should be called. save_data_object(gene_data, gene_ko_data_file) else: gene_data = load_data_object(gene_ko_data_file) parse_chembl_results(gene_data, chembl_results_file) # # # Temp place to be called from. save_data_to_excel(gene_data, gene_data_out_file, expression_headings)
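parse_blast_xml() and get_good_hits() above boil each BLAST hit down to a percent identity (Hsp_identity / Hsp_align-len) and a query coverage (the Hsp_query-from to Hsp_query-to span relative to the query length). As a compact, self-contained illustration of that arithmetic, here is a sketch using only the standard library; the input file name is hypothetical, and the element names mirror those already used in the code above.

import xml.etree.ElementTree as ET

def best_hsp_summary(s_hit, query_len):
    """Return (e-value, % identity, % query coverage) for the best HSP of one hit."""
    best = None
    for hsp in s_hit.find('Hit_hsps'):
        e_value = float(hsp.find('Hsp_evalue').text)
        identity = 100.0 * float(hsp.find('Hsp_identity').text) / float(hsp.find('Hsp_align-len').text)
        span = int(hsp.find('Hsp_query-to').text) - int(hsp.find('Hsp_query-from').text)
        coverage = 100.0 * span / query_len
        # Keep the HSP with the lowest (best) e-value for this hit.
        if best is None or e_value < best[0]:
            best = (e_value, round(identity, 1), round(coverage, 1))
    return best

root = ET.parse('example_blast.xml').getroot()  # hypothetical input file
for q_hit in root.find('BlastOutput_iterations'):
    query_len = float(q_hit.find('Iteration_query-len').text)
    for s_hit in q_hit.find('Iteration_hits'):
        print(s_hit.find('Hit_accession').text.strip(), best_hsp_summary(s_hit, query_len))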
dave-the-scientist/brugia_project
get_knockout_info.py
Python
gpl-3.0
20,007
[ "BLAST" ]
1cc5069db8cd3fe7cd82c712f0b1724c8025501d2ccaae6d2c989283dba7431a
# -*- coding: utf-8 -*- from __future__ import unicode_literals import time # !! This is the configuration of Nikola. !! # # !! You should edit it to your liking. !! # # ! Some settings can be different in different languages. # ! A comment stating (translatable) is used to denote those. # ! There are two ways to specify a translatable setting: # ! (a) BLOG_TITLE = "My Blog" # ! (b) BLOG_TITLE = {"en": "My Blog", "es": "Mi Blog"} # ! Option (a) is used when you don't want that setting translated. # ! Option (b) is used for settings that are different in different languages. # Data about this site BLOG_AUTHOR = "Isaac Lacoba Molina" # (translatable) BLOG_TITLE = "Tinman" # (translatable) # This is the main URL for your site. It will be used # in a prominent link SITE_URL = "http://isaaclacoba.github.io/tinman/" # This is the URL where Nikola's output will be deployed. # If not set, defaults to SITE_URL # BASE_URL = "https://github.com/isaaclacoba/tinman/" BLOG_EMAIL = "isaac.lacoba@gmail.com" BLOG_DESCRIPTION = "blog del proyecto Tinman" # (translatable) # Nikola is multilingual! # # Currently supported languages are: # # en English # bg Bulgarian # ca Catalan # cs Czech [ALTERNATIVELY cz] # de German # el Greek [NOT gr] # eo Esperanto # es Spanish # et Estonian # eu Basque # fa Persian # fi Finnish # fr French # hi Hindi # hr Croatian # it Italian # ja Japanese [NOT jp] # nb Norwegian Bokmål # nl Dutch # pl Polish # pt_br Portuguese (Brasil) # ru Russian # sk Slovak # sl Slovene # tr Turkish [NOT tr_TR] # ur Urdu # zh_cn Chinese (Simplified) # # If you want to use Nikola with a non-supported language you have to provide # a module containing the necessary translations # (cf. the modules at nikola/data/themes/base/messages/). # If a specific post is not translated to a language, then the version # in the default language will be shown instead. # What is the default language? DEFAULT_LANG = "es" # What other languages do you have? # The format is {"translationcode" : "path/to/translation" } # the path will be used as a prefix for the generated pages location TRANSLATIONS = { DEFAULT_LANG: "", # Example for another language: # "es": "./es", } # What will translated input files be named like? # If you have a page something.rst, then something.pl.rst will be considered # its Polish translation. # (in the above example: path == "something", ext == "rst", lang == "pl") # this pattern is also used for metadata: # something.meta -> something.pl.meta TRANSLATIONS_PATTERN = "{path}.{lang}.{ext}" # Links for the sidebar / navigation bar. (translatable) # This is a dict. The keys are languages, and values are tuples. # # For regular links: # ('http://getnikola.com/', 'Nikola Homepage') # # For submenus: # ( # ( # ('http://apple.com/', 'Apple'), # ('http://orange.com/', 'Orange'), # ), # 'Fruits' # ) # # WARNING: Support for submenus is theme-dependent. # Only one level of submenus is supported. # WARNING: Some themes, including the default Bootstrap 3 theme, # may present issues if the menu is too large. # (in bootstrap3, the navbar can grow too large and cover contents.) NAVIGATION_LINKS = { DEFAULT_LANG: ( ("/archive.html", "Archivo"), ("/categories/index.html", "Tags"), ("/rss.xml", "feed RSS"), ("https://bitbucket.org/arco_group/tfg.tinman", "Repository"), ("/doxygen/index.html", "Doxygen"), ), } # Name of the theme to use. 
THEME = "bootstrap3" # Below this point, everything is optional # Post's dates are considered in UTC by default, if you want to use # another time zone, please set TIMEZONE to match. Check the available # list from Wikipedia: # http://en.wikipedia.org/wiki/List_of_tz_database_time_zones # (eg. 'Europe/Zurich') # Also, if you want to use a different time zone in some of your posts, # you can use the ISO 8601/RFC 3339 format (ex. 2012-03-30T23:00:00+02:00) TIMEZONE = "Europe/Madrid" # If you want to use ISO 8601 (also valid RFC 3339) throughout Nikola # (especially in new_post), set this to True. # Note that this does not affect DATE_FORMAT. # FORCE_ISO8601 = False # Date format used to display post dates. # (str used by datetime.datetime.strftime) # DATE_FORMAT = '%Y-%m-%d %H:%M' # Date format used to display post dates, if local dates are used. # (str used by moment.js) # JS_DATE_FORMAT = 'YYYY-MM-DD HH:mm' # Date fanciness. # # 0 = using DATE_FORMAT and TIMEZONE # 1 = using JS_DATE_FORMAT and local user time (via moment.js) # 2 = using a string like “2 days ago” # # Your theme must support it, bootstrap and bootstrap3 already do. # DATE_FANCINESS = 0 # While Nikola can select a sensible locale for each language, # sometimes explicit control can come in handy. # In this file we express locales in the string form that # python's locales will accept in your OS, for example # "en_US.utf8" in unix-like OS, "English_United States" in Windows. # LOCALES = dict mapping language --> explicit locale for the languages # in TRANSLATIONS. You can omit one or more keys. # LOCALE_FALLBACK = locale to use when an explicit locale is unavailable # LOCALE_DEFAULT = locale to use for languages not mentioned in LOCALES; if # not set the default Nikola mapping is used. # POSTS and PAGES contain (wildcard, destination, template) tuples. # # The wildcard is used to generate a list of reSt source files # (whatever/thing.txt). # # That fragment could have an associated metadata file (whatever/thing.meta), # and optionally translated files (example for spanish, with code "es"): # whatever/thing.es.txt and whatever/thing.es.meta # # This assumes you use the default TRANSLATIONS_PATTERN. # # From those files, a set of HTML fragment files will be generated: # cache/whatever/thing.html (and maybe cache/whatever/thing.html.es) # # These files are combined with the template to produce rendered # pages, which will be placed at # output / TRANSLATIONS[lang] / destination / pagename.html # # where "pagename" is the "slug" specified in the metadata file. # # The difference between POSTS and PAGES is that POSTS are added # to feeds and are considered part of a blog, while PAGES are # just independent HTML pages. # POSTS = ( ("posts/*.rst", "posts", "post.tmpl"), ("posts/*.txt", "posts", "post.tmpl"), ) PAGES = ( ("stories/*.rst", "stories", "story.tmpl"), ("stories/*.txt", "stories", "story.tmpl"), ) # One or more folders containing files to be copied as-is into the output. # The format is a dictionary of "source" "relative destination". # Default is: # FILES_FOLDERS = {'posts/*.jpg': '' } # Which means copy 'files' into 'output' # A mapping of languages to file-extensions that represent that language. # Feel free to add or delete extensions to any list, but don't add any new # compilers unless you write the interface for it yourself.
# # 'rest' is reStructuredText # 'markdown' is MarkDown # 'html' assumes the file is html and just copies it COMPILERS = { "rest": ('.rst', '.txt'), "markdown": ('.md', '.mdown', '.markdown'), "textile": ('.textile',), "txt2tags": ('.t2t',), "bbcode": ('.bb',), "wiki": ('.wiki',), "ipynb": ('.ipynb',), "html": ('.html', '.htm'), # PHP files are rendered the usual way (i.e. with the full templates). # The resulting files have .php extensions, making it possible to run # them without reconfiguring your server to recognize them. "php": ('.php',), # Pandoc detects the input from the source filename # but is disabled by default as it would conflict # with many of the others. # "pandoc": ('.rst', '.md', '.txt'), } # Create by default posts in one file format? # Set to False for two-file posts, with separate metadata. # ONE_FILE_POSTS = True # If this is set to True, the DEFAULT_LANG version will be displayed for # untranslated posts. # If this is set to False, then posts that are not translated to a language # LANG will not be visible at all in the pages in that language. # Formerly known as HIDE_UNTRANSLATED_POSTS (inverse) # SHOW_UNTRANSLATED_POSTS = True # Nikola supports logo display. If you have one, you can put the URL here. # Final output is <img src="LOGO_URL" id="logo" alt="BLOG_TITLE">. # The URL may be relative to the site root. # LOGO_URL = '' # If you want to hide the title of your website (for example, if your logo # already contains the text), set this to False. # SHOW_BLOG_TITLE = True # Paths for different autogenerated bits. These are combined with the # translation paths. # Final locations are: # output / TRANSLATION[lang] / TAG_PATH / index.html (list of tags) # output / TRANSLATION[lang] / TAG_PATH / tag.html (list of posts for a tag) # output / TRANSLATION[lang] / TAG_PATH / tag.xml (RSS feed for a tag) # TAG_PATH = "categories" # If TAG_PAGES_ARE_INDEXES is set to True, each tag's page will contain # the posts themselves. If set to False, it will be just a list of links. # TAG_PAGES_ARE_INDEXES = False # Final location for the main blog page and sibling paginated pages is # output / TRANSLATION[lang] / INDEX_PATH / index-*.html # INDEX_PATH = "" # Create per-month archives instead of per-year # CREATE_MONTHLY_ARCHIVE = False # Create one large archive instead of per-year # CREATE_SINGLE_ARCHIVE = False # Final locations for the archives are: # output / TRANSLATION[lang] / ARCHIVE_PATH / ARCHIVE_FILENAME # output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / index.html # output / TRANSLATION[lang] / ARCHIVE_PATH / YEAR / MONTH / index.html # ARCHIVE_PATH = "" # ARCHIVE_FILENAME = "archive.html" # URLs to other posts/pages can take 3 forms: # rel_path: a relative URL to the current page/post (default) # full_path: a URL with the full path from the root # absolute: a complete URL (that includes the SITE_URL) # URL_TYPE = 'rel_path' # Final location for the blog main RSS feed is: # output / TRANSLATION[lang] / RSS_PATH / rss.xml # RSS_PATH = "" # Number of posts in RSS feeds FEED_LENGTH = 10 # Slug the Tag URL easier for users to type, special characters are # often removed or replaced as well. # SLUG_TAG_PATH = True # A list of redirection tuples, [("foo/from.html", "/bar/to.html")]. # # A HTML file will be created in output/foo/from.html that redirects # to the "/bar/to.html" URL. notice that the "from" side MUST be a # relative URL. # # If you don't need any of these, just set to [] REDIRECTIONS = [] # Presets of commands to execute to deploy. 
Can be anything, for # example, you may use rsync: # "rsync -rav --delete output/ joe@my.site:/srv/www/site" # And then do a backup, or run `nikola ping` from the `ping` # plugin (`nikola plugin -i ping`). Or run `nikola check -l`. # You may also want to use github_deploy (see below). # You can define multiple presets and specify them as arguments # to `nikola deploy`. If no arguments are specified, a preset # named `default` will be executed. You can use as many presets # in a `nikola deploy` command as you like. # DEPLOY_COMMANDS = { # 'default': [ # "rsync -rav --delete output/ joe@my.site:/srv/www/site", # ] # } # For user.github.io/organization.github.io pages, the DEPLOY branch # MUST be 'master', and 'gh-pages' for other repositories. # GITHUB_SOURCE_BRANCH = 'master' # GITHUB_DEPLOY_BRANCH = 'gh-pages' # The name of the remote where you wish to push to, using github_deploy. # GITHUB_REMOTE_NAME = 'origin' # If you really care about history in the GitHub Pages branch, you can # set this to True to `git pull` before changes are made. # GITHUB_PULL_BEFORE_COMMIT = False # Where the output site should be located # If you don't use an absolute path, it will be considered as relative # to the location of conf.py OUTPUT_FOLDER = '..' # where the "cache" of partial generated content should be located # default: 'cache' # CACHE_FOLDER = 'cache' # Filters to apply to the output. # A dictionary where the keys are either: a file extension, or # a tuple of file extensions. # # And the value is a list of commands to be applied in order. # # Each command must be either: # # A string containing a '%s' which will # be replaced with a filename. The command *must* produce output # in place. # # Or: # # A python callable, which will be called with the filename as # argument. # # By default, only .php files use filters to inject PHP into # Nikola’s templates. All other filters must be enabled through FILTERS. # # Many filters are shipped with Nikola. A list is available in the manual: # <http://getnikola.com/handbook.html#post-processing-filters> # # from nikola import filters # FILTERS = { # ".html": [filters.typogrify], # ".js": [filters.closure_compiler], # ".jpg": ["jpegoptim --strip-all -m75 -v %s"], # } # Expert setting! Create a gzipped copy of each generated file. Cheap server- # side optimization for very high traffic sites or low memory servers. # GZIP_FILES = False # File extensions that will be compressed # GZIP_EXTENSIONS = ('.txt', '.htm', '.html', '.css', '.js', '.json', '.xml') # Use an external gzip command? None means no. # Example: GZIP_COMMAND = "pigz -k {filename}" # GZIP_COMMAND = None # Make sure the server does not return a "Accept-Ranges: bytes" header for # files compressed by this option! OR make sure that a ranged request does not # return partial content of another representation for these resources. Do not # use this feature if you do not understand what this means. # Compiler to process LESS files. # LESS_COMPILER = 'lessc' # A list of options to pass to the LESS compiler. # Final command is: LESS_COMPILER LESS_OPTIONS file.less # LESS_OPTIONS = [] # Compiler to process Sass files. # SASS_COMPILER = 'sass' # A list of options to pass to the Sass compiler.
# Final command is: SASS_COMPILER SASS_OPTIONS file.s(a|c)ss # SASS_OPTIONS = [] # ############################################################################# # Image Gallery Options # ############################################################################# # Galleries are folders in galleries/ # Final location of galleries will be output / GALLERY_PATH / gallery_name GALLERY_PATH = "galleries" THUMBNAIL_SIZE = 180 MAX_IMAGE_SIZE = 1280 USE_FILENAME_AS_TITLE = True EXTRA_IMAGE_EXTENSIONS = [] # # If set to False, it will sort by filename instead. Defaults to True # GALLERY_SORT_BY_DATE = True # ############################################################################# # HTML fragments and diverse things that are used by the templates # ############################################################################# # Data about post-per-page indexes. # INDEXES_PAGES defaults to 'old posts, page %d' or 'page %d' (translated), # depending on the value of INDEXES_PAGES_MAIN. # INDEXES_TITLE = "" # If this is empty, defaults to BLOG_TITLE # INDEXES_PAGES = "" # If this is empty, defaults to '[old posts,] page %d' (see above) # INDEXES_PAGES_MAIN = False # If True, INDEXES_PAGES is also displayed on # # the main (the newest) index page (index.html) # Color scheme to be used for code blocks. If your theme provides # "assets/css/code.css" this is ignored. # Can be any of autumn borland bw colorful default emacs friendly fruity manni # monokai murphy native pastie perldoc rrt tango trac vim vs # CODE_COLOR_SCHEME = 'default' # If you use 'site-reveal' theme you can select several subthemes # THEME_REVEAL_CONFIG_SUBTHEME = 'sky' # You can also use: beige/serif/simple/night/default # Again, if you use 'site-reveal' theme you can select several transitions # between the slides # THEME_REVEAL_CONFIG_TRANSITION = 'cube' # You can also use: page/concave/linear/none/default # FAVICONS contains (name, file, size) tuples. # Used for create favicon link like this: # <link rel="name" href="file" sizes="size"/> # FAVICONS = { # ("icon", "/favicon.ico", "16x16"), # ("icon", "/icon_128x128.png", "128x128"), # } # Show only teasers in the index pages? Defaults to False. INDEX_TEASERS = True # HTML fragments with the Read more... links. # The following tags exist and are replaced for you: # {link} A link to the full post page. # {read_more} The string “Read more” in the current language. # {reading_time} An estimate of how long it will take to read the post. # {remaining_reading_time} An estimate of how long it will take to read the post, sans the teaser. # {min_remaining_read} The string “{remaining_reading_time} min remaining to read” in the current language. # {paragraph_count} The amount of paragraphs in the post. # {remaining_paragraph_count} The amount of paragraphs in the post, sans the teaser. # {{ A literal { (U+007B LEFT CURLY BRACKET) # }} A literal } (U+007D RIGHT CURLY BRACKET) # 'Read more...' for the index page, if INDEX_TEASERS is True (translatable) INDEX_READ_MORE_LINK = '<p class="more"><a href="{link}">{read_more}…</a></p>' # 'Read more...' for the RSS_FEED, if RSS_TEASERS is True (translatable) RSS_READ_MORE_LINK = '<p><a href="{link}">{read_more}…</a> ({min_remaining_read})</p>' # A HTML fragment describing the license, for the sidebar. 
# (translatable) LICENSE = "" # I recommend using the Creative Commons' wizard: # http://creativecommons.org/choose/ # LICENSE = """ # <a rel="license" href="http://creativecommons.org/licenses/by-nc-sa/2.5/ar/"> # <img alt="Creative Commons License BY-NC-SA" # style="border-width:0; margin-bottom:12px;" # src="http://i.creativecommons.org/l/by-nc-sa/2.5/ar/88x31.png"></a>""" # A small copyright notice for the page footer (in HTML). # (translatable) CONTENT_FOOTER = 'Contents &copy; {date} <a href="mailto:{email}">{author}</a> - Powered by <a href="http://getnikola.com" rel="nofollow">Nikola</a> {license}' # Things that will be passed to CONTENT_FOOTER.format(). This is done # for translatability, as dicts are not formattable. Nikola will # intelligently format the setting properly. # The setting takes a dict. The keys are languages. The values are # tuples of tuples of positional arguments and dicts of keyword arguments # to format(). For example, {'en': (('Hello'), {'target': 'World'})} # results in CONTENT_FOOTER['en'].format('Hello', target='World'). # WARNING: If you do not use multiple languages with CONTENT_FOOTER, this # still needs to be a dict of this format. (it can be empty if you # do not need formatting) # (translatable) CONTENT_FOOTER_FORMATS = { DEFAULT_LANG: ( (), { "email": BLOG_EMAIL, "author": BLOG_AUTHOR, "date": time.gmtime().tm_year, "license": LICENSE } ) } # To use comments, you can choose between different third party comment # systems. The following comment systems are supported by Nikola: # disqus, facebook, googleplus, intensedebate, isso, livefyre, muut # You can leave this option blank to disable comments. COMMENT_SYSTEM = "" # And you also need to add your COMMENT_SYSTEM_ID which # depends on what comment system you use. The default is # "nikolademo" which is a test account for Disqus. More information # is in the manual. COMMENT_SYSTEM_ID = "" # Enable annotations using annotateit.org? # If set to False, you can still enable them for individual posts and pages # setting the "annotations" metadata. # If set to True, you can disable them for individual posts and pages using # the "noannotations" metadata. # ANNOTATIONS = False # Create index.html for page (story) folders? # WARNING: if a page would conflict with the index file (usually # caused by setting slug to `index`), the STORY_INDEX # will not be generated for that directory. # STORY_INDEX = False # Enable comments on story pages? # COMMENTS_IN_STORIES = False # Enable comments on picture gallery pages? # COMMENTS_IN_GALLERIES = False # What file should be used for directory indexes? # Defaults to index.html # Common other alternatives: default.html for IIS, index.php # INDEX_FILE = "index.html" # If a link ends in /index.html, drop the index.html part. # http://mysite/foo/bar/index.html => http://mysite/foo/bar/ # (Uses the INDEX_FILE setting, so if that is, say, default.html, # it will instead /foo/default.html => /foo) # (Note: This was briefly STRIP_INDEX_HTML in v 5.4.3 and 5.4.4) # Default = False # STRIP_INDEXES = False # Should the sitemap list directories which only include other directories # and no files. # Default to True # If this is False # e.g. /2012 includes only /01, /02, /03, /04, ...: don't add it to the sitemap # if /2012 includes any files (including index.html)... add it to the sitemap # SITEMAP_INCLUDE_FILELESS_DIRS = True # List of files relative to the server root (!) that will be asked to be excluded # from indexing and other robotic spidering. * is supported. 
Will only be effective # if SITE_URL points to server root. The list is used to exclude resources from # /robots.txt and /sitemap.xml, and to inform search engines about /sitemapindex.xml. # ROBOTS_EXCLUSIONS = ["/archive.html", "/category/*.html"] # Instead of putting files in <slug>.html, put them in # <slug>/index.html. Also enables STRIP_INDEXES # This can be disabled on a per-page/post basis by adding # .. pretty_url: False # to the metadata # PRETTY_URLS = False # If True, publish future dated posts right away instead of scheduling them. # Defaults to False. # FUTURE_IS_NOW = False # If True, future dated posts are allowed in deployed output # Only the individual posts are published/deployed; not in indexes/sitemap # Generally, you want FUTURE_IS_NOW and DEPLOY_FUTURE to be the same value. # DEPLOY_FUTURE = False # If False, draft posts will not be deployed # DEPLOY_DRAFTS = True # Allows scheduling of posts using the rule specified here (new_post -s) # Specify an iCal Recurrence Rule: http://www.kanzaki.com/docs/ical/rrule.html # SCHEDULE_RULE = '' # If True, apply the scheduling rule to all posts by default # SCHEDULE_ALL = False # Do you want to add a Mathjax config file? # MATHJAX_CONFIG = "" # If you are using the compile-ipynb plugin, just add this one: # MATHJAX_CONFIG = """ # <script type="text/x-mathjax-config"> # MathJax.Hub.Config({ # tex2jax: { # inlineMath: [ ['$','$'], ["\\\(","\\\)"] ], # displayMath: [ ['$$','$$'], ["\\\[","\\\]"] ] # }, # displayAlign: 'left', // Change this to 'center' to center equations. # "HTML-CSS": { # styles: {'.MathJax_Display': {"margin": 0}} # } # }); # </script> # """ # Do you want to customize the nbconversion of your IPython notebook? # IPYNB_CONFIG = {} # With the following example configuration you can use a custom jinja template # called `toggle.tpl` which has to be located in your site/blog main folder: # IPYNB_CONFIG = {'Exporter':{'template_file': 'toggle'}} # What Markdown extensions to enable? # You will also get gist, nikola and podcast because those are # done in the code, hope you don't mind ;-) # Note: most Nikola-specific extensions are done via the Nikola plugin system, # with the MarkdownExtension class and should not be added here. # MARKDOWN_EXTENSIONS = ['fenced_code', 'codehilite'] # Social buttons. This is sample code for AddThis (which was the default for a # long time). Insert anything you want here, or even make it empty. # (translatable) # SOCIAL_BUTTONS_CODE = """ # <!-- Social buttons --> # <div id="addthisbox" class="addthis_toolbox addthis_peekaboo_style addthis_default_style addthis_label_style addthis_32x32_style"> # <a class="addthis_button_more">Share</a> # <ul><li><a class="addthis_button_facebook"></a> # <li><a class="addthis_button_google_plusone_share"></a> # <li><a class="addthis_button_linkedin"></a> # <li><a class="addthis_button_twitter"></a> # </ul> # </div> # <script src="//s7.addthis.com/js/300/addthis_widget.js#pubid=ra-4f7088a56bb93798"></script> # <!-- End of social buttons --> # """ # Show link to source for the posts? # Formerly known as HIDE_SOURCELINK (inverse) # SHOW_SOURCELINK = True # Copy the source files for your pages? # Setting it to False implies SHOW_SOURCELINK = False # COPY_SOURCES = True # Modify the number of Posts per Index Page # Defaults to 10 INDEX_DISPLAY_POST_COUNT = 10 # By default, Nikola generates RSS files for the website and for tags, and # links to it. Set this to False to disable everything RSS-related.
# GENERATE_RSS = True # RSS_LINK is a HTML fragment to link the RSS or Atom feeds. If set to None, # the base.tmpl will use the feed Nikola generates. However, you may want to # change it for a feedburner feed or something else. # RSS_LINK = None # Show only teasers in the RSS feed? Default to True RSS_TEASERS = True # Strip HTML in the RSS feed? Default to False # RSS_PLAIN = False # A search form to search this site, for the sidebar. You can use a Google # custom search (http://www.google.com/cse/) # Or a DuckDuckGo search: https://duckduckgo.com/search_box.html # Default is no search form. # (translatable) # SEARCH_FORM = "" # # This search form works for any site and looks good in the "site" theme where # it appears on the navigation bar: # # SEARCH_FORM = """ # <!-- Custom search --> # <form method="get" id="search" action="//duckduckgo.com/" # class="navbar-form pull-left"> # <input type="hidden" name="sites" value="%s"/> # <input type="hidden" name="k8" value="#444444"/> # <input type="hidden" name="k9" value="#D51920"/> # <input type="hidden" name="kt" value="h"/> # <input type="text" name="q" maxlength="255" # placeholder="Search&hellip;" class="span2" style="margin-top: 4px;"/> # <input type="submit" value="DuckDuckGo Search" style="visibility: hidden;" /> # </form> # <!-- End of custom search --> # """ % SITE_URL # # If you prefer a Google search form, here's an example that should just work: # SEARCH_FORM = """ # <!-- Custom search with Google--> # <form id="search" action="//www.google.com/search" method="get" class="navbar-form pull-left"> # <input type="hidden" name="q" value="site:%s" /> # <input type="text" name="q" maxlength="255" results="0" placeholder="Search"/> # </form> # <!-- End of custom search --> #""" % SITE_URL # Use content distribution networks for jquery, twitter-bootstrap css and js, # and html5shiv (for older versions of Internet Explorer) # If this is True, jquery and html5shiv is served from the Google and twitter- # bootstrap is served from the NetDNA CDN # Set this to False if you want to host your site without requiring access to # external resources. # USE_CDN = False # Check for USE_CDN compatibility. # If you are using custom themes, have configured the CSS properly and are # receiving warnings about incompatibility but believe they are incorrect, you # can set this to False. # USE_CDN_WARNING = True # Extra things you want in the pages HEAD tag. This will be added right # before </head> # (translatable) # EXTRA_HEAD_DATA = "" # Google Analytics or whatever else you use. Added to the bottom of <body> # in the default template (base.tmpl). # (translatable) BODY_END = "<script>(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)})(window,document,'script','//www.google-analytics.com/analytics.js','ga');ga('create','UA-57580832-1', 'auto');ga('send', 'pageview');</script>" # The possibility to extract metadata from the filename by using a # regular expression. # To make it work you need to name parts of your regular expression. # The following names will be used to extract metadata: # - title # - slug # - date # - tags # - link # - description # # An example re is the following: # '(?P<date>\d{4}-\d{2}-\d{2})-(?P<slug>.*)-(?P<title>.*)\.md' # FILE_METADATA_REGEXP = None # If you hate "Filenames with Capital Letters and Spaces.md", you should # set this to true. 
UNSLUGIFY_TITLES = True # Additional metadata that is added to a post when creating a new_post # ADDITIONAL_METADATA = {} # Nikola supports Open Graph Protocol data for enhancing link sharing and # discoverability of your site on Facebook, Google+, and other services. # Open Graph is enabled by default. # USE_OPEN_GRAPH = True # Nikola supports Twitter Card summaries # Twitter cards are disabled by default. They make it possible for you to # attach media to Tweets that link to your content. # # IMPORTANT: # Please note, that you need to opt-in for using Twitter Cards! # To do this please visit # https://dev.twitter.com/form/participate-twitter-cards # # Uncomment and modify to following lines to match your accounts. # Specifying the id for either 'site' or 'creator' will be preferred # over the cleartext username. Specifying an ID is not necessary. # Displaying images is currently not supported. # TWITTER_CARD = { # # 'use_twitter_cards': True, # enable Twitter Cards # # 'site': '@website', # twitter nick for the website # # 'site:id': 123456, # Same as site, but the website's Twitter user ID # # instead. # # 'creator': '@username', # Username for the content creator / author. # # 'creator:id': 654321, # Same as creator, but the Twitter user's ID. # } # If webassets is installed, bundle JS and CSS to make site loading faster # USE_BUNDLES = True # Plugins you don't want to use. Be careful :-) # DISABLED_PLUGINS = ["render_galleries"] # Add the absolute paths to directories containing plugins to use them. # For example, the `plugins` directory of your clone of the Nikola plugins # repository. # EXTRA_PLUGINS_DIRS = [] # List of regular expressions, links matching them will always be considered # valid by "nikola check -l" # LINK_CHECK_WHITELIST = [] # If set to True, enable optional hyphenation in your posts (requires pyphen) # HYPHENATE = False # The <hN> tags in HTML generated by certain compilers (reST/Markdown) # will be demoted by that much (1 → h1 will become h2 and so on) # This was a hidden feature of the Markdown and reST compilers in the # past. Useful especially if your post titles are in <h1> tags too, for # example. # (defaults to 1.) # DEMOTE_HEADERS = 1 # If you don’t like slugified file names ([a-z0-9] and a literal dash), # and would prefer to use all the characters your file system allows. # USE WITH CARE! This is also not guaranteed to be perfect, and may # sometimes crash Nikola, your web server, or eat your cat. # USE_SLUGIFY = True # You can configure the logging handlers installed as plugins or change the # log level of the default stderr handler. # WARNING: The stderr handler allows only the loglevels of 'INFO' and 'DEBUG'. # This is done for safety reasons, as blocking out anything other # than 'DEBUG' may hide important information and break the user # experience! LOGGING_HANDLERS = { 'stderr': {'loglevel': 'INFO', 'bubble': True}, # 'smtp': { # 'from_addr': 'test-errors@example.com', # 'recipients': ('test@example.com'), # 'credentials':('testusername', 'password'), # 'server_addr': ('127.0.0.1', 25), # 'secure': (), # 'level': 'DEBUG', # 'bubble': True # } } # Templates will use those filters, along with the defaults. # Consult your engine's documentation on filters if you need help defining # those. # TEMPLATE_FILTERS = {} # Put in global_context things you want available on all your templates. # It can be anything, data, functions, modules, etc. 
GLOBAL_CONTEXT = {} # Add functions here and they will be called with template # GLOBAL_CONTEXT as parameter when the template is about to be # rendered GLOBAL_CONTEXT_FILLER = []
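As a reading aid for CONTENT_FOOTER and CONTENT_FOOTER_FORMATS above: per the comments in this file, the per-language tuple of positional arguments and dict of keyword arguments is handed to str.format(). A minimal sketch of that expected behaviour for the single-language setup used here (where CONTENT_FOOTER is a plain string rather than a per-language dict), runnable as a continuation of this conf.py:

args, kwargs = CONTENT_FOOTER_FORMATS[DEFAULT_LANG]
footer = CONTENT_FOOTER.format(*args, **kwargs)
# {date} becomes the current year, {email} and {author} the values set above,
# and {license} the (empty) LICENSE string, yielding something like:
# 'Contents © <year> <a href="mailto:isaac.lacoba@gmail.com">Isaac Lacoba Molina</a> - Powered by ... Nikola'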
isaaclacoba/tinman
src/conf.py
Python
gpl-3.0
32,031
[ "VisIt" ]
702a16c377de75ba1580e29e74519d846b6203a2d61b87af66b434ecbe0ee200
from __future__ import division from pymer4.models import Lmer, Lm, Lm2 from pymer4.utils import get_resource_path import pandas as pd import numpy as np from scipy.special import logit from scipy.stats import ttest_ind import os import pytest import seaborn as sns from rpy2.rinterface_lib.embedded import RRuntimeError # import re np.random.seed(10) os.environ[ "KMP_DUPLICATE_LIB_OK" ] = "True" # Recent versions of rpy2 sometimes cause the python kernel to die when running R code; this handles that def test_gaussian_lm2(): df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lm2("DV ~ IV3 + IV2", group="Group", data=df) model.fit(summarize=False) assert model.coefs.shape == (3, 8) estimates = np.array([16.11554138, -1.38425772, 0.59547697]) assert np.allclose(model.coefs["Estimate"], estimates, atol=0.001) assert model.fixef.shape == (47, 3) # Test bootstrapping and permutation tests model.fit(permute=500, conf_int="boot", n_boot=500, summarize=False) assert model.ci_type == "boot (500)" assert model.sig_type == "permutation (500)" def test_gaussian_lm(): df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lm("DV ~ IV1 + IV3", data=df) model.fit(summarize=False) assert model.coefs.shape == (3, 8) estimates = np.array([42.24840439, 0.24114414, -3.34057784]) assert np.allclose(model.coefs["Estimate"], estimates, atol=0.001) # Test robust SE against statsmodels standard_se = np.array([6.83783939, 0.30393886, 3.70656475]) assert np.allclose(model.coefs["SE"], standard_se, atol=0.001) hc0_se = np.array([7.16661817, 0.31713064, 3.81918182]) model.fit(robust="hc0", summarize=False) assert np.allclose(model.coefs["SE"], hc0_se, atol=0.001) hc1_se = np.array([7.1857547, 0.31797745, 3.82937992]) # hc1 is the default model.fit(robust=True, summarize=False) assert np.allclose(model.coefs["SE"], hc1_se, atol=0.001) hc2_se = np.array([7.185755, 0.317977, 3.829380]) model.fit(robust="hc1", summarize=False) assert np.allclose(model.coefs["SE"], hc2_se, atol=0.001) hc3_se = np.array([7.22466699, 0.31971942, 3.84863701]) model.fit(robust="hc3", summarize=False) assert np.allclose(model.coefs["SE"], hc3_se, atol=0.001) hac_lag1_se = np.array([8.20858448, 0.39184764, 3.60205873]) model.fit(robust="hac", summarize=False) assert np.allclose(model.coefs["SE"], hac_lag1_se, atol=0.001) # Test bootstrapping model.fit(summarize=False, conf_int="boot") assert model.ci_type == "boot (500)" # Test permutation model.fit(summarize=False, permute=500) assert model.sig_type == "permutation (500)" # Test WLS df_two_groups = df.query("IV3 in [0.5, 1.0]").reset_index(drop=True) x = df_two_groups.query("IV3 == 0.5").DV.values y = df_two_groups.query("IV3 == 1.0").DV.values # Fit new a model using a categorical predictor with unequal variances (WLS) model = Lm("DV ~ IV3", data=df_two_groups) model.fit(summarize=False, weights="IV3") assert model.estimator == "WLS" # Make sure welch's t-test lines up with scipy wls = np.abs(model.coefs.loc["IV3", ["T-stat", "P-val"]].values) scit = np.abs(ttest_ind(x, y, equal_var=False)) assert all([np.allclose(a, b) for a, b in zip(wls, scit)]) def test_gaussian_lmm(): df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lmer("DV ~ IV3 + IV2 + (IV2|Group) + (1|IV3)", data=df) opt_opts = "optimizer='Nelder_Mead', optCtrl = list(FtolAbs=1e-8, XtolRel=1e-8)" model.fit(summarize=False, control=opt_opts) assert model.coefs.shape == (3, 8) estimates = np.array([12.04334602, -1.52947016, 0.67768509]) assert 
np.allclose(model.coefs["Estimate"], estimates, atol=0.001) assert isinstance(model.fixef, list) assert (model.fixef[0].index.astype(int) == df.Group.unique()).all() assert (model.fixef[1].index.astype(float) == df.IV3.unique()).all() assert model.fixef[0].shape == (47, 3) assert model.fixef[1].shape == (3, 3) assert isinstance(model.ranef, list) assert model.ranef[0].shape == (47, 2) assert model.ranef[1].shape == (3, 1) assert (model.ranef[1].index == ["0.5", "1", "1.5"]).all() assert model.ranef_corr.shape == (1, 3) assert model.ranef_var.shape == (4, 3) assert np.allclose(model.coefs.loc[:, "Estimate"], model.fixef[0].mean(), atol=0.01) # Test predict # Little hairy so we test a few different cases. If a dataframe with non-matching # column names is passed in, but we only used fixed-effects to make predictions, # then R will not complain and will return population level predictions given the # model's original data. This is undesirable behavior, so pymer tries to naively # check column names in Python first and checks the predictions against the # originally fitted values second. This works fine except when there are # categorical predictors which get expanded out to a design matrix internally in R. # Unfortunately we can't easily pre-expand this to check against the column names of # the model matrix. # Test circular prediction which should raise error with pytest.raises(ValueError): assert np.allclose(model.predict(model.data), model.data.fits) # Same thing, but skip the prediction verification; no error assert np.allclose( model.predict(model.data, verify_predictions=False), model.data.fits ) # Test on data that has no matching columns; X = pd.DataFrame(np.random.randn(model.data.shape[0], model.data.shape[1] - 1)) # Should raise error no matching columns, caught by checks in Python with pytest.raises(ValueError): model.predict(X) # If user skips Python checks, then pymer raises an error if the predictions match # the population predictions from the model's original data (which is what predict() # in R will do by default).
with pytest.raises(ValueError): model.predict(X, skip_data_checks=True, use_rfx=False) # If the user skips check, but tries to predict with rfx then R will complain so we # can check for an exception raised from R rather than pymer with pytest.raises(RRuntimeError): model.predict(X, skip_data_checks=True, use_rfx=True) # Finally a user can turn off every kind of check in which case we expect circular predictions pop_preds = model.predict(model.data, use_rfx=False, verify_predictions=False) assert np.allclose( pop_preds, model.predict( X, use_rfx=False, skip_data_checks=True, verify_predictions=False, ), ) # Test prediction with categorical variables df["DV_ll"] = df.DV_l.apply(lambda x: "yes" if x == 1 else "no") m = Lmer("DV ~ IV3 + DV_ll + (IV2|Group) + (1|IV3)", data=df) m.fit(summarize=False) # Should fail because column name checks don't understand expanding levels of # categorical variable into new design matrix columns, as the checks are in Python # but R handles the design matrix conversion with pytest.raises(ValueError): m.predict(m.data, verify_predictions=False) # Should fail because of circular predictions with pytest.raises(ValueError): m.predict(m.data, skip_data_checks=True) # Test simulate out = model.simulate(2) assert isinstance(out, pd.DataFrame) assert out.shape == (model.data.shape[0], 2) out = model.simulate(2, use_rfx=True) assert isinstance(out, pd.DataFrame) assert out.shape == (model.data.shape[0], 2) # Smoketest for old_optimizer model.fit(summarize=False, old_optimizer=True) # test fixef code for 1 fixed effect model = Lmer("DV ~ IV3 + IV2 + (IV2|Group)", data=df) model.fit(summarize=False, control=opt_opts) assert (model.fixef.index.astype(int) == df.Group.unique()).all() assert model.fixef.shape == (47, 3) assert np.allclose(model.coefs.loc[:, "Estimate"], model.fixef.mean(), atol=0.01) # test fixef code for 0 fixed effects model = Lmer("DV ~ (IV2|Group) + (1|IV3)", data=df) model.fit(summarize=False, control=opt_opts) assert isinstance(model.fixef, list) assert (model.fixef[0].index.astype(int) == df.Group.unique()).all() assert (model.fixef[1].index.astype(float) == df.IV3.unique()).all() assert model.fixef[0].shape == (47, 2) assert model.fixef[1].shape == (3, 2) def test_contrasts(): df = sns.load_dataset("gammas").rename(columns={"BOLD signal": "bold"}) grouped_means = df.groupby("ROI")["bold"].mean() model = Lmer("bold ~ ROI + (1|subject)", data=df) custom_contrast = grouped_means["AG"] - np.mean( [grouped_means["IPS"], grouped_means["V1"]] ) grand_mean = grouped_means.mean() con1 = grouped_means["V1"] - grouped_means["IPS"] con2 = grouped_means["AG"] - grouped_means["IPS"] intercept = grouped_means["IPS"] # Treatment contrasts with non-alphabetic order model.fit(factors={"ROI": ["IPS", "V1", "AG"]}, summarize=False) assert np.allclose(model.coefs.loc["(Intercept)", "Estimate"], intercept) assert np.allclose(model.coefs.iloc[1, 0], con1) assert np.allclose(model.coefs.iloc[2, 0], con2) # Polynomial contrasts model.fit(factors={"ROI": ["IPS", "V1", "AG"]}, ordered=True, summarize=False) assert np.allclose(model.coefs.loc["(Intercept)", "Estimate"], grand_mean) assert np.allclose(model.coefs.iloc[1, 0], 0.870744) # From R assert np.allclose(model.coefs.iloc[2, 0], 0.609262) # From R # Custom contrasts model.fit(factors={"ROI": {"AG": 1, "IPS": -0.5, "V1": -0.5}}, summarize=False) assert np.allclose(model.coefs.loc["(Intercept)", "Estimate"], grand_mean) assert np.allclose(model.coefs.iloc[1, 0], custom_contrast) def test_post_hoc(): np.random.seed(1) df = 
pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lmer("DV ~ IV1*IV3*DV_l + (IV1|Group)", data=df, family="gaussian") model.fit( factors={"IV3": ["0.5", "1.0", "1.5"], "DV_l": ["0", "1"]}, summarize=False ) marginal, contrasts = model.post_hoc(marginal_vars="IV3", p_adjust="dunnet") assert marginal.shape[0] == 3 assert contrasts.shape[0] == 3 marginal, contrasts = model.post_hoc(marginal_vars=["IV3", "DV_l"]) assert marginal.shape[0] == 6 assert contrasts.shape[0] == 15 def test_logistic_lmm(): df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lmer("DV_l ~ IV1+ (IV1|Group)", data=df, family="binomial") model.fit(summarize=False) assert model.coefs.shape == (2, 13) estimates = np.array([-0.16098421, 0.00296261]) assert np.allclose(model.coefs["Estimate"], estimates, atol=0.001) assert isinstance(model.fixef, pd.core.frame.DataFrame) assert model.fixef.shape == (47, 2) assert isinstance(model.ranef, pd.core.frame.DataFrame) assert model.ranef.shape == (47, 2) assert np.allclose(model.coefs.loc[:, "Estimate"], model.fixef.mean(), atol=0.01) # Test prediction assert np.allclose( model.predict(model.data, use_rfx=True, verify_predictions=False), model.data.fits, ) assert np.allclose( model.predict(model.data, use_rfx=True, pred_type="link"), logit(model.data.fits), ) # Test RFX only model = Lmer("DV_l ~ 0 + (IV1|Group)", data=df, family="binomial") model.fit(summarize=False) assert model.fixef.shape == (47, 2) model = Lmer("DV_l ~ 0 + (IV1|Group) + (1|IV3)", data=df, family="binomial") model.fit(summarize=False) assert isinstance(model.fixef, list) assert model.fixef[0].shape == (47, 2) assert model.fixef[1].shape == (3, 2) def test_anova(): np.random.seed(1) data = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) data["DV_l2"] = np.random.randint(0, 4, data.shape[0]) model = Lmer("DV ~ IV3*DV_l2 + (IV3|Group)", data=data) model.fit(summarize=False) out = model.anova() assert out.shape == (3, 7) out = model.anova(force_orthogonal=True) assert out.shape == (3, 7) def test_poisson_lmm(): np.random.seed(1) df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) df["DV_int"] = np.random.randint(1, 10, df.shape[0]) m = Lmer("DV_int ~ IV3 + (1|Group)", data=df, family="poisson") m.fit(summarize=False) assert m.family == "poisson" assert m.coefs.shape == (2, 7) assert "Z-stat" in m.coefs.columns # Test RFX only model = Lmer("DV_int ~ 0 + (IV1|Group)", data=df, family="poisson") model.fit(summarize=False) assert model.fixef.shape == (47, 2) model = Lmer("DV_int ~ 0 + (IV1|Group) + (1|IV3)", data=df, family="poisson") model.fit(summarize=False) assert isinstance(model.fixef, list) assert model.fixef[0].shape == (47, 2) assert model.fixef[1].shape == (3, 2) def test_gamma_lmm(): np.random.seed(1) df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) df["DV_g"] = np.random.uniform(1, 2, size=df.shape[0]) m = Lmer("DV_g ~ IV3 + (1|Group)", data=df, family="gamma") m.fit(summarize=False) assert m.family == "gamma" assert m.coefs.shape == (2, 7) # Test RFX only; these work but the optimizer in R typically crashes if the model is especially bad fit so commenting out until a better dataset is acquired # model = Lmer("DV_g ~ 0 + (IV1|Group)", data=df, family="gamma") # model.fit(summarize=False) # assert model.fixef.shape == (47, 2) # model = Lmer("DV_g ~ 0 + (IV1|Group) + (1|IV3)", data=df, family="gamma") # model.fit(summarize=False) # assert isinstance(model.fixef, list) # assert model.fixef[0].shape == (47, 
2) # assert model.fixef[1].shape == (3, 2) def test_inverse_gaussian_lmm(): np.random.seed(1) df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) df["DV_g"] = np.random.uniform(1, 2, size=df.shape[0]) m = Lmer("DV_g ~ IV3 + (1|Group)", data=df, family="inverse_gaussian") m.fit(summarize=False) assert m.family == "inverse_gaussian" assert m.coefs.shape == (2, 7) # Test RFX only; these work but the optimizer in R typically crashes if the model is especially bad fit so commenting out until a better dataset is acquired # model = Lmer("DV_g ~ 0 + (IV1|Group)", data=df, family="inverse_gaussian") # model.fit(summarize=False) # assert model.fixef.shape == (47, 2) # model = Lmer("DV_g ~ 0 + (IV1|Group) + (1|IV3)", data=df, family="inverse_gaussian") # model.fit(summarize=False) # assert isinstance(model.fixef, list) # assert model.fixef[0].shape == (47, 2) # assert model.fixef[1].shape == (3, 2) def test_lmer_opt_passing(): df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lmer("DV ~ IV2 + (IV2|Group)", data=df) opt_opts = "optCtrl = list(ftol_abs=1e-8, xtol_abs=1e-8)" model.fit(summarize=False, control=opt_opts) estimates = np.array([10.301072, 0.682124]) assert np.allclose(model.coefs["Estimate"], estimates, atol=0.001) # On some hardware the optimizer will still fail to converge # assert len(model.warnings) == 0 df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) model = Lmer("DV ~ IV2 + (IV2|Group)", data=df) opt_opts = "optCtrl = list(ftol_abs=1e-4, xtol_abs=1e-4)" model.fit(summarize=False, control=opt_opts) assert len(model.warnings) >= 1 def test_glmer_opt_passing(): np.random.seed(1) df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv")) df["DV_int"] = np.random.randint(1, 10, df.shape[0]) m = Lmer("DV_int ~ IV3 + (1|Group)", data=df, family="poisson") m.fit( summarize=False, control="optCtrl = list(FtolAbs=1e-1, FtolRel=1e-1, maxfun=10)" ) assert len(m.warnings) >= 1 # all or prune to suit # tests_ = [eval(v) for v in locals() if re.match(r"^test_", str(v))] tests_ = [ test_gaussian_lm2, test_gaussian_lm, test_gaussian_lmm, test_post_hoc, test_logistic_lmm, test_anova, test_poisson_lmm, test_gamma_lmm, test_inverse_gaussian_lmm, test_lmer_opt_passing, test_glmer_opt_passing, ] @pytest.mark.parametrize("model", tests_) def test_Pool(model): from multiprocessing import get_context # squeeze model functions through Pool pickling print("Pool", model.__name__) with get_context("spawn").Pool(1) as pool: _ = pool.apply(model, [])
ejolly/pymer4
pymer4/tests/test_models.py
Python
mit
16,723
[ "Gaussian" ]
9124eca1729c97ab49400ec841ff29e35105b0df0856ab1bca65810d50019cb9
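A brief aside on the predict() behaviour that the long comment in test_gaussian_lmm describes. The sketch below is not part of the test suite; it only reuses what the tests themselves exercise (pymer4's bundled sample_data.csv, Lmer.fit(summarize=False), and predict() with verify_predictions / use_rfx), so treat it as an illustration rather than additional coverage.

import os
import numpy as np
import pandas as pd
from pymer4.models import Lmer
from pymer4.utils import get_resource_path

# Fit one of the random-slope models used in the tests above.
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
model = Lmer("DV ~ IV3 + IV2 + (IV2|Group)", data=df)
model.fit(summarize=False)

# Predicting on the training data is "circular": it raises ValueError unless
# verification is explicitly disabled. With it disabled, the predictions
# reproduce the fitted values, exactly as asserted in test_gaussian_lmm.
fits = model.predict(model.data, verify_predictions=False)
assert np.allclose(fits, model.data.fits)

# use_rfx=False ignores the group-level estimates and returns population-level
# (fixed-effects only) predictions for the same rows.
pop_preds = model.predict(model.data, use_rfx=False, verify_predictions=False)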
# # Gramps - a GTK+/GNOME based genealogy program # # Copyright (C) 2000-2006 Donald N. Allingham # Copyright (C) 2008 Brian G. Matherly # Copyright (C) 2010 Jakim Friant # Copyright (C) 2012 Hans Ulrich Frink # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Version 3.6 # $Id: SourcesCitationsReport.py 2012-12-30 Frink hansulrich.frink@gmail.com $ """ Reports/Text Report. Developed for gramps 5.0.0.1 under win 10 64bit This is my first contribution to gramps, as well as my first python module, so the programming style may in some way be unusual. Thanks to Enno Borgsteede and Tim Lyons as well as other members of gramps dev for help PLEASE FEEL FREE TO CORRECT AND TEST. This report lists all the citations and their notes in the database. so it is possible to have all the copies made from e.g. parish books together grouped by source and ordered by citation.page. I needed such a report after I changed recording notes and media with the citations and no longer with the sources. works well in pdf, text and odf Format. The latter contains TOC which are also accepted by ms office 2012 Changelog: Version 2.5: - sources are sorted by source.author+title+publ+abbrev - no German non translatables - added Filter cf. PlaceReport.py - changed citasource from gramps_id to citation rsp. source Version 3.3: - constructing dic directly - or function - sorting direct - Stylesheet in Options Version 3.4: - added .lower to sortfunctions to sources and to citation Version 3.5: - get translation work - include Persons names and gramps_id cited in the notes. Version 3.6: - improved translation - Date formats. 
next steps: - have an index on Persons - have footer """ #------------------------------------------------------------------------ # # standard python modules # #------------------------------------------------------------------------ import time from collections import defaultdict #------------------------------------------------------------------------ # # Gramps modules # #------------------------------------------------------------------------ from gramps.gen.plug.menu import StringOption, FilterOption, BooleanOption from gramps.gen.plug.report import Report from gramps.gen.plug.report import MenuReportOptions from gramps.gen.plug.docgen import (IndexMark, FontStyle, ParagraphStyle, TableStyle, TableCellStyle, FONT_SERIF, FONT_SANS_SERIF, INDEX_TYPE_TOC, PARA_ALIGN_CENTER) from gramps.gen.plug.report import stdoptions from gramps.gen.const import GRAMPS_LOCALE as glocale try: _trans = glocale.get_addon_translator(__file__) except ValueError: _trans = glocale.translation _ = _trans.sgettext #------------------------------------------------------------------------ # # SourcesCitationsReport # #------------------------------------------------------------------------ class SourcesCitationsReport(Report): """ This report produces a summary of the objects in the database. """ def __init__(self, database, options, user): """ Create the SourceReport object that produces the report. The arguments are: database - the GRAMPS database instance options - instance of the Options class for this report user - a gen.user.User() instance This report needs the following parameters (class variables) that come in the options class. Sources - List of places to report on. """ Report.__init__(self, database, options, user) self.__db = database menu = options.menu self.title_string = menu.get_option_by_name('title').get_value() self.subtitle_string = menu.get_option_by_name('subtitle').get_value() self.footer_string = menu.get_option_by_name('footer').get_value() self.set_locale(menu.get_option_by_name('trans').get_value()) stdoptions.run_date_format_option(self, menu) self.showperson = menu.get_option_by_name('showperson').get_value() filter_option = menu.get_option_by_name('filter') self.filter = filter_option.get_filter() # self.sort = Sort.Sort(self.database) if self.filter.get_name() != '': # Use the selected filter to provide a list of source handles sourcefilterlist = self.__db.iter_source_handles() self.source_handles = self.filter.apply(self.__db, sourcefilterlist) else: self.source_handles = self.__db.get_source_handles() def write_report(self): """ Overridden function to generate the report. 
""" self.doc.start_paragraph("SRC-ReportTitle") title = self.title_string mark = IndexMark(title, INDEX_TYPE_TOC, 1) self.doc.write_text(title, mark) self.doc.end_paragraph() self.doc.start_paragraph("SRC-ReportTitle") title = self.subtitle_string mark = IndexMark(title, INDEX_TYPE_TOC, 2) self.doc.write_text(title, mark) self.doc.end_paragraph() self.listeventref() def _formatlocal_source_text(self, source): if not source: return src_txt = "" if source.get_author(): src_txt += source.get_author() if source.get_title(): if src_txt: src_txt += ", " src_txt += '"%s"' % source.get_title() if source.get_publication_info(): if src_txt: src_txt += ", " src_txt += source.get_publication_info() if source.get_abbreviation(): if src_txt: src_txt += ", " src_txt += "(%s)" % source.get_abbreviation() return src_txt def listeventref(self): sc = {'source': 'S_ID', 'citalist': 'C_ID' } stc = {} citation_without_notes = 0 EMPTY = " " def toYear(date): yeartext = date.get_year() return yeartext # build citasource dictionary and cl list sc = defaultdict(list) cl = [] i=1 for ci in self.__db.iter_citations(): if ci.source_handle in self.source_handles: sc[ci.source_handle].append(ci.handle) cl.append(ci.handle) # build citations - event dic xy #(a,b): set([('Citation', 'c4a8c46041e08799b17')]) # event: c4a8c4f95b01e38564a event: Taufe ci = defaultdict(list) for ev in self.__db.iter_events(): refhandlelist = ev.get_referenced_handles() for (a,b) in refhandlelist: if a == 'Citation': if b in cl: #! ci[b].append(ev.handle) # build eventpersonrole dictionary # event: c4a8c4f95b01e38564a event: Taufe refhandlelist =[] pedic ={} pedic = defaultdict(set) for pe in self.__db.get_person_handles(): for eventref in self.__db.get_person_from_handle(pe).event_ref_list: pedic[eventref.ref].add((eventref.get_role().xml_str(), pe)) # build eventfamily dictionary # event: c4a8c4f95b01e38564a event: Taufe refhandlelist =[] fedic ={} fedic = defaultdict(set) for fh in self.__db.get_family_handles(): for eventref in self.__db.get_family_from_handle(fh).event_ref_list: fedic[eventref.ref].add((self.__db.get_family_from_handle(fh).mother_handle,self.__db.get_family_from_handle(fh).father_handle,fh )) #source skeys = sorted(sc.keys(), key=str.lower) for s in skeys: self.doc.start_paragraph("SRC-SourceTitle") self.doc.write_text(self._formatlocal_source_text(self.__db.get_source_from_handle(s))) self.doc.end_paragraph() self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_(" key: %s") % self.__db.get_source_from_handle(s).gramps_id) self.doc.end_paragraph() i = 1 ckeys = sc[s] ckeys.sort(key=lambda x:self.__db.get_citation_from_handle(x).page) for c in ckeys: # c contains citationhandles self.doc.start_paragraph("SRC-CitationTitle") self.doc.write_text(_("%d") % i) self.doc.write_text(_(" %s") % self.__db.get_citation_from_handle(c).page) # self.doc.write_text(_(" Anno %s - ") % # self.__db.get_citation_from_handle(c).date) date = self._get_date(self.__db.get_citation_from_handle(c).get_date_object()) #print(date) self.doc.write_text(_(" - %s ") % date) self.doc.end_paragraph() # note for notehandle in self.__db.get_citation_from_handle(c).get_note_list(): self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_(" Type: %s") % self.__db.get_note_from_handle(notehandle).type) self.doc.write_text(_(" N-ID: %s") % self.__db.get_note_from_handle(notehandle).gramps_id) self.doc.end_paragraph() self.doc.start_paragraph("SRC-SourceDetails") self.doc.end_paragraph() self.doc.start_paragraph("SRC-SourceDetails") 
self.doc.write_text(_(" %s") % self.__db.get_note_from_handle(notehandle).text) self.doc.end_paragraph() # event as table for e in ci[c]: self.doc.start_paragraph("SRC-SourceDetails") self.doc.end_paragraph() # if it is a familyevent for k in fedic.keys(): if e == k: for (a,b,c) in fedic[k]: self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_("%s") % self.__db.get_event_from_handle(e).get_type()) self.doc.write_text(_(" ( %s )") % self.__db.get_event_from_handle(e).gramps_id) if b: father = self.__db.get_person_from_handle(b) if a: mother = self.__db.get_person_from_handle(a) self.doc.write_text(_(" Eheleute: ")) if father: self.doc.write_text(_("%s ") % self.__db.get_person_from_handle(b).primary_name.get_name()) if mother: self.doc.write_text(_("and %s ") % mother.primary_name.get_name()) self.doc.end_paragraph() for (a,b) in pedic[e]: if a == 'Primary': # FIXME self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_("%s") % self.__db.get_event_from_handle(e).get_type()) self.doc.write_text(_(" ( %s )") % self.__db.get_event_from_handle(e).gramps_id) self.doc.write_text(_(" %s") % self.__db.get_person_from_handle(b).primary_name.get_name()) self.doc.end_paragraph() if self.showperson: liste = pedic[e].copy() if len(liste)>0: self.doc.start_table("EventTable", "SRC-EventTable") column_titles = [_("Person"), _("ID"), _("Role")] self.doc.start_row() for title in column_titles: self.doc.start_cell("SRC-TableColumn") self.doc.start_paragraph("SRC-ColumnTitle") self.doc.write_text(title) self.doc.end_paragraph() self.doc.end_cell() self.doc.end_row() for (a,b) in liste: self.doc.start_row() self.doc.start_cell("SRC-Cell") self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_("%s") % self.__db.get_person_from_handle(b).primary_name.get_name()) self.doc.end_paragraph() self.doc.end_cell() self.doc.start_cell("SRC-Cell") self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_("%s") % self.__db.get_person_from_handle(b).gramps_id) self.doc.end_paragraph() self.doc.end_cell() self.doc.start_cell("SRC-Cell") self.doc.start_paragraph("SRC-SourceDetails") self.doc.write_text(_("%s") % a) self.doc.end_paragraph() self.doc.end_cell() self.doc.end_row() self.doc.end_table() i+=1 #------------------------------------------------------------------------ # # SourcesCitationsOptions # #------------------------------------------------------------------------ class SourcesCitationsOptions(MenuReportOptions): """ SourcesCitationsOptions provides the options for the SourcesCitationsReport. """ def __init__(self, name, dbase): self.__filter = None MenuReportOptions.__init__(self, name, dbase) def get_subject(self): """ Return a string that describes the subject of the report. 
""" return self.__filter.get_filter().get_name() def add_menu_options(self, menu): """ Add the options for this report """ category_name = _("Report Options") title = StringOption(_('Report Title'), _('Title of the Report') ) title.set_help(_("Title string for the report.")) menu.add_option(category_name, "title", title) subtitle = StringOption(_('Subtitle'), _('Subtitle of the Report') ) subtitle.set_help(_("Subtitle string for the report.")) menu.add_option(category_name, "subtitle", subtitle) dateinfo = time.localtime(time.time()) #rname = self.__db.get_researcher().get_name() rname = "researcher name" footer_string = _('Copyright %(year)d %(name)s') % { 'year' : dateinfo[0], 'name' : rname } footer = StringOption(_('Footer'), footer_string ) footer.set_help(_("Footer string for the page.")) menu.add_option(category_name, "footer", footer) # Reload filters to pick any new ones CustomFilters = None from gramps.gen.filters import CustomFilters, GenericFilter self.__filter = FilterOption(_("Select using filter"), 0) self.__filter.set_help(_("Select sources using a filter")) filter_list = [] filter_list.append(GenericFilter()) filter_list.extend(CustomFilters.get_filters('Source')) self.__filter.set_filters(filter_list) menu.add_option(category_name, "filter", self.__filter) showperson = BooleanOption(_("Show persons"), True) showperson.set_help(_("Whether to show events and persons mentioned in the note")) menu.add_option(category_name, "showperson", showperson) locale_opt = stdoptions.add_localization_option(menu, category_name) stdoptions.add_date_format_option(menu, category_name, locale_opt) def make_default_style(self, default_style): """ Make the default output style for the Place report. """ self.default_style = default_style self.__report_title_style() self.__source_title_style() self.__source_details_style() self.__citation_title_style() self.__column_title_style() self.__section_style() self.__event_table_style() self.__details_style() self.__cell_style() self.__table_column_style() def __report_title_style(self): """ Define the style used for the report title """ font = FontStyle() font.set(face=FONT_SANS_SERIF, size=16, bold=1) para = ParagraphStyle() para.set_font(font) para.set_header_level(1) para.set_top_margin(0.25) para.set_bottom_margin(0.25) para.set_alignment(PARA_ALIGN_CENTER) para.set_description(_('The style used for the title of the report.')) self.default_style.add_paragraph_style("SRC-ReportTitle", para) def __source_title_style(self): """ Define the style used for the source title """ font = FontStyle() font.set(face=FONT_SERIF, size=12, italic=0, bold=1) para = ParagraphStyle() para.set_font(font) para.set_header_level(2) para.set(first_indent=0.0, lmargin=0.0) para.set_top_margin(0.75) para.set_bottom_margin(0.25) para.set_description(_('The style used for source title.')) self.default_style.add_paragraph_style("SRC-SourceTitle", para) def __citation_title_style(self): """ Define the style used for the citation title """ font = FontStyle() font.set(face=FONT_SERIF, size=12, italic=0, bold=1) para = ParagraphStyle() para.set_font(font) para.set_header_level(3) para.set(first_indent=0.0, lmargin=0.0) para.set_top_margin(0.75) para.set_bottom_margin(0.0) para.set_description(_('The style used for citation title.')) self.default_style.add_paragraph_style("SRC-CitationTitle", para) def __source_details_style(self): """ Define the style used for the place details """ font = FontStyle() font.set(face=FONT_SERIF, size=10) para = ParagraphStyle() para.set_font(font) 
para.set(first_indent=0.0, lmargin=0.0) para.set_description(_('The style used for Source details.')) self.default_style.add_paragraph_style("SRC-SourceDetails", para) def __column_title_style(self): """ Define the style used for the event table column title """ font = FontStyle() font.set(face=FONT_SERIF, size=10, bold=1) para = ParagraphStyle() para.set_font(font) para.set(first_indent=0.0, lmargin=0.0) para.set_description(_('The style used for a column title.')) self.default_style.add_paragraph_style("SRC-ColumnTitle", para) def __section_style(self): """ Define the style used for each section """ font = FontStyle() font.set(face=FONT_SERIF, size=10, italic=0, bold=0) para = ParagraphStyle() para.set_font(font) # para.set(first_indent=-1.5, lmargin=1.5) para.set(first_indent=0.0, lmargin=0.0) para.set_top_margin(0.5) para.set_bottom_margin(0.25) para.set_description(_('The style used for each section.')) self.default_style.add_paragraph_style("SRC-Section", para) def __event_table_style(self): """ Define the style used for event table """ table = TableStyle() table.set_width(100) table.set_columns(3) table.set_column_width(0, 35) table.set_column_width(1, 15) table.set_column_width(2, 35) self.default_style.add_table_style("SRC-EventTable", table) table.set_width(100) table.set_columns(3) table.set_column_width(0, 35) table.set_column_width(1, 15) table.set_column_width(2, 35) self.default_style.add_table_style("SRC-PersonTable", table) def __details_style(self): """ Define the style used for person and event details """ font = FontStyle() font.set(face=FONT_SERIF, size=10) para = ParagraphStyle() para.set_font(font) para.set_description(_('The style used for event and person details.')) self.default_style.add_paragraph_style("SRC-Details", para) def __cell_style(self): """ Define the style used for cells in the event table """ cell = TableCellStyle() self.default_style.add_cell_style("SRC-Cell", cell) def __table_column_style(self): """ Define the style used for event table columns """ cell = TableCellStyle() cell.set_bottom_border(1) self.default_style.add_cell_style('SRC-TableColumn', cell)
sam-m888/addons-source
SourcesCitationsReport/SourcesCitationsReport.py
Python
gpl-2.0
23,556
[ "Brian" ]
dfa9c46cc8aa26d160743443cca88340ed9410458de3e30827325d859490d0bc
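The heart of listeventref() above is a chain of defaultdict lookups: source handle to citation handles, citation handle to event handles, and event handle to (role, person) pairs. The standalone sketch below shows that grouping pattern with made-up handles; it deliberately avoids the Gramps API, so the handle names and data are purely illustrative.

from collections import defaultdict

# Stand-ins for the (citation.handle, citation.source_handle) values the report
# reads while looping over iter_citations().
citations = [("c1", "s1"), ("c2", "s1"), ("c3", "s2")]
# Stand-ins for the ('Citation', handle) references each event exposes via
# get_referenced_handles(), reduced to (event_handle, citation_handle) pairs.
event_refs = [("e1", "c1"), ("e2", "c1"), ("e3", "c3")]

by_source = defaultdict(list)    # source handle -> list of citation handles
for c_handle, s_handle in citations:
    by_source[s_handle].append(c_handle)

by_citation = defaultdict(list)  # citation handle -> list of event handles
for e_handle, c_handle in event_refs:
    by_citation[c_handle].append(e_handle)

# The report then walks sources -> citations -> events in sorted order.
for s_handle, c_handles in sorted(by_source.items()):
    for c_handle in sorted(c_handles):
        print(s_handle, c_handle, by_citation.get(c_handle, []))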
"""A convenience which constructs expression trees from an easy-to-read syntax Use this unless you have a compelling reason not to; it performs some optimizations that would be tedious to do when constructing an expression tree by hand. """ from collections import Mapping from inspect import isfunction, ismethod from parsimonious.exceptions import BadGrammar, UndefinedLabel from parsimonious.expressions import (Literal, Regex, Sequence, OneOf, Lookahead, Optional, ZeroOrMore, OneOrMore, Not, TokenMatcher, expression) from parsimonious.nodes import NodeVisitor from parsimonious.utils import StrAndRepr, evaluate_string class Grammar(StrAndRepr, Mapping): """A collection of rules that describe a language You can start parsing from the default rule by calling ``parse()`` directly on the ``Grammar`` object:: g = Grammar(''' polite_greeting = greeting ", my good " title greeting = "Hi" / "Hello" title = "madam" / "sir" ''') g.parse('Hello, my good sir') Or start parsing from any of the other rules; you can pull them out of the grammar as if it were a dictionary:: g['title'].parse('sir') You could also just construct a bunch of ``Expression`` objects yourself and stitch them together into a language, but using a ``Grammar`` has some important advantages: * Languages are much easier to define in the nice syntax it provides. * Circular references aren't a pain. * It does all kinds of whizzy space- and time-saving optimizations, like factoring up repeated subexpressions into a single object, which should increase cache hit ratio. [Is this implemented yet?] """ def __init__(self, rules='', **more_rules): """Construct a grammar. :arg rules: A string of production rules, one per line. :arg default_rule: The name of the rule invoked when you call :meth:`parse()` or :meth:`match()` on the grammar. Defaults to the first rule. Falls back to None if there are no string-based rules in this grammar. :arg more_rules: Additional kwargs whose names are rule names and values are Expressions or custom-coded callables which accomplish things the built-in rule syntax cannot. These take precedence over ``rules`` in case of naming conflicts. """ decorated_custom_rules = dict( (k, expression(v, k, self) if isfunction(v) or ismethod(v) else v) for k, v in more_rules.iteritems()) self._expressions, first = self._expressions_from_rules(rules, decorated_custom_rules) self.default_rule = first # may be None def __getitem__(self, rule_name): return self._expressions[rule_name] def __iter__(self): return self._expressions.iterkeys() def __len__(self): return len(self._expressions) def default(self, rule_name): """Return a new Grammar whose :term:`default rule` is ``rule_name``.""" new = self._copy() new.default_rule = new[rule_name] return new def _copy(self): """Return a shallow copy of myself. Deep is unnecessary, since Expression trees are immutable. Subgrammars recreate all the Expressions from scratch, and AbstractGrammars have no Expressions. """ new = Grammar(**self._expressions) new.default_rule = self.default_rule return new def _expressions_from_rules(self, rules, custom_rules): """Return a 2-tuple: a dict of rule names pointing to their expressions, and then the first rule. It's a web of expressions, all referencing each other. Typically, there's a single root to the web of references, and that root is the starting symbol for parsing, but there's nothing saying you can't have multiple roots. 
:arg custom_rules: A map of rule names to custom-coded rules: Expressions """ tree = rule_grammar.parse(rules) return RuleVisitor(custom_rules).visit(tree) def parse(self, text, pos=0): """Parse some text with the :term:`default rule`. :arg pos: The index at which to start parsing """ self._check_default_rule() return self.default_rule.parse(text, pos=pos) def match(self, text, pos=0): """Parse some text with the :term:`default rule` but not necessarily all the way to the end. :arg pos: The index at which to start parsing """ self._check_default_rule() return self.default_rule.match(text, pos=pos) def _check_default_rule(self): """Raise RuntimeError if there is no default rule defined.""" if not self.default_rule: raise RuntimeError("Can't call parse() on a Grammar that has no " "default rule. Choose a specific rule instead, " "like some_grammar['some_rule'].parse(...).") def __unicode__(self): """Return a rule string that, when passed to the constructor, would reconstitute the grammar.""" exprs = [self.default_rule] if self.default_rule else [] exprs.extend(expr for expr in self.itervalues() if expr is not self.default_rule) return '\n'.join(expr.as_rule() for expr in exprs) def __repr__(self): """Return an expression that will reconstitute the grammar.""" return "Grammar('%s')" % str(self).encode('string_escape') class TokenGrammar(Grammar): """A Grammar which takes a list of pre-lexed tokens instead of text This is useful if you want to do the lexing yourself, as a separate pass: for example, to implement indentation-based languages. """ def _expressions_from_rules(self, rules, custom_rules): tree = rule_grammar.parse(rules) return TokenRuleVisitor(custom_rules).visit(tree) class BootstrappingGrammar(Grammar): """The grammar used to recognize the textual rules that describe other grammars This grammar gets its start from some hard-coded Expressions and claws its way from there to an expression tree that describes how to parse the grammar description syntax. """ def _expressions_from_rules(self, rule_syntax, custom_rules): """Return the rules for parsing the grammar definition syntax. Return a 2-tuple: a dict of rule names pointing to their expressions, and then the top-level expression for the first rule. """ # Hard-code enough of the rules to parse the grammar that describes the # grammar description language, to bootstrap: comment = Regex(r'#[^\r\n]*', name='comment') meaninglessness = OneOf(Regex(r'\s+'), comment, name='meaninglessness') _ = ZeroOrMore(meaninglessness, name='_') equals = Sequence(Literal('='), _, name='equals') label = Sequence(Regex(r'[a-zA-Z_][a-zA-Z_0-9]*'), _, name='label') reference = Sequence(label, Not(equals), name='reference') quantifier = Sequence(Regex(r'[*+?]'), _, name='quantifier') # This pattern supports empty literals. TODO: A problem? 
spaceless_literal = Regex(r'u?r?"[^"\\]*(?:\\.[^"\\]*)*"', ignore_case=True, dot_all=True, name='spaceless_literal') literal = Sequence(spaceless_literal, _, name='literal') regex = Sequence(Literal('~'), literal, Regex('[ilmsux]*', ignore_case=True), _, name='regex') atom = OneOf(reference, literal, regex, name='atom') quantified = Sequence(atom, quantifier, name='quantified') term = OneOf(quantified, atom, name='term') not_term = Sequence(Literal('!'), term, _, name='not_term') term.members = (not_term,) + term.members sequence = Sequence(term, OneOrMore(term), name='sequence') or_term = Sequence(Literal('/'), _, term, name='or_term') ored = Sequence(term, OneOrMore(or_term), name='ored') expression = OneOf(ored, sequence, term, name='expression') rule = Sequence(label, equals, expression, name='rule') rules = Sequence(_, OneOrMore(rule), name='rules') # Use those hard-coded rules to parse the (more extensive) rule syntax. # (For example, unless I start using parentheses in the rule language # definition itself, I should never have to hard-code expressions for # those above.) rule_tree = rules.parse(rule_syntax) # Turn the parse tree into a map of expressions: return RuleVisitor().visit(rule_tree) # The grammar for parsing PEG grammar definitions: # This is a nice, simple grammar. We may someday add to it, but it's a safe bet # that the future will always be a superset of this. rule_syntax = (r''' # Ignored things (represented by _) are typically hung off the end of the # leafmost kinds of nodes. Literals like "/" count as leaves. rules = _ rule* rule = label equals expression equals = "=" _ literal = spaceless_literal _ # So you can't spell a regex like `~"..." ilm`: spaceless_literal = ~"u?r?\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""is / ~"u?r?'[^'\\\\]*(?:\\\\.[^'\\\\]*)*'"is expression = ored / sequence / term or_term = "/" _ term ored = term or_term+ sequence = term term+ not_term = "!" term _ lookahead_term = "&" term _ term = not_term / lookahead_term / quantified / atom quantified = atom quantifier atom = reference / literal / regex / parenthesized regex = "~" spaceless_literal ~"[ilmsux]*"i _ parenthesized = "(" _ expression ")" _ quantifier = ~"[*+?]" _ reference = label !equals # A subsequent equal sign is the only thing that distinguishes a label # (which begins a new rule) from a reference (which is just a pointer to a # rule defined somewhere else): label = ~"[a-zA-Z_][a-zA-Z_0-9]*" _ # _ = ~r"\s*(?:#[^\r\n]*)?\s*" _ = meaninglessness* meaninglessness = ~r"\s+" / comment comment = ~r"#[^\r\n]*" ''') class LazyReference(unicode): """A lazy reference to a rule, which we resolve after grokking all the rules""" name = u'' # Just for debugging: def _as_rhs(self): return u'<LazyReference to %s>' % self class RuleVisitor(NodeVisitor): """Turns a parse tree of a grammar definition into a map of ``Expression`` objects This is the magic piece that breathes life into a parsed bunch of parse rules, allowing them to go forth and parse other things. """ quantifier_classes = {'?': Optional, '*': ZeroOrMore, '+': OneOrMore} visit_expression = visit_term = visit_atom = NodeVisitor.lift_child def __init__(self, custom_rules=None): """Construct. :arg custom_rules: A dict of {rule name: expression} holding custom rules which will take precedence over the others """ self.custom_rules = custom_rules or {} def visit_parenthesized(self, parenthesized, (left_paren, _1, expression, right_paren, _2)): """Treat a parenthesized subexpression as just its contents. 
Its position in the tree suffices to maintain its grouping semantics. """ return expression def visit_quantifier(self, quantifier, (symbol, _)): """Turn a quantifier into just its symbol-matching node.""" return symbol def visit_quantified(self, quantified, (atom, quantifier)): return self.quantifier_classes[quantifier.text](atom) def visit_lookahead_term(self, lookahead_term, (ampersand, term, _)): return Lookahead(term) def visit_not_term(self, not_term, (exclamation, term, _)): return Not(term) def visit_rule(self, rule, (label, equals, expression)): """Assign a name to the Expression and return it.""" expression.name = label # Assign a name to the expr. return expression def visit_sequence(self, sequence, (term, other_terms)): """A parsed Sequence looks like [term node, OneOrMore node of ``another_term``s]. Flatten it out.""" return Sequence(term, *other_terms) def visit_ored(self, ored, (first_term, other_terms)): return OneOf(first_term, *other_terms) def visit_or_term(self, or_term, (slash, _, term)): """Return just the term from an ``or_term``. We already know it's going to be ored, from the containing ``ored``. """ return term def visit_label(self, label, (name, _)): """Turn a label into a unicode string.""" return name.text def visit_reference(self, reference, (label, not_equals)): """Stick a :class:`LazyReference` in the tree as a placeholder. We resolve them all later. """ return LazyReference(label) def visit_regex(self, regex, (tilde, literal, flags, _)): """Return a ``Regex`` expression.""" flags = flags.text.upper() pattern = literal.literal # Pull the string back out of the Literal # object. return Regex(pattern, ignore_case='I' in flags, locale='L' in flags, multiline='M' in flags, dot_all='S' in flags, unicode='U' in flags, verbose='X' in flags) def visit_spaceless_literal(self, spaceless_literal, visited_children): """Turn a string literal into a ``Literal`` that recognizes it.""" return Literal(evaluate_string(spaceless_literal.text)) def visit_literal(self, literal, (spaceless_literal, _)): """Pick just the literal out of a literal-and-junk combo.""" return spaceless_literal def generic_visit(self, node, visited_children): """Replace childbearing nodes with a list of their children; keep others untouched. For our case, if a node has children, only the children are important. Otherwise, keep the node around for (for example) the flags of the regex rule. Most of these kept-around nodes are subsequently thrown away by the other visitor methods. We can't simply hang the visited children off the original node; that would be disastrous if the node occurred in more than one place in the tree. """ return visited_children or node # should semantically be a tuple def _resolve_refs(self, rule_map, expr, done): """Return an expression with all its lazy references recursively resolved. Resolve any lazy references in the expression ``expr``, recursing into all subexpressions. :arg done: The set of Expressions that have already been or are currently being resolved, to ward off redundant work and prevent infinite recursion for circular refs """ if isinstance(expr, LazyReference): label = unicode(expr) try: reffed_expr = rule_map[label] except KeyError: raise UndefinedLabel(expr) return self._resolve_refs(rule_map, reffed_expr, done) else: if getattr(expr, 'members', ()) and expr not in done: # Prevents infinite recursion for circular refs. At worst, one # of `expr.members` can refer back to `expr`, but it can't go # any farther. 
done.add(expr) expr.members = [self._resolve_refs(rule_map, member, done) for member in expr.members] return expr def visit_rules(self, node, (_, rules)): """Collate all the rules into a map. Return (map, default rule). The default rule is the first one. Or, if you have more than one rule of that name, it's the last-occurring rule of that name. (This lets you override the default rule when you extend a grammar.) If there are no string-based rules, the default rule is None, because the custom rules, due to being kwarg-based, are unordered. """ # Map each rule's name to its Expression. Later rules of the same name # override earlier ones. This lets us define rules multiple times and # have the last declaration win, so you can extend grammars by # concatenation. rule_map = dict((expr.name, expr) for expr in rules) # And custom rules override string-based rules. This is the least # surprising choice when you compare the dict constructor: # dict({'x': 5}, x=6). rule_map.update(self.custom_rules) # Resolve references. This tolerates forward references. done = set() rule_map = dict((expr.name, self._resolve_refs(rule_map, expr, done)) for expr in rule_map.itervalues()) # isinstance() is a temporary hack around the fact that * rules don't # always get transformed into lists by NodeVisitor. We should fix that; # it's surprising and requires writing lame branches like this. return rule_map, (rule_map[rules[0].name] if isinstance(rules, list) and rules else None) class TokenRuleVisitor(RuleVisitor): """A visitor which builds expression trees meant to work on sequences of pre-lexed tokens rather than strings""" def visit_spaceless_literal(self, spaceless_literal, visited_children): """Turn a string literal into a ``TokenMatcher`` that matches ``Token`` objects by their ``type`` attributes.""" return TokenMatcher(evaluate_string(spaceless_literal.text)) def visit_regex(self, regex, (tilde, literal, flags, _)): raise BadGrammar('Regexes do not make sense in TokenGrammars, since ' 'TokenGrammars operate on pre-lexed tokens rather ' 'than characters.') # Bootstrap to level 1... rule_grammar = BootstrappingGrammar(rule_syntax) # ...and then to level 2. This establishes that the node tree of our rule # syntax is built by the same machinery that will build trees of our users' # grammars. And the correctness of that tree is tested, indirectly, in # test_grammar. rule_grammar = Grammar(rule_syntax) # TODO: Teach Expression trees how to spit out Python representations of # themselves. Then we can just paste that in above, and we won't have to # bootstrap on import. Though it'll be a little less DRY. [Ah, but this is not # so clean, because it would have to output multiple statements to get multiple # refs to a single expression hooked up.]
fjalex/parsimonious
parsimonious/grammar.py
Python
mit
19,197
[ "VisIt" ]
e4c63a924db8a80cf190c09827a00e5ab54c1cb16ffb31e12b7bbc31f9c6284b
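As the Grammar docstring and the RuleVisitor notes above explain, references are resolved lazily once all rules have been read, later rules of the same name override earlier ones, and parsing can start from any rule. A small usage sketch, assuming only the public Grammar API shown in this module (the constructor, parse(), item access, and default()):

from parsimonious.grammar import Grammar

g = Grammar(r"""
    greeting   = salutation " " name   # "name" is a forward reference here
    salutation = "Hello" / "Hi"
    name       = ~"[A-Z][a-z]*"
""")
print(g.parse("Hello World"))      # full parse tree from the default (first) rule

# Parse from a non-default rule by indexing, or rebind the default rule.
print(g["name"].parse("World"))
print(g.default("salutation").parse("Hi"))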
""" ======================================== Example with the plotly graphing library ======================================== sphinx-gallery supports examples made with the `plotly library <https://plotly.com/python/>`_. sphinx-gallery is able to capture the ``_repr_html_`` of plotly figure objects (see :ref:`capture_repr`). To display the figure, the last line in your code block should therefore be the plotly figure object. In order to use plotly, the ``conf.py`` of the project should include the following lines to select the appropriate plotly renderer:: import plotly.io as pio pio.renderers.default = 'sphinx_gallery' **Optional**: the ``sphinx_gallery`` renderer of plotly will not generate png thumbnails. For png thumbnails, you can use instead the ``sphinx_gallery_png`` renderer, and add ``plotly.io._sg_scraper.plotly_sg_scraper`` to the list of :ref:`image_scrapers`. The scraper requires you to `install the orca package <https://plotly.com/python/static-image-export/>`_. This tutorial gives a few examples of plotly figures, starting with its high-level API `plotly express <https://plotly.com/python/plotly-express/>`_. """ import plotly.express as px import numpy as np df = px.data.tips() fig = px.bar(df, x='sex', y='total_bill', facet_col='day', color='smoker', barmode='group', template='presentation+plotly' ) fig.update_layout(height=400) fig #%% # In addition to the classical scatter or bar charts, plotly provides a large # variety of traces, such as the sunburst hierarchical trace of the following # example. plotly is an interactive library: click on one of the continents # for a more detailed view of the drill-down. df = px.data.gapminder().query("year == 2007") fig = px.sunburst(df, path=['continent', 'country'], values='pop', color='lifeExp', hover_data=['iso_alpha'], color_continuous_scale='RdBu', color_continuous_midpoint=np.average(df['lifeExp'], weights=df['pop'])) fig.update_layout(title_text='Life expectancy of countries and continents') fig #%% # While plotly express is often the high-level entry point of the plotly # library, complex figures mixing different types of traces can be made # with the low-level ``graph_objects`` imperative API. from plotly.subplots import make_subplots import plotly.graph_objects as go fig = make_subplots(rows=1, cols=2, specs=[[{}, {'type':'domain'}]]) fig.add_trace(go.Bar(x=[2018, 2019, 2020], y=[3, 2, 5], showlegend=False), 1, 1) fig.add_trace(go.Pie(labels=['A', 'B', 'C'], values=[1, 3, 6]), 1, 2) fig.update_layout(height=400, template='presentation', yaxis_title_text='revenue') fig # sphinx_gallery_thumbnail_path = '_static/plotly_logo.png'
Titan-C/sphinx-gallery
examples/plot_9_plotly.py
Python
bsd-3-clause
2,741
[ "ORCA" ]
8d97e75f94cb86bfb7b2cfdc976e0ea29e964f0fe8fb951c2ca156fe457f45d1
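The docstring above spells out the conf.py changes a project needs; the sketch below just gathers them in one place. The renderer names and the plotly_sg_scraper path are taken verbatim from that docstring, while the sphinx_gallery_conf keys are the usual sphinx-gallery ones and should be adapted to the project at hand.

# conf.py (sketch): capture plotly figures as interactive HTML, no png thumbnails.
import plotly.io as pio
pio.renderers.default = 'sphinx_gallery'

# Optional: png thumbnails instead, via the orca-backed scraper.
# pio.renderers.default = 'sphinx_gallery_png'
# from plotly.io._sg_scraper import plotly_sg_scraper
# sphinx_gallery_conf = {
#     'examples_dirs': 'examples',
#     'gallery_dirs': 'auto_examples',
#     'image_scrapers': ('matplotlib', plotly_sg_scraper),
# }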
import search import array import random from math import(cos, pi) franklin_map = search.UndirectedGraph(dict( Hilliard=dict(UpperArlington=18, Valleyview=11, Dublin=15), GroveCity=dict(CanalWinchester=33, Valleyview=21, Obetz=18), UpperArlington=dict(Columbus=14, Worthington=17), Columbus=dict(Valleyview=11, CanalWinchester=27), Worthington=dict(NewAlbany=21, Gahanna=22), NewAlbany=dict(CanalWinchester=35, Gahanna=15), Gahanna=dict(Bexley=20, Reynoldsburg=13), Bexley=dict(Columbus=14, Renyoldsburg=28), CanalWinchester=dict(Reynoldsburg=27, Obetz=17), Valleyview=dict(Obetz=23) )) franklin_puzzle = search.GraphProblem('Hilliard', 'CanalWinchester', franklin_map) franklin_puzzle.label = 'Franklin' franklin_puzzle.description = ''' An abbreviated map of Franklin County, OH. This map is unique, to the best of my knowledge. ''' ohio_map = search.UndirectedGraph(dict( Kenton=dict(Ottawa=55, Troy=77,Mansfield=70, Urbana=53), Troy=dict(Dayton=28, Lima=54, Kenton= 75), London=dict(Dayton=60, Chillicothe=66, Urbana=35), Chillicothe=dict(Lebanon=77, Hillsboro=52), Columbus=dict(Athens=81, Mansfield=66, Urbana=51), Akron=dict(Mansfield=63, Cleveland=53, NewPhil=54), NewPhil=dict(Newark=79, Woodsfield=92), Lebanon=dict(Dayton=35, London=60, Hillsboro=56), )) ohio_puzzle = search.GraphProblem('Cleveland', 'Troy', ohio_map) ohio_puzzle.label = 'Ohio' romania_map = search.UndirectedGraph(dict( A=dict(Z=75,S=140,T=118), Z=dict(O=71,A=75), S=dict(O=151,R=80,F=99), T=dict(A=118,L=111), O=dict(Z=71,S=151), L=dict(T=111,M=70), M=dict(L=70,D=75), D=dict(M=75,C=120), R=dict(S=80,C=146,P=97), C=dict(R=146,P=138,D=120), F=dict(S=99,B=211), P=dict(R=97,C=138,B=101), B=dict(G=90,P=101,F=211), )) romania_puzzle = search.GraphProblem('A', 'B', romania_map) romania_puzzle.label = 'Romania' romania_puzzle.description = ''' The simplified map of Romania, per Russall & Norvig, 3rd Ed., p. 68. ''' # A trivial Problem definition class LightSwitch(search.Problem): def actions(self, state): return ['up', 'down'] def result(self, state, action): if action == 'up': return 'on' else: return 'off' def goal_test(self, state): return state == 'on' def h(self, node): state = node.state if self.goal_test(state): return 0 else: return 1 def grid_initial(grid_dimensions): grid = {} for x in range(grid_dimensions): for y in range(grid_dimensions): if y == 0: if x == 2: grid[x, y] = 'T' else: grid[x, y] = '.' elif y == 1: if x == 3: grid[x, y] = 'T' else: grid[x, y] = '.' elif y == 2: grid[x, y] = '.' elif y == 3: if x == 0: grid[x, y] = 'T' else: grid[x, y] = '.' # print(grid) return grid def grid_solved(grid_dimensions): grid = {} for x in range(grid_dimensions): for y in range(grid_dimensions): if y == 0: if x == 1: grid[x, y] = 't' elif x == 2: grid[x, y] = 'T' else: grid[x, y] = '.' elif y == 1: if x == 3: grid[x, y] = 'T' else: grid[x, y] = '.' elif y == 2: if x == 3: grid[x, y] = 't' else: grid[x, y] = '.' elif y == 3: if x == 0: grid[x, y] = 'T' elif x == 1: grid[x, y] = 't' else: grid[x, y] = '.' # print(grid) return grid class Tents(search.Problem): def __init__(self, initial, goal): self.initial = initial self.goal = goal self.current_state = initial def actions(self, state): return ['t', '.'] def result(self, state, action): updated_state = state # t = tent, T = tree, . 
= blank for x in range(4): for y in range(4): if action == 't': if self.current_state[x, y] == '.': self.current_state[x, y] = 't' elif self.current_state[x, y] == 'T': return state elif action == '.': if self.current_state[x, y] == 't': self.current_state[x, y] = '.' elif self.current_state[x, y] == 'T': return state state = updated_state return state def goal_test(self, state): for x in range(4): for y in range(4): # if they are the same, do nothing if state == self.goal[x, y]: return state else: # if it is not the same, try again state[x, y] = '.' return state def path_cost(self, c, state1, action, state2): return c + 1 def value(self, state): raise NotImplementedError def h(self, node): state = node.state if self.goal_test(state): return 0 else: return 1 def list_to_string(self, state): my_separator = " " for x in range(4): for y in range(4): string_state = my_separator.join(state[x, y]) return string_state # swiss_puzzle = search.GraphProblem('A', 'Z', sumner_map) tents_puzzle = Tents(grid_initial(4), grid_solved(4)) switch_puzzle = LightSwitch('off') switch_puzzle.label = 'Light Switch' mySearches = [ # swiss_puzzle, # tents_puzzle, ohio_puzzle, franklin_puzzle, romania_puzzle, # switch_puzzle, ]
WmHHooper/aima-python
submissions/Deas/mySearches.py
Python
mit
6,127
[ "COLUMBUS" ]
232483b89d1e3242e241df0c3ded4901db4013cacc06c67731ab167f50c0a665
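For completeness, a hedged sketch of how the puzzles defined above are usually exercised. It assumes the standard aima-python search module provides uniform_cost_search and that the node it returns exposes solution() and path_cost; none of this appears in the original file.

import search

# Uniform-cost search needs only the edge costs already encoded in the maps.
goal_node = search.uniform_cost_search(romania_puzzle)
print(goal_node.solution())   # the cities visited on the cheapest 'A' -> 'B' route
print(goal_node.path_cost)    # total distance along that route

goal_node = search.uniform_cost_search(franklin_puzzle)
print(goal_node.solution())   # Hilliard -> ... -> CanalWinchester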
tests = [
  ("python", "UnitTestBuildComposite.py", {}),
  ("python", "UnitTestScreenComposite.py", {}),
  ("python", "UnitTestAnalyzeComposite.py", {}),
]

# Recurse into each ML subpackage's own test_list.py.
for d in [
    'Cluster', 'Composite', 'Data', 'DecTree', 'Descriptors', 'InfoTheory', 'KNN', 'ModelPackage',
    'NaiveBayes', 'Neural', 'SLT', 'Scoring'
]:
  tests.append(('python', 'test_list.py', {'dir': d}))

longTests = []

if __name__ == '__main__':
  import sys
  from rdkit import TestRunner
  failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
  sys.exit(len(failed))
greglandrum/rdkit
rdkit/ML/test_list.py
Python
bsd-3-clause
543
[ "RDKit" ]
8c1fdae6b42ffb41177bbaec27598d824044dc3287e78f97882fe0599f4c3e5c
# -*- coding: utf-8 -*- """ End-to-end tests for Student's Profile Page. """ from datetime import datetime from bok_choy.web_app_test import WebAppTest from ...pages.common.logout import LogoutPage from ...pages.lms.account_settings import AccountSettingsPage from ...pages.lms.auto_auth import AutoAuthPage from ...pages.lms.learner_profile import LearnerProfilePage from ...pages.lms.dashboard import DashboardPage from ..helpers import EventsTestMixin class LearnerProfileTestMixin(EventsTestMixin): """ Mixin with helper methods for testing learner profile pages. """ PRIVACY_PUBLIC = u'all_users' PRIVACY_PRIVATE = u'private' PUBLIC_PROFILE_FIELDS = ['username', 'country', 'language_proficiencies', 'bio'] PRIVATE_PROFILE_FIELDS = ['username'] PUBLIC_PROFILE_EDITABLE_FIELDS = ['country', 'language_proficiencies', 'bio'] USER_SETTINGS_CHANGED_EVENT_NAME = u"edx.user.settings.changed" def log_in_as_unique_user(self): """ Create a unique user and return the account's username and id. """ username = "test_{uuid}".format(uuid=self.unique_id[0:6]) auto_auth_page = AutoAuthPage(self.browser, username=username).visit() user_id = auto_auth_page.get_user_id() return username, user_id def set_public_profile_fields_data(self, profile_page): """ Fill in the public profile fields of a user. """ profile_page.value_for_dropdown_field('language_proficiencies', 'English') profile_page.value_for_dropdown_field('country', 'United Kingdom') profile_page.value_for_textarea_field('bio', 'Nothing Special') def visit_profile_page(self, username, privacy=None): """ Visits a user's profile page. """ profile_page = LearnerProfilePage(self.browser, username) # Change the privacy if requested by loading the page and # changing the drop down if privacy is not None: profile_page.visit() profile_page.wait_for_page() profile_page.privacy = privacy if privacy == self.PRIVACY_PUBLIC: self.set_public_profile_fields_data(profile_page) # Reset event tracking so that the tests only see events from # loading the profile page. self.reset_event_tracking() # Load the page profile_page.visit() profile_page.wait_for_page() return profile_page def set_birth_year(self, birth_year): """ Set birth year for the current user to the specified value. """ account_settings_page = AccountSettingsPage(self.browser) account_settings_page.visit() account_settings_page.wait_for_page() self.assertEqual( account_settings_page.value_for_dropdown_field('year_of_birth', str(birth_year)), str(birth_year) ) def verify_profile_page_is_public(self, profile_page, is_editable=True): """ Verify that the profile page is currently public. """ self.assertEqual(profile_page.visible_fields, self.PUBLIC_PROFILE_FIELDS) if is_editable: self.assertTrue(profile_page.privacy_field_visible) self.assertEqual(profile_page.editable_fields, self.PUBLIC_PROFILE_EDITABLE_FIELDS) else: self.assertEqual(profile_page.editable_fields, []) def verify_profile_page_is_private(self, profile_page, is_editable=True): """ Verify that the profile page is currently private. """ if is_editable: self.assertTrue(profile_page.privacy_field_visible) self.assertEqual(profile_page.visible_fields, self.PRIVATE_PROFILE_FIELDS) def verify_profile_page_view_event(self, requesting_username, profile_user_id, visibility=None): """ Verifies that the correct view event was captured for the profile page. 
""" self.verify_events_of_type( requesting_username, u"edx.user.settings.viewed", [{ u"user_id": int(profile_user_id), u"page": u"profile", u"visibility": unicode(visibility), }] ) def assert_event_emitted_num_times(self, profile_user_id, setting, num_times): """ Verify a particular user settings change event was emitted a certain number of times. """ # pylint disable=no-member super(LearnerProfileTestMixin, self).assert_event_emitted_num_times( self.USER_SETTINGS_CHANGED_EVENT_NAME, self.start_time, profile_user_id, num_times, setting=setting ) def verify_user_preference_changed_event(self, username, user_id, setting, old_value=None, new_value=None): """ Verifies that the correct user preference changed event was recorded. """ self.verify_events_of_type( username, self.USER_SETTINGS_CHANGED_EVENT_NAME, [{ u"user_id": long(user_id), u"table": u"user_api_userpreference", u"setting": unicode(setting), u"old": old_value, u"new": new_value, u"truncated": [], }], expected_referers=["/u/{username}".format(username=username)], ) class OwnLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest): """ Tests that verify a student's own profile page. """ def verify_profile_forced_private_message(self, username, birth_year, message=None): """ Verify age limit messages for a user. """ self.set_birth_year(birth_year=birth_year if birth_year is not None else "") profile_page = self.visit_profile_page(username) self.assertTrue(profile_page.privacy_field_visible) self.assertEqual(profile_page.age_limit_message_present, message is not None) self.assertIn(message, profile_page.profile_forced_private_message) def test_profile_defaults_to_public(self): """ Scenario: Verify that a new user's profile defaults to public. Given that I am a new user. When I go to my profile page. Then I see that the profile visibility is set to public. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username) self.verify_profile_page_is_public(profile_page) def assert_default_image_has_public_access(self, profile_page): """ Assert that profile image has public access. """ self.assertTrue(profile_page.profile_has_default_image) self.assertTrue(profile_page.profile_has_image_with_public_access()) def test_make_profile_public(self): """ Scenario: Verify that the user can change their privacy. Given that I am a registered user And I visit my private profile page And I set the profile visibility to public Then a user preference changed event should be recorded When I reload the page Then the profile visibility should be shown as public """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE) profile_page.privacy = self.PRIVACY_PUBLIC self.verify_user_preference_changed_event( username, user_id, "account_privacy", old_value=self.PRIVACY_PRIVATE, # Note: default value was public, so we first change to private new_value=self.PRIVACY_PUBLIC, ) # Reload the page and verify that the profile is now public self.browser.refresh() profile_page.wait_for_page() self.verify_profile_page_is_public(profile_page) def test_make_profile_private(self): """ Scenario: Verify that the user can change their privacy. 
Given that I am a registered user And I visit my public profile page And I set the profile visibility to private Then a user preference changed event should be recorded When I reload the page Then the profile visibility should be shown as private """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) profile_page.privacy = self.PRIVACY_PRIVATE self.verify_user_preference_changed_event( username, user_id, "account_privacy", old_value=None, # Note: no old value as the default preference is public new_value=self.PRIVACY_PRIVATE, ) # Reload the page and verify that the profile is now private self.browser.refresh() profile_page.wait_for_page() self.verify_profile_page_is_private(profile_page) def test_dashboard_learner_profile_link(self): """ Scenario: Verify that my profile link is present on dashboard page and we can navigate to correct page. Given that I am a registered user. When I go to Dashboard page. And I click on username dropdown. Then I see My Profile link in the dropdown menu. When I click on My Profile link. Then I will be navigated to My Profile page. """ username, user_id = self.log_in_as_unique_user() dashboard_page = DashboardPage(self.browser) dashboard_page.visit() dashboard_page.click_username_dropdown() self.assertTrue('My Profile' in dashboard_page.username_dropdown_link_text) dashboard_page.click_my_profile_link() my_profile_page = LearnerProfilePage(self.browser, username) my_profile_page.wait_for_page() def test_fields_on_my_private_profile(self): """ Scenario: Verify that desired fields are shown when looking at her own private profile. Given that I am a registered user. And I visit My Profile page. And I set the profile visibility to private. And I reload the page. Then I should see the profile visibility selector dropdown. Then I see some of the profile fields are shown. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE) self.verify_profile_page_is_private(profile_page) self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE) def test_fields_on_my_public_profile(self): """ Scenario: Verify that desired fields are shown when looking at her own public profile. Given that I am a registered user. And I visit My Profile page. And I set the profile visibility to public. And I reload the page. Then I should see the profile visibility selector dropdown. Then I see all the profile fields are shown. And `location`, `language` and `about me` fields are editable. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.verify_profile_page_is_public(profile_page) self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PUBLIC) def _test_dropdown_field(self, profile_page, field_id, new_value, displayed_value, mode): """ Test behaviour of a dropdown field. """ profile_page.value_for_dropdown_field(field_id, new_value) self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value) self.assertTrue(profile_page.mode_for_field(field_id), mode) self.browser.refresh() profile_page.wait_for_page() self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value) self.assertTrue(profile_page.mode_for_field(field_id), mode) def _test_textarea_field(self, profile_page, field_id, new_value, displayed_value, mode): """ Test behaviour of a textarea field. 
""" profile_page.value_for_textarea_field(field_id, new_value) self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value) self.assertTrue(profile_page.mode_for_field(field_id), mode) self.browser.refresh() profile_page.wait_for_page() self.assertEqual(profile_page.get_non_editable_mode_value(field_id), displayed_value) self.assertTrue(profile_page.mode_for_field(field_id), mode) def test_country_field(self): """ Test behaviour of `Country` field. Given that I am a registered user. And I visit My Profile page. And I set the profile visibility to public and set default values for public fields. Then I set country value to `Pakistan`. Then displayed country should be `Pakistan` and country field mode should be `display` And I reload the page. Then displayed country should be `Pakistan` and country field mode should be `display` And I make `country` field editable Then `country` field mode should be `edit` And `country` field icon should be visible. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self._test_dropdown_field(profile_page, 'country', 'Pakistan', 'Pakistan', 'display') profile_page.make_field_editable('country') self.assertTrue(profile_page.mode_for_field('country'), 'edit') self.assertTrue(profile_page.field_icon_present('country')) def test_language_field(self): """ Test behaviour of `Language` field. Given that I am a registered user. And I visit My Profile page. And I set the profile visibility to public and set default values for public fields. Then I set language value to `Urdu`. Then displayed language should be `Urdu` and language field mode should be `display` And I reload the page. Then displayed language should be `Urdu` and language field mode should be `display` Then I set empty value for language. Then displayed language should be `Add language` and language field mode should be `placeholder` And I reload the page. Then displayed language should be `Add language` and language field mode should be `placeholder` And I make `language` field editable Then `language` field mode should be `edit` And `language` field icon should be visible. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self._test_dropdown_field(profile_page, 'language_proficiencies', 'Urdu', 'Urdu', 'display') self._test_dropdown_field(profile_page, 'language_proficiencies', '', 'Add language', 'placeholder') profile_page.make_field_editable('language_proficiencies') self.assertTrue(profile_page.mode_for_field('language_proficiencies'), 'edit') self.assertTrue(profile_page.field_icon_present('language_proficiencies')) def test_about_me_field(self): """ Test behaviour of `About Me` field. Given that I am a registered user. And I visit My Profile page. And I set the profile visibility to public and set default values for public fields. Then I set about me value to `Eat Sleep Code`. Then displayed about me should be `Eat Sleep Code` and about me field mode should be `display` And I reload the page. Then displayed about me should be `Eat Sleep Code` and about me field mode should be `display` Then I set empty value for about me. Then displayed about me should be `Tell other edX learners a little about yourself: where you live, what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me field mode should be `placeholder` And I reload the page. 
Then displayed about me should be `Tell other edX learners a little about yourself: where you live, what your interests are, why you're taking courses on edX, or what you hope to learn.` and about me field mode should be `placeholder` And I make `about me` field editable Then `about me` field mode should be `edit` """ placeholder_value = ( "Tell other edX learners a little about yourself: where you live, what your interests are, " "why you're taking courses on edX, or what you hope to learn." ) username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self._test_textarea_field(profile_page, 'bio', 'Eat Sleep Code', 'Eat Sleep Code', 'display') self._test_textarea_field(profile_page, 'bio', '', placeholder_value, 'placeholder') profile_page.make_field_editable('bio') self.assertTrue(profile_page.mode_for_field('bio'), 'edit') def test_birth_year_not_set(self): """ Verify message if birth year is not set. Given that I am a registered user. And birth year is not set for the user. And I visit my profile page. Then I should see a message that the profile is private until the year of birth is set. """ username, user_id = self.log_in_as_unique_user() message = "You must specify your birth year before you can share your full profile." self.verify_profile_forced_private_message(username, birth_year=None, message=message) self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE) def test_user_is_under_age(self): """ Verify message if user is under age. Given that I am a registered user. And birth year is set so that age is less than 13. And I visit my profile page. Then I should see a message that the profile is private as I am under thirteen. """ username, user_id = self.log_in_as_unique_user() under_age_birth_year = datetime.now().year - 10 self.verify_profile_forced_private_message( username, birth_year=under_age_birth_year, message='You must be over 13 to share a full profile.' ) self.verify_profile_page_view_event(username, user_id, visibility=self.PRIVACY_PRIVATE) def test_user_can_only_see_default_image_for_private_profile(self): """ Scenario: Default profile image behaves correctly for under age user. Given that I am on my profile page with private access And I can see default image When I move my cursor to the image Then I cannot see the upload/remove image text And I cannot upload/remove the image. """ year_of_birth = datetime.now().year - 5 username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PRIVATE) self.verify_profile_forced_private_message( username, year_of_birth, message='You must be over 13 to share a full profile.' ) self.assertTrue(profile_page.profile_has_default_image) self.assertFalse(profile_page.profile_has_image_with_private_access()) def test_user_can_see_default_image_for_public_profile(self): """ Scenario: Default profile image behaves correctly for public profile. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text And I am able to upload new image """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) def test_user_can_upload_the_profile_image_with_success(self): """ Scenario: Upload profile image works correctly. 
Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text When I upload new image via file uploader Then I can see the changed image And I can also see the latest image after reload. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='image.jpg') self.assertTrue(profile_page.image_upload_success) profile_page.visit() self.assertTrue(profile_page.image_upload_success) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 1) def test_user_can_see_error_for_exceeding_max_file_size_limit(self): """ Scenario: Upload profile image does not work for > 1MB image file. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text When I upload new > 1MB image via file uploader Then I can see the error message for file size limit And I can still see the default image after page reload. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='larger_image.jpg') self.assertEqual(profile_page.profile_image_message, "The file must be smaller than 1 MB in size.") profile_page.visit() self.assertTrue(profile_page.profile_has_default_image) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0) def test_user_can_see_error_for_file_size_below_the_min_limit(self): """ Scenario: Upload profile image does not work for < 100 Bytes image file. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text When I upload new < 100 Bytes image via file uploader Then I can see the error message for minimum file size limit And I can still see the default image after page reload. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='list-icon-visited.png') self.assertEqual(profile_page.profile_image_message, "The file must be at least 100 bytes in size.") profile_page.visit() self.assertTrue(profile_page.profile_has_default_image) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0) def test_user_can_see_error_for_wrong_file_type(self): """ Scenario: Upload profile image does not work for wrong file types. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text When I upload new csv file via file uploader Then I can see the error message for wrong/unsupported file type And I can still see the default image after page reload. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='cohort_users_only_username.csv') self.assertEqual( profile_page.profile_image_message, "The file must be one of the following types: .gif, .png, .jpeg, .jpg." 
) profile_page.visit() self.assertTrue(profile_page.profile_has_default_image) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 0) def test_user_can_remove_profile_image(self): """ Scenario: Remove profile image works correctly. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see the upload/remove image text When I click on the remove image link Then I can see the default image And I can still see the default image after page reload. """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='image.jpg') self.assertTrue(profile_page.image_upload_success) self.assertTrue(profile_page.remove_profile_image()) self.assertTrue(profile_page.profile_has_default_image) profile_page.visit() self.assertTrue(profile_page.profile_has_default_image) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 2) def test_user_cannot_remove_default_image(self): """ Scenario: Remove profile image does not work for default images. Given that I am on my profile page with public access And I can see default image When I move my cursor to the image Then I can see only the upload image text And I cannot see the remove image text """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) self.assertFalse(profile_page.remove_link_present) def test_eventing_after_multiple_uploads(self): """ Scenario: An event is fired when a user with a profile image uploads another image Given that I am on my profile page with public access And I upload a new image via file uploader When I upload another image via the file uploader Then two upload events have been emitted """ username, user_id = self.log_in_as_unique_user() profile_page = self.visit_profile_page(username, privacy=self.PRIVACY_PUBLIC) self.assert_default_image_has_public_access(profile_page) profile_page.upload_file(filename='image.jpg') self.assertTrue(profile_page.image_upload_success) profile_page.upload_file(filename='image.jpg', wait_for_upload_button=False) self.assert_event_emitted_num_times(user_id, 'profile_image_uploaded_at', 2) class DifferentUserLearnerProfilePageTest(LearnerProfileTestMixin, WebAppTest): """ Tests that verify viewing the profile page of a different user. """ def test_different_user_private_profile(self): """ Scenario: Verify that desired fields are shown when looking at a different user's private profile. Given that I am a registered user. And I visit a different user's private profile page. Then I shouldn't see the profile visibility selector dropdown. Then I see some of the profile fields are shown. """ different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PRIVATE) username, __ = self.log_in_as_unique_user() profile_page = self.visit_profile_page(different_username) self.verify_profile_page_is_private(profile_page, is_editable=False) self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE) def test_different_user_under_age(self): """ Scenario: Verify that an under age user's profile is private to others. Given that I am a registered user. And I visit an under age user's profile page. Then I shouldn't see the profile visibility selector dropdown. 
Then I see that only the private fields are shown. """ under_age_birth_year = datetime.now().year - 10 different_username, different_user_id = self._initialize_different_user( privacy=self.PRIVACY_PUBLIC, birth_year=under_age_birth_year ) username, __ = self.log_in_as_unique_user() profile_page = self.visit_profile_page(different_username) self.verify_profile_page_is_private(profile_page, is_editable=False) self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PRIVATE) def test_different_user_public_profile(self): """ Scenario: Verify that desired fields are shown when looking at a different user's public profile. Given that I am a registered user. And I visit a different user's public profile page. Then I shouldn't see the profile visibility selector dropdown. Then all the profile fields are shown. Also `location`, `language` and `about me` fields are not editable. """ different_username, different_user_id = self._initialize_different_user(privacy=self.PRIVACY_PUBLIC) username, __ = self.log_in_as_unique_user() profile_page = self.visit_profile_page(different_username) profile_page.wait_for_public_fields() self.verify_profile_page_is_public(profile_page, is_editable=False) self.verify_profile_page_view_event(username, different_user_id, visibility=self.PRIVACY_PUBLIC) def _initialize_different_user(self, privacy=None, birth_year=None): """ Initialize the profile page for a different test user. """ username, user_id = self.log_in_as_unique_user() # Set the privacy for the new user if privacy is None: privacy = self.PRIVACY_PUBLIC self.visit_profile_page(username, privacy=privacy) # Set the user's year of birth if birth_year: self.set_birth_year(birth_year) # Log the user out LogoutPage(self.browser).visit() return username, user_id
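# --- Illustrative note (editor's addition, not part of the original test suite) ---
# This is the payload shape that verify_user_preference_changed_event() above asserts
# for a single "edx.user.settings.changed" event. The field names come from that helper;
# the concrete values below are hypothetical and simply mirror the privacy tests
# (the "account_privacy" preference switching from private to all_users).
EXAMPLE_USER_SETTINGS_CHANGED_EVENT = {
    u"user_id": 1234,                      # hypothetical numeric id, as returned by AutoAuthPage
    u"table": u"user_api_userpreference",  # table name asserted by the helper
    u"setting": u"account_privacy",        # preference key used in the privacy tests
    u"old": u"private",                    # PRIVACY_PRIVATE; None when no previous value exists
    u"new": u"all_users",                  # PRIVACY_PUBLIC
    u"truncated": [],
}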
cselis86/edx-platform
common/test/acceptance/tests/lms/test_learner_profile.py
Python
agpl-3.0
30,810
[ "VisIt" ]
b5544554d7664554493520015cf45ffaf91d0a1ea6741738b26f2bcc1dc07847
from __future__ import absolute_import, division, unicode_literals import string EOF = None E = { "null-character": "Null character in input stream, replaced with U+FFFD.", "invalid-codepoint": "Invalid codepoint in stream.", "incorrectly-placed-solidus": "Solidus (/) incorrectly placed in tag.", "incorrect-cr-newline-entity": "Incorrect CR newline entity, replaced with LF.", "illegal-windows-1252-entity": "Entity used with illegal number (windows-1252 reference).", "cant-convert-numeric-entity": "Numeric entity couldn't be converted to character " "(codepoint U+%(charAsInt)08x).", "illegal-codepoint-for-numeric-entity": "Numeric entity represents an illegal codepoint: " "U+%(charAsInt)08x.", "numeric-entity-without-semicolon": "Numeric entity didn't end with ';'.", "expected-numeric-entity-but-got-eof": "Numeric entity expected. Got end of file instead.", "expected-numeric-entity": "Numeric entity expected but none found.", "named-entity-without-semicolon": "Named entity didn't end with ';'.", "expected-named-entity": "Named entity expected. Got none.", "attributes-in-end-tag": "End tag contains unexpected attributes.", 'self-closing-flag-on-end-tag': "End tag contains unexpected self-closing flag.", "expected-tag-name-but-got-right-bracket": "Expected tag name. Got '>' instead.", "expected-tag-name-but-got-question-mark": "Expected tag name. Got '?' instead. (HTML doesn't " "support processing instructions.)", "expected-tag-name": "Expected tag name. Got something else instead", "expected-closing-tag-but-got-right-bracket": "Expected closing tag. Got '>' instead. Ignoring '</>'.", "expected-closing-tag-but-got-eof": "Expected closing tag. Unexpected end of file.", "expected-closing-tag-but-got-char": "Expected closing tag. Unexpected character '%(data)s' found.", "eof-in-tag-name": "Unexpected end of file in the tag name.", "expected-attribute-name-but-got-eof": "Unexpected end of file. Expected attribute name instead.", "eof-in-attribute-name": "Unexpected end of file in attribute name.", "invalid-character-in-attribute-name": "Invalid character in attribute name", "duplicate-attribute": "Dropped duplicate attribute on tag.", "expected-end-of-tag-name-but-got-eof": "Unexpected end of file. Expected = or end of tag.", "expected-attribute-value-but-got-eof": "Unexpected end of file. Expected attribute value.", "expected-attribute-value-but-got-right-bracket": "Expected attribute value. Got '>' instead.", 'equals-in-unquoted-attribute-value': "Unexpected = in unquoted attribute", 'unexpected-character-in-unquoted-attribute-value': "Unexpected character in unquoted attribute", "invalid-character-after-attribute-name": "Unexpected character after attribute name.", "unexpected-character-after-attribute-value": "Unexpected character after attribute value.", "eof-in-attribute-value-double-quote": "Unexpected end of file in attribute value (\").", "eof-in-attribute-value-single-quote": "Unexpected end of file in attribute value (').", "eof-in-attribute-value-no-quotes": "Unexpected end of file in attribute value.", "unexpected-EOF-after-solidus-in-tag": "Unexpected end of file in tag. Expected >", "unexpected-character-after-solidus-in-tag": "Unexpected character after / in tag. Expected >", "expected-dashes-or-doctype": "Expected '--' or 'DOCTYPE'. Not found.", "unexpected-bang-after-double-dash-in-comment": "Unexpected ! 
after -- in comment", "unexpected-space-after-double-dash-in-comment": "Unexpected space after -- in comment", "incorrect-comment": "Incorrect comment.", "eof-in-comment": "Unexpected end of file in comment.", "eof-in-comment-end-dash": "Unexpected end of file in comment (-)", "unexpected-dash-after-double-dash-in-comment": "Unexpected '-' after '--' found in comment.", "eof-in-comment-double-dash": "Unexpected end of file in comment (--).", "eof-in-comment-end-space-state": "Unexpected end of file in comment.", "eof-in-comment-end-bang-state": "Unexpected end of file in comment.", "unexpected-char-in-comment": "Unexpected character in comment found.", "need-space-after-doctype": "No space after literal string 'DOCTYPE'.", "expected-doctype-name-but-got-right-bracket": "Unexpected > character. Expected DOCTYPE name.", "expected-doctype-name-but-got-eof": "Unexpected end of file. Expected DOCTYPE name.", "eof-in-doctype-name": "Unexpected end of file in DOCTYPE name.", "eof-in-doctype": "Unexpected end of file in DOCTYPE.", "expected-space-or-right-bracket-in-doctype": "Expected space or '>'. Got '%(data)s'", "unexpected-end-of-doctype": "Unexpected end of DOCTYPE.", "unexpected-char-in-doctype": "Unexpected character in DOCTYPE.", "eof-in-innerhtml": "XXX innerHTML EOF", "unexpected-doctype": "Unexpected DOCTYPE. Ignored.", "non-html-root": "html needs to be the first start tag.", "expected-doctype-but-got-eof": "Unexpected End of file. Expected DOCTYPE.", "unknown-doctype": "Erroneous DOCTYPE.", "expected-doctype-but-got-chars": "Unexpected non-space characters. Expected DOCTYPE.", "expected-doctype-but-got-start-tag": "Unexpected start tag (%(name)s). Expected DOCTYPE.", "expected-doctype-but-got-end-tag": "Unexpected end tag (%(name)s). Expected DOCTYPE.", "end-tag-after-implied-root": "Unexpected end tag (%(name)s) after the (implied) root element.", "expected-named-closing-tag-but-got-eof": "Unexpected end of file. Expected end tag (%(name)s).", "two-heads-are-not-better-than-one": "Unexpected start tag head in existing head. Ignored.", "unexpected-end-tag": "Unexpected end tag (%(name)s). Ignored.", "unexpected-start-tag-out-of-my-head": "Unexpected start tag (%(name)s) that can be in head. Moved.", "unexpected-start-tag": "Unexpected start tag (%(name)s).", "missing-end-tag": "Missing end tag (%(name)s).", "missing-end-tags": "Missing end tags (%(name)s).", "unexpected-start-tag-implies-end-tag": "Unexpected start tag (%(startName)s) " "implies end tag (%(endName)s).", "unexpected-start-tag-treated-as": "Unexpected start tag (%(originalName)s). Treated as %(newName)s.", "deprecated-tag": "Unexpected start tag %(name)s. Don't use it!", "unexpected-start-tag-ignored": "Unexpected start tag %(name)s. Ignored.", "expected-one-end-tag-but-got-another": "Unexpected end tag (%(gotName)s). " "Missing end tag (%(expectedName)s).", "end-tag-too-early": "End tag (%(name)s) seen too early. Expected other end tag.", "end-tag-too-early-named": "Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s).", "end-tag-too-early-ignored": "End tag (%(name)s) seen too early. 
Ignored.", "adoption-agency-1.1": "End tag (%(name)s) violates step 1, " "paragraph 1 of the adoption agency algorithm.", "adoption-agency-1.2": "End tag (%(name)s) violates step 1, " "paragraph 2 of the adoption agency algorithm.", "adoption-agency-1.3": "End tag (%(name)s) violates step 1, " "paragraph 3 of the adoption agency algorithm.", "adoption-agency-4.4": "End tag (%(name)s) violates step 4, " "paragraph 4 of the adoption agency algorithm.", "unexpected-end-tag-treated-as": "Unexpected end tag (%(originalName)s). Treated as %(newName)s.", "no-end-tag": "This element (%(name)s) has no end tag.", "unexpected-implied-end-tag-in-table": "Unexpected implied end tag (%(name)s) in the table phase.", "unexpected-implied-end-tag-in-table-body": "Unexpected implied end tag (%(name)s) in the table body phase.", "unexpected-char-implies-table-voodoo": "Unexpected non-space characters in " "table context caused voodoo mode.", "unexpected-hidden-input-in-table": "Unexpected input with type hidden in table context.", "unexpected-form-in-table": "Unexpected form in table context.", "unexpected-start-tag-implies-table-voodoo": "Unexpected start tag (%(name)s) in " "table context caused voodoo mode.", "unexpected-end-tag-implies-table-voodoo": "Unexpected end tag (%(name)s) in " "table context caused voodoo mode.", "unexpected-cell-in-table-body": "Unexpected table cell start tag (%(name)s) " "in the table body phase.", "unexpected-cell-end-tag": "Got table cell end tag (%(name)s) " "while required end tags are missing.", "unexpected-end-tag-in-table-body": "Unexpected end tag (%(name)s) in the table body phase. Ignored.", "unexpected-implied-end-tag-in-table-row": "Unexpected implied end tag (%(name)s) in the table row phase.", "unexpected-end-tag-in-table-row": "Unexpected end tag (%(name)s) in the table row phase. Ignored.", "unexpected-select-in-select": "Unexpected select start tag in the select phase " "treated as select end tag.", "unexpected-input-in-select": "Unexpected input start tag in the select phase.", "unexpected-start-tag-in-select": "Unexpected start tag token (%(name)s in the select phase. " "Ignored.", "unexpected-end-tag-in-select": "Unexpected end tag (%(name)s) in the select phase. Ignored.", "unexpected-table-element-start-tag-in-select-in-table": "Unexpected table element start tag (%(name)s) in the select in table phase.", "unexpected-table-element-end-tag-in-select-in-table": "Unexpected table element end tag (%(name)s) in the select in table phase.", "unexpected-char-after-body": "Unexpected non-space characters in the after body phase.", "unexpected-start-tag-after-body": "Unexpected start tag token (%(name)s)" " in the after body phase.", "unexpected-end-tag-after-body": "Unexpected end tag token (%(name)s)" " in the after body phase.", "unexpected-char-in-frameset": "Unexpected characters in the frameset phase. Characters ignored.", "unexpected-start-tag-in-frameset": "Unexpected start tag token (%(name)s)" " in the frameset phase. Ignored.", "unexpected-frameset-in-frameset-innerhtml": "Unexpected end tag token (frameset) " "in the frameset phase (innerHTML).", "unexpected-end-tag-in-frameset": "Unexpected end tag token (%(name)s)" " in the frameset phase. Ignored.", "unexpected-char-after-frameset": "Unexpected non-space characters in the " "after frameset phase. Ignored.", "unexpected-start-tag-after-frameset": "Unexpected start tag (%(name)s)" " in the after frameset phase. 
Ignored.", "unexpected-end-tag-after-frameset": "Unexpected end tag (%(name)s)" " in the after frameset phase. Ignored.", "unexpected-end-tag-after-body-innerhtml": "Unexpected end tag after body(innerHtml)", "expected-eof-but-got-char": "Unexpected non-space characters. Expected end of file.", "expected-eof-but-got-start-tag": "Unexpected start tag (%(name)s)" ". Expected end of file.", "expected-eof-but-got-end-tag": "Unexpected end tag (%(name)s)" ". Expected end of file.", "eof-in-table": "Unexpected end of file. Expected table content.", "eof-in-select": "Unexpected end of file. Expected select content.", "eof-in-frameset": "Unexpected end of file. Expected frameset content.", "eof-in-script-in-script": "Unexpected end of file. Expected script content.", "eof-in-foreign-lands": "Unexpected end of file. Expected foreign content", "non-void-element-with-trailing-solidus": "Trailing solidus not allowed on element %(name)s", "unexpected-html-element-in-foreign-content": "Element %(name)s not allowed in a non-html context", "unexpected-end-tag-before-html": "Unexpected end tag (%(name)s) before html.", "XXX-undefined-error": "Undefined error (this sucks and should be fixed)", } namespaces = { "html": "http://www.w3.org/1999/xhtml", "mathml": "http://www.w3.org/1998/Math/MathML", "svg": "http://www.w3.org/2000/svg", "xlink": "http://www.w3.org/1999/xlink", "xml": "http://www.w3.org/XML/1998/namespace", "xmlns": "http://www.w3.org/2000/xmlns/" } scopingElements = frozenset([ (namespaces["html"], "applet"), (namespaces["html"], "caption"), (namespaces["html"], "html"), (namespaces["html"], "marquee"), (namespaces["html"], "object"), (namespaces["html"], "table"), (namespaces["html"], "td"), (namespaces["html"], "th"), (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext"), (namespaces["mathml"], "annotation-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title"), ]) formattingElements = frozenset([ (namespaces["html"], "a"), (namespaces["html"], "b"), (namespaces["html"], "big"), (namespaces["html"], "code"), (namespaces["html"], "em"), (namespaces["html"], "font"), (namespaces["html"], "i"), (namespaces["html"], "nobr"), (namespaces["html"], "s"), (namespaces["html"], "small"), (namespaces["html"], "strike"), (namespaces["html"], "strong"), (namespaces["html"], "tt"), (namespaces["html"], "u") ]) specialElements = frozenset([ (namespaces["html"], "address"), (namespaces["html"], "applet"), (namespaces["html"], "area"), (namespaces["html"], "article"), (namespaces["html"], "aside"), (namespaces["html"], "base"), (namespaces["html"], "basefont"), (namespaces["html"], "bgsound"), (namespaces["html"], "blockquote"), (namespaces["html"], "body"), (namespaces["html"], "br"), (namespaces["html"], "button"), (namespaces["html"], "caption"), (namespaces["html"], "center"), (namespaces["html"], "col"), (namespaces["html"], "colgroup"), (namespaces["html"], "command"), (namespaces["html"], "dd"), (namespaces["html"], "details"), (namespaces["html"], "dir"), (namespaces["html"], "div"), (namespaces["html"], "dl"), (namespaces["html"], "dt"), (namespaces["html"], "embed"), (namespaces["html"], "fieldset"), (namespaces["html"], "figure"), (namespaces["html"], "footer"), (namespaces["html"], "form"), (namespaces["html"], "frame"), (namespaces["html"], "frameset"), (namespaces["html"], "h1"), (namespaces["html"], "h2"), (namespaces["html"], "h3"), 
(namespaces["html"], "h4"), (namespaces["html"], "h5"), (namespaces["html"], "h6"), (namespaces["html"], "head"), (namespaces["html"], "header"), (namespaces["html"], "hr"), (namespaces["html"], "html"), (namespaces["html"], "iframe"), # Note that image is commented out in the spec as "this isn't an # element that can end up on the stack, so it doesn't matter," (namespaces["html"], "image"), (namespaces["html"], "img"), (namespaces["html"], "input"), (namespaces["html"], "isindex"), (namespaces["html"], "li"), (namespaces["html"], "link"), (namespaces["html"], "listing"), (namespaces["html"], "marquee"), (namespaces["html"], "menu"), (namespaces["html"], "meta"), (namespaces["html"], "nav"), (namespaces["html"], "noembed"), (namespaces["html"], "noframes"), (namespaces["html"], "noscript"), (namespaces["html"], "object"), (namespaces["html"], "ol"), (namespaces["html"], "p"), (namespaces["html"], "param"), (namespaces["html"], "plaintext"), (namespaces["html"], "pre"), (namespaces["html"], "script"), (namespaces["html"], "section"), (namespaces["html"], "select"), (namespaces["html"], "style"), (namespaces["html"], "table"), (namespaces["html"], "tbody"), (namespaces["html"], "td"), (namespaces["html"], "textarea"), (namespaces["html"], "tfoot"), (namespaces["html"], "th"), (namespaces["html"], "thead"), (namespaces["html"], "title"), (namespaces["html"], "tr"), (namespaces["html"], "ul"), (namespaces["html"], "wbr"), (namespaces["html"], "xmp"), (namespaces["svg"], "foreignObject") ]) htmlIntegrationPointElements = frozenset([ (namespaces["mathml"], "annotaion-xml"), (namespaces["svg"], "foreignObject"), (namespaces["svg"], "desc"), (namespaces["svg"], "title") ]) mathmlTextIntegrationPointElements = frozenset([ (namespaces["mathml"], "mi"), (namespaces["mathml"], "mo"), (namespaces["mathml"], "mn"), (namespaces["mathml"], "ms"), (namespaces["mathml"], "mtext") ]) adjustForeignAttributes = { "xlink:actuate": ("xlink", "actuate", namespaces["xlink"]), "xlink:arcrole": ("xlink", "arcrole", namespaces["xlink"]), "xlink:href": ("xlink", "href", namespaces["xlink"]), "xlink:role": ("xlink", "role", namespaces["xlink"]), "xlink:show": ("xlink", "show", namespaces["xlink"]), "xlink:title": ("xlink", "title", namespaces["xlink"]), "xlink:type": ("xlink", "type", namespaces["xlink"]), "xml:base": ("xml", "base", namespaces["xml"]), "xml:lang": ("xml", "lang", namespaces["xml"]), "xml:space": ("xml", "space", namespaces["xml"]), "xmlns": (None, "xmlns", namespaces["xmlns"]), "xmlns:xlink": ("xmlns", "xlink", namespaces["xmlns"]) } unadjustForeignAttributes = dict([((ns, local), qname) for qname, (prefix, local, ns) in adjustForeignAttributes.items()]) spaceCharacters = frozenset([ "\t", "\n", "\u000C", " ", "\r" ]) tableInsertModeElements = frozenset([ "table", "tbody", "tfoot", "thead", "tr" ]) asciiLowercase = frozenset(string.ascii_lowercase) asciiUppercase = frozenset(string.ascii_uppercase) asciiLetters = frozenset(string.ascii_letters) digits = frozenset(string.digits) hexDigits = frozenset(string.hexdigits) asciiUpper2Lower = dict([(ord(c), ord(c.lower())) for c in string.ascii_uppercase]) # Heading elements need to be ordered headingElements = ( "h1", "h2", "h3", "h4", "h5", "h6" ) voidElements = frozenset([ "base", "command", "event-source", "link", "meta", "hr", "br", "img", "embed", "param", "area", "col", "input", "source", "track" ]) cdataElements = frozenset(['title', 'textarea']) rcdataElements = frozenset([ 'style', 'script', 'xmp', 'iframe', 'noembed', 'noframes', 'noscript' ]) 
booleanAttributes = { "": frozenset(["irrelevant"]), "style": frozenset(["scoped"]), "img": frozenset(["ismap"]), "audio": frozenset(["autoplay", "controls"]), "video": frozenset(["autoplay", "controls"]), "script": frozenset(["defer", "async"]), "details": frozenset(["open"]), "datagrid": frozenset(["multiple", "disabled"]), "command": frozenset(["hidden", "disabled", "checked", "default"]), "hr": frozenset(["noshade"]), "menu": frozenset(["autosubmit"]), "fieldset": frozenset(["disabled", "readonly"]), "option": frozenset(["disabled", "readonly", "selected"]), "optgroup": frozenset(["disabled", "readonly"]), "button": frozenset(["disabled", "autofocus"]), "input": frozenset(["disabled", "readonly", "required", "autofocus", "checked", "ismap"]), "select": frozenset(["disabled", "readonly", "autofocus", "multiple"]), "output": frozenset(["disabled", "readonly"]), } # entitiesWindows1252 has to be _ordered_ and needs to have an index. It # therefore can't be a frozenset. entitiesWindows1252 = ( 8364, # 0x80 0x20AC EURO SIGN 65533, # 0x81 UNDEFINED 8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK 402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK 8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK 8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS 8224, # 0x86 0x2020 DAGGER 8225, # 0x87 0x2021 DOUBLE DAGGER 710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT 8240, # 0x89 0x2030 PER MILLE SIGN 352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON 8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK 338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE 65533, # 0x8D UNDEFINED 381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON 65533, # 0x8F UNDEFINED 65533, # 0x90 UNDEFINED 8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK 8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK 8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK 8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK 8226, # 0x95 0x2022 BULLET 8211, # 0x96 0x2013 EN DASH 8212, # 0x97 0x2014 EM DASH 732, # 0x98 0x02DC SMALL TILDE 8482, # 0x99 0x2122 TRADE MARK SIGN 353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON 8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK 339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE 65533, # 0x9D UNDEFINED 382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON 376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS ) xmlEntities = frozenset(['lt;', 'gt;', 'amp;', 'apos;', 'quot;']) entities = { "AElig": "\xc6", "AElig;": "\xc6", "AMP": "&", "AMP;": "&", "Aacute": "\xc1", "Aacute;": "\xc1", "Abreve;": "\u0102", "Acirc": "\xc2", "Acirc;": "\xc2", "Acy;": "\u0410", "Afr;": "\U0001d504", "Agrave": "\xc0", "Agrave;": "\xc0", "Alpha;": "\u0391", "Amacr;": "\u0100", "And;": "\u2a53", "Aogon;": "\u0104", "Aopf;": "\U0001d538", "ApplyFunction;": "\u2061", "Aring": "\xc5", "Aring;": "\xc5", "Ascr;": "\U0001d49c", "Assign;": "\u2254", "Atilde": "\xc3", "Atilde;": "\xc3", "Auml": "\xc4", "Auml;": "\xc4", "Backslash;": "\u2216", "Barv;": "\u2ae7", "Barwed;": "\u2306", "Bcy;": "\u0411", "Because;": "\u2235", "Bernoullis;": "\u212c", "Beta;": "\u0392", "Bfr;": "\U0001d505", "Bopf;": "\U0001d539", "Breve;": "\u02d8", "Bscr;": "\u212c", "Bumpeq;": "\u224e", "CHcy;": "\u0427", "COPY": "\xa9", "COPY;": "\xa9", "Cacute;": "\u0106", "Cap;": "\u22d2", "CapitalDifferentialD;": "\u2145", "Cayleys;": "\u212d", "Ccaron;": "\u010c", "Ccedil": "\xc7", "Ccedil;": "\xc7", "Ccirc;": "\u0108", "Cconint;": "\u2230", "Cdot;": "\u010a", "Cedilla;": "\xb8", "CenterDot;": "\xb7", "Cfr;": "\u212d", "Chi;": "\u03a7", "CircleDot;": "\u2299", "CircleMinus;": "\u2296", 
"CirclePlus;": "\u2295", "CircleTimes;": "\u2297", "ClockwiseContourIntegral;": "\u2232", "CloseCurlyDoubleQuote;": "\u201d", "CloseCurlyQuote;": "\u2019", "Colon;": "\u2237", "Colone;": "\u2a74", "Congruent;": "\u2261", "Conint;": "\u222f", "ContourIntegral;": "\u222e", "Copf;": "\u2102", "Coproduct;": "\u2210", "CounterClockwiseContourIntegral;": "\u2233", "Cross;": "\u2a2f", "Cscr;": "\U0001d49e", "Cup;": "\u22d3", "CupCap;": "\u224d", "DD;": "\u2145", "DDotrahd;": "\u2911", "DJcy;": "\u0402", "DScy;": "\u0405", "DZcy;": "\u040f", "Dagger;": "\u2021", "Darr;": "\u21a1", "Dashv;": "\u2ae4", "Dcaron;": "\u010e", "Dcy;": "\u0414", "Del;": "\u2207", "Delta;": "\u0394", "Dfr;": "\U0001d507", "DiacriticalAcute;": "\xb4", "DiacriticalDot;": "\u02d9", "DiacriticalDoubleAcute;": "\u02dd", "DiacriticalGrave;": "`", "DiacriticalTilde;": "\u02dc", "Diamond;": "\u22c4", "DifferentialD;": "\u2146", "Dopf;": "\U0001d53b", "Dot;": "\xa8", "DotDot;": "\u20dc", "DotEqual;": "\u2250", "DoubleContourIntegral;": "\u222f", "DoubleDot;": "\xa8", "DoubleDownArrow;": "\u21d3", "DoubleLeftArrow;": "\u21d0", "DoubleLeftRightArrow;": "\u21d4", "DoubleLeftTee;": "\u2ae4", "DoubleLongLeftArrow;": "\u27f8", "DoubleLongLeftRightArrow;": "\u27fa", "DoubleLongRightArrow;": "\u27f9", "DoubleRightArrow;": "\u21d2", "DoubleRightTee;": "\u22a8", "DoubleUpArrow;": "\u21d1", "DoubleUpDownArrow;": "\u21d5", "DoubleVerticalBar;": "\u2225", "DownArrow;": "\u2193", "DownArrowBar;": "\u2913", "DownArrowUpArrow;": "\u21f5", "DownBreve;": "\u0311", "DownLeftRightVector;": "\u2950", "DownLeftTeeVector;": "\u295e", "DownLeftVector;": "\u21bd", "DownLeftVectorBar;": "\u2956", "DownRightTeeVector;": "\u295f", "DownRightVector;": "\u21c1", "DownRightVectorBar;": "\u2957", "DownTee;": "\u22a4", "DownTeeArrow;": "\u21a7", "Downarrow;": "\u21d3", "Dscr;": "\U0001d49f", "Dstrok;": "\u0110", "ENG;": "\u014a", "ETH": "\xd0", "ETH;": "\xd0", "Eacute": "\xc9", "Eacute;": "\xc9", "Ecaron;": "\u011a", "Ecirc": "\xca", "Ecirc;": "\xca", "Ecy;": "\u042d", "Edot;": "\u0116", "Efr;": "\U0001d508", "Egrave": "\xc8", "Egrave;": "\xc8", "Element;": "\u2208", "Emacr;": "\u0112", "EmptySmallSquare;": "\u25fb", "EmptyVerySmallSquare;": "\u25ab", "Eogon;": "\u0118", "Eopf;": "\U0001d53c", "Epsilon;": "\u0395", "Equal;": "\u2a75", "EqualTilde;": "\u2242", "Equilibrium;": "\u21cc", "Escr;": "\u2130", "Esim;": "\u2a73", "Eta;": "\u0397", "Euml": "\xcb", "Euml;": "\xcb", "Exists;": "\u2203", "ExponentialE;": "\u2147", "Fcy;": "\u0424", "Ffr;": "\U0001d509", "FilledSmallSquare;": "\u25fc", "FilledVerySmallSquare;": "\u25aa", "Fopf;": "\U0001d53d", "ForAll;": "\u2200", "Fouriertrf;": "\u2131", "Fscr;": "\u2131", "GJcy;": "\u0403", "GT": ">", "GT;": ">", "Gamma;": "\u0393", "Gammad;": "\u03dc", "Gbreve;": "\u011e", "Gcedil;": "\u0122", "Gcirc;": "\u011c", "Gcy;": "\u0413", "Gdot;": "\u0120", "Gfr;": "\U0001d50a", "Gg;": "\u22d9", "Gopf;": "\U0001d53e", "GreaterEqual;": "\u2265", "GreaterEqualLess;": "\u22db", "GreaterFullEqual;": "\u2267", "GreaterGreater;": "\u2aa2", "GreaterLess;": "\u2277", "GreaterSlantEqual;": "\u2a7e", "GreaterTilde;": "\u2273", "Gscr;": "\U0001d4a2", "Gt;": "\u226b", "HARDcy;": "\u042a", "Hacek;": "\u02c7", "Hat;": "^", "Hcirc;": "\u0124", "Hfr;": "\u210c", "HilbertSpace;": "\u210b", "Hopf;": "\u210d", "HorizontalLine;": "\u2500", "Hscr;": "\u210b", "Hstrok;": "\u0126", "HumpDownHump;": "\u224e", "HumpEqual;": "\u224f", "IEcy;": "\u0415", "IJlig;": "\u0132", "IOcy;": "\u0401", "Iacute": "\xcd", "Iacute;": "\xcd", "Icirc": "\xce", "Icirc;": 
"\xce", "Icy;": "\u0418", "Idot;": "\u0130", "Ifr;": "\u2111", "Igrave": "\xcc", "Igrave;": "\xcc", "Im;": "\u2111", "Imacr;": "\u012a", "ImaginaryI;": "\u2148", "Implies;": "\u21d2", "Int;": "\u222c", "Integral;": "\u222b", "Intersection;": "\u22c2", "InvisibleComma;": "\u2063", "InvisibleTimes;": "\u2062", "Iogon;": "\u012e", "Iopf;": "\U0001d540", "Iota;": "\u0399", "Iscr;": "\u2110", "Itilde;": "\u0128", "Iukcy;": "\u0406", "Iuml": "\xcf", "Iuml;": "\xcf", "Jcirc;": "\u0134", "Jcy;": "\u0419", "Jfr;": "\U0001d50d", "Jopf;": "\U0001d541", "Jscr;": "\U0001d4a5", "Jsercy;": "\u0408", "Jukcy;": "\u0404", "KHcy;": "\u0425", "KJcy;": "\u040c", "Kappa;": "\u039a", "Kcedil;": "\u0136", "Kcy;": "\u041a", "Kfr;": "\U0001d50e", "Kopf;": "\U0001d542", "Kscr;": "\U0001d4a6", "LJcy;": "\u0409", "LT": "<", "LT;": "<", "Lacute;": "\u0139", "Lambda;": "\u039b", "Lang;": "\u27ea", "Laplacetrf;": "\u2112", "Larr;": "\u219e", "Lcaron;": "\u013d", "Lcedil;": "\u013b", "Lcy;": "\u041b", "LeftAngleBracket;": "\u27e8", "LeftArrow;": "\u2190", "LeftArrowBar;": "\u21e4", "LeftArrowRightArrow;": "\u21c6", "LeftCeiling;": "\u2308", "LeftDoubleBracket;": "\u27e6", "LeftDownTeeVector;": "\u2961", "LeftDownVector;": "\u21c3", "LeftDownVectorBar;": "\u2959", "LeftFloor;": "\u230a", "LeftRightArrow;": "\u2194", "LeftRightVector;": "\u294e", "LeftTee;": "\u22a3", "LeftTeeArrow;": "\u21a4", "LeftTeeVector;": "\u295a", "LeftTriangle;": "\u22b2", "LeftTriangleBar;": "\u29cf", "LeftTriangleEqual;": "\u22b4", "LeftUpDownVector;": "\u2951", "LeftUpTeeVector;": "\u2960", "LeftUpVector;": "\u21bf", "LeftUpVectorBar;": "\u2958", "LeftVector;": "\u21bc", "LeftVectorBar;": "\u2952", "Leftarrow;": "\u21d0", "Leftrightarrow;": "\u21d4", "LessEqualGreater;": "\u22da", "LessFullEqual;": "\u2266", "LessGreater;": "\u2276", "LessLess;": "\u2aa1", "LessSlantEqual;": "\u2a7d", "LessTilde;": "\u2272", "Lfr;": "\U0001d50f", "Ll;": "\u22d8", "Lleftarrow;": "\u21da", "Lmidot;": "\u013f", "LongLeftArrow;": "\u27f5", "LongLeftRightArrow;": "\u27f7", "LongRightArrow;": "\u27f6", "Longleftarrow;": "\u27f8", "Longleftrightarrow;": "\u27fa", "Longrightarrow;": "\u27f9", "Lopf;": "\U0001d543", "LowerLeftArrow;": "\u2199", "LowerRightArrow;": "\u2198", "Lscr;": "\u2112", "Lsh;": "\u21b0", "Lstrok;": "\u0141", "Lt;": "\u226a", "Map;": "\u2905", "Mcy;": "\u041c", "MediumSpace;": "\u205f", "Mellintrf;": "\u2133", "Mfr;": "\U0001d510", "MinusPlus;": "\u2213", "Mopf;": "\U0001d544", "Mscr;": "\u2133", "Mu;": "\u039c", "NJcy;": "\u040a", "Nacute;": "\u0143", "Ncaron;": "\u0147", "Ncedil;": "\u0145", "Ncy;": "\u041d", "NegativeMediumSpace;": "\u200b", "NegativeThickSpace;": "\u200b", "NegativeThinSpace;": "\u200b", "NegativeVeryThinSpace;": "\u200b", "NestedGreaterGreater;": "\u226b", "NestedLessLess;": "\u226a", "NewLine;": "\n", "Nfr;": "\U0001d511", "NoBreak;": "\u2060", "NonBreakingSpace;": "\xa0", "Nopf;": "\u2115", "Not;": "\u2aec", "NotCongruent;": "\u2262", "NotCupCap;": "\u226d", "NotDoubleVerticalBar;": "\u2226", "NotElement;": "\u2209", "NotEqual;": "\u2260", "NotEqualTilde;": "\u2242\u0338", "NotExists;": "\u2204", "NotGreater;": "\u226f", "NotGreaterEqual;": "\u2271", "NotGreaterFullEqual;": "\u2267\u0338", "NotGreaterGreater;": "\u226b\u0338", "NotGreaterLess;": "\u2279", "NotGreaterSlantEqual;": "\u2a7e\u0338", "NotGreaterTilde;": "\u2275", "NotHumpDownHump;": "\u224e\u0338", "NotHumpEqual;": "\u224f\u0338", "NotLeftTriangle;": "\u22ea", "NotLeftTriangleBar;": "\u29cf\u0338", "NotLeftTriangleEqual;": "\u22ec", "NotLess;": "\u226e", 
"NotLessEqual;": "\u2270", "NotLessGreater;": "\u2278", "NotLessLess;": "\u226a\u0338", "NotLessSlantEqual;": "\u2a7d\u0338", "NotLessTilde;": "\u2274", "NotNestedGreaterGreater;": "\u2aa2\u0338", "NotNestedLessLess;": "\u2aa1\u0338", "NotPrecedes;": "\u2280", "NotPrecedesEqual;": "\u2aaf\u0338", "NotPrecedesSlantEqual;": "\u22e0", "NotReverseElement;": "\u220c", "NotRightTriangle;": "\u22eb", "NotRightTriangleBar;": "\u29d0\u0338", "NotRightTriangleEqual;": "\u22ed", "NotSquareSubset;": "\u228f\u0338", "NotSquareSubsetEqual;": "\u22e2", "NotSquareSuperset;": "\u2290\u0338", "NotSquareSupersetEqual;": "\u22e3", "NotSubset;": "\u2282\u20d2", "NotSubsetEqual;": "\u2288", "NotSucceeds;": "\u2281", "NotSucceedsEqual;": "\u2ab0\u0338", "NotSucceedsSlantEqual;": "\u22e1", "NotSucceedsTilde;": "\u227f\u0338", "NotSuperset;": "\u2283\u20d2", "NotSupersetEqual;": "\u2289", "NotTilde;": "\u2241", "NotTildeEqual;": "\u2244", "NotTildeFullEqual;": "\u2247", "NotTildeTilde;": "\u2249", "NotVerticalBar;": "\u2224", "Nscr;": "\U0001d4a9", "Ntilde": "\xd1", "Ntilde;": "\xd1", "Nu;": "\u039d", "OElig;": "\u0152", "Oacute": "\xd3", "Oacute;": "\xd3", "Ocirc": "\xd4", "Ocirc;": "\xd4", "Ocy;": "\u041e", "Odblac;": "\u0150", "Ofr;": "\U0001d512", "Ograve": "\xd2", "Ograve;": "\xd2", "Omacr;": "\u014c", "Omega;": "\u03a9", "Omicron;": "\u039f", "Oopf;": "\U0001d546", "OpenCurlyDoubleQuote;": "\u201c", "OpenCurlyQuote;": "\u2018", "Or;": "\u2a54", "Oscr;": "\U0001d4aa", "Oslash": "\xd8", "Oslash;": "\xd8", "Otilde": "\xd5", "Otilde;": "\xd5", "Otimes;": "\u2a37", "Ouml": "\xd6", "Ouml;": "\xd6", "OverBar;": "\u203e", "OverBrace;": "\u23de", "OverBracket;": "\u23b4", "OverParenthesis;": "\u23dc", "PartialD;": "\u2202", "Pcy;": "\u041f", "Pfr;": "\U0001d513", "Phi;": "\u03a6", "Pi;": "\u03a0", "PlusMinus;": "\xb1", "Poincareplane;": "\u210c", "Popf;": "\u2119", "Pr;": "\u2abb", "Precedes;": "\u227a", "PrecedesEqual;": "\u2aaf", "PrecedesSlantEqual;": "\u227c", "PrecedesTilde;": "\u227e", "Prime;": "\u2033", "Product;": "\u220f", "Proportion;": "\u2237", "Proportional;": "\u221d", "Pscr;": "\U0001d4ab", "Psi;": "\u03a8", "QUOT": "\"", "QUOT;": "\"", "Qfr;": "\U0001d514", "Qopf;": "\u211a", "Qscr;": "\U0001d4ac", "RBarr;": "\u2910", "REG": "\xae", "REG;": "\xae", "Racute;": "\u0154", "Rang;": "\u27eb", "Rarr;": "\u21a0", "Rarrtl;": "\u2916", "Rcaron;": "\u0158", "Rcedil;": "\u0156", "Rcy;": "\u0420", "Re;": "\u211c", "ReverseElement;": "\u220b", "ReverseEquilibrium;": "\u21cb", "ReverseUpEquilibrium;": "\u296f", "Rfr;": "\u211c", "Rho;": "\u03a1", "RightAngleBracket;": "\u27e9", "RightArrow;": "\u2192", "RightArrowBar;": "\u21e5", "RightArrowLeftArrow;": "\u21c4", "RightCeiling;": "\u2309", "RightDoubleBracket;": "\u27e7", "RightDownTeeVector;": "\u295d", "RightDownVector;": "\u21c2", "RightDownVectorBar;": "\u2955", "RightFloor;": "\u230b", "RightTee;": "\u22a2", "RightTeeArrow;": "\u21a6", "RightTeeVector;": "\u295b", "RightTriangle;": "\u22b3", "RightTriangleBar;": "\u29d0", "RightTriangleEqual;": "\u22b5", "RightUpDownVector;": "\u294f", "RightUpTeeVector;": "\u295c", "RightUpVector;": "\u21be", "RightUpVectorBar;": "\u2954", "RightVector;": "\u21c0", "RightVectorBar;": "\u2953", "Rightarrow;": "\u21d2", "Ropf;": "\u211d", "RoundImplies;": "\u2970", "Rrightarrow;": "\u21db", "Rscr;": "\u211b", "Rsh;": "\u21b1", "RuleDelayed;": "\u29f4", "SHCHcy;": "\u0429", "SHcy;": "\u0428", "SOFTcy;": "\u042c", "Sacute;": "\u015a", "Sc;": "\u2abc", "Scaron;": "\u0160", "Scedil;": "\u015e", "Scirc;": "\u015c", "Scy;": 
"\u0421", "Sfr;": "\U0001d516", "ShortDownArrow;": "\u2193", "ShortLeftArrow;": "\u2190", "ShortRightArrow;": "\u2192", "ShortUpArrow;": "\u2191", "Sigma;": "\u03a3", "SmallCircle;": "\u2218", "Sopf;": "\U0001d54a", "Sqrt;": "\u221a", "Square;": "\u25a1", "SquareIntersection;": "\u2293", "SquareSubset;": "\u228f", "SquareSubsetEqual;": "\u2291", "SquareSuperset;": "\u2290", "SquareSupersetEqual;": "\u2292", "SquareUnion;": "\u2294", "Sscr;": "\U0001d4ae", "Star;": "\u22c6", "Sub;": "\u22d0", "Subset;": "\u22d0", "SubsetEqual;": "\u2286", "Succeeds;": "\u227b", "SucceedsEqual;": "\u2ab0", "SucceedsSlantEqual;": "\u227d", "SucceedsTilde;": "\u227f", "SuchThat;": "\u220b", "Sum;": "\u2211", "Sup;": "\u22d1", "Superset;": "\u2283", "SupersetEqual;": "\u2287", "Supset;": "\u22d1", "THORN": "\xde", "THORN;": "\xde", "TRADE;": "\u2122", "TSHcy;": "\u040b", "TScy;": "\u0426", "Tab;": "\t", "Tau;": "\u03a4", "Tcaron;": "\u0164", "Tcedil;": "\u0162", "Tcy;": "\u0422", "Tfr;": "\U0001d517", "Therefore;": "\u2234", "Theta;": "\u0398", "ThickSpace;": "\u205f\u200a", "ThinSpace;": "\u2009", "Tilde;": "\u223c", "TildeEqual;": "\u2243", "TildeFullEqual;": "\u2245", "TildeTilde;": "\u2248", "Topf;": "\U0001d54b", "TripleDot;": "\u20db", "Tscr;": "\U0001d4af", "Tstrok;": "\u0166", "Uacute": "\xda", "Uacute;": "\xda", "Uarr;": "\u219f", "Uarrocir;": "\u2949", "Ubrcy;": "\u040e", "Ubreve;": "\u016c", "Ucirc": "\xdb", "Ucirc;": "\xdb", "Ucy;": "\u0423", "Udblac;": "\u0170", "Ufr;": "\U0001d518", "Ugrave": "\xd9", "Ugrave;": "\xd9", "Umacr;": "\u016a", "UnderBar;": "_", "UnderBrace;": "\u23df", "UnderBracket;": "\u23b5", "UnderParenthesis;": "\u23dd", "Union;": "\u22c3", "UnionPlus;": "\u228e", "Uogon;": "\u0172", "Uopf;": "\U0001d54c", "UpArrow;": "\u2191", "UpArrowBar;": "\u2912", "UpArrowDownArrow;": "\u21c5", "UpDownArrow;": "\u2195", "UpEquilibrium;": "\u296e", "UpTee;": "\u22a5", "UpTeeArrow;": "\u21a5", "Uparrow;": "\u21d1", "Updownarrow;": "\u21d5", "UpperLeftArrow;": "\u2196", "UpperRightArrow;": "\u2197", "Upsi;": "\u03d2", "Upsilon;": "\u03a5", "Uring;": "\u016e", "Uscr;": "\U0001d4b0", "Utilde;": "\u0168", "Uuml": "\xdc", "Uuml;": "\xdc", "VDash;": "\u22ab", "Vbar;": "\u2aeb", "Vcy;": "\u0412", "Vdash;": "\u22a9", "Vdashl;": "\u2ae6", "Vee;": "\u22c1", "Verbar;": "\u2016", "Vert;": "\u2016", "VerticalBar;": "\u2223", "VerticalLine;": "|", "VerticalSeparator;": "\u2758", "VerticalTilde;": "\u2240", "VeryThinSpace;": "\u200a", "Vfr;": "\U0001d519", "Vopf;": "\U0001d54d", "Vscr;": "\U0001d4b1", "Vvdash;": "\u22aa", "Wcirc;": "\u0174", "Wedge;": "\u22c0", "Wfr;": "\U0001d51a", "Wopf;": "\U0001d54e", "Wscr;": "\U0001d4b2", "Xfr;": "\U0001d51b", "Xi;": "\u039e", "Xopf;": "\U0001d54f", "Xscr;": "\U0001d4b3", "YAcy;": "\u042f", "YIcy;": "\u0407", "YUcy;": "\u042e", "Yacute": "\xdd", "Yacute;": "\xdd", "Ycirc;": "\u0176", "Ycy;": "\u042b", "Yfr;": "\U0001d51c", "Yopf;": "\U0001d550", "Yscr;": "\U0001d4b4", "Yuml;": "\u0178", "ZHcy;": "\u0416", "Zacute;": "\u0179", "Zcaron;": "\u017d", "Zcy;": "\u0417", "Zdot;": "\u017b", "ZeroWidthSpace;": "\u200b", "Zeta;": "\u0396", "Zfr;": "\u2128", "Zopf;": "\u2124", "Zscr;": "\U0001d4b5", "aacute": "\xe1", "aacute;": "\xe1", "abreve;": "\u0103", "ac;": "\u223e", "acE;": "\u223e\u0333", "acd;": "\u223f", "acirc": "\xe2", "acirc;": "\xe2", "acute": "\xb4", "acute;": "\xb4", "acy;": "\u0430", "aelig": "\xe6", "aelig;": "\xe6", "af;": "\u2061", "afr;": "\U0001d51e", "agrave": "\xe0", "agrave;": "\xe0", "alefsym;": "\u2135", "aleph;": "\u2135", "alpha;": "\u03b1", "amacr;": 
"\u0101", "amalg;": "\u2a3f", "amp": "&", "amp;": "&", "and;": "\u2227", "andand;": "\u2a55", "andd;": "\u2a5c", "andslope;": "\u2a58", "andv;": "\u2a5a", "ang;": "\u2220", "ange;": "\u29a4", "angle;": "\u2220", "angmsd;": "\u2221", "angmsdaa;": "\u29a8", "angmsdab;": "\u29a9", "angmsdac;": "\u29aa", "angmsdad;": "\u29ab", "angmsdae;": "\u29ac", "angmsdaf;": "\u29ad", "angmsdag;": "\u29ae", "angmsdah;": "\u29af", "angrt;": "\u221f", "angrtvb;": "\u22be", "angrtvbd;": "\u299d", "angsph;": "\u2222", "angst;": "\xc5", "angzarr;": "\u237c", "aogon;": "\u0105", "aopf;": "\U0001d552", "ap;": "\u2248", "apE;": "\u2a70", "apacir;": "\u2a6f", "ape;": "\u224a", "apid;": "\u224b", "apos;": "'", "approx;": "\u2248", "approxeq;": "\u224a", "aring": "\xe5", "aring;": "\xe5", "ascr;": "\U0001d4b6", "ast;": "*", "asymp;": "\u2248", "asympeq;": "\u224d", "atilde": "\xe3", "atilde;": "\xe3", "auml": "\xe4", "auml;": "\xe4", "awconint;": "\u2233", "awint;": "\u2a11", "bNot;": "\u2aed", "backcong;": "\u224c", "backepsilon;": "\u03f6", "backprime;": "\u2035", "backsim;": "\u223d", "backsimeq;": "\u22cd", "barvee;": "\u22bd", "barwed;": "\u2305", "barwedge;": "\u2305", "bbrk;": "\u23b5", "bbrktbrk;": "\u23b6", "bcong;": "\u224c", "bcy;": "\u0431", "bdquo;": "\u201e", "becaus;": "\u2235", "because;": "\u2235", "bemptyv;": "\u29b0", "bepsi;": "\u03f6", "bernou;": "\u212c", "beta;": "\u03b2", "beth;": "\u2136", "between;": "\u226c", "bfr;": "\U0001d51f", "bigcap;": "\u22c2", "bigcirc;": "\u25ef", "bigcup;": "\u22c3", "bigodot;": "\u2a00", "bigoplus;": "\u2a01", "bigotimes;": "\u2a02", "bigsqcup;": "\u2a06", "bigstar;": "\u2605", "bigtriangledown;": "\u25bd", "bigtriangleup;": "\u25b3", "biguplus;": "\u2a04", "bigvee;": "\u22c1", "bigwedge;": "\u22c0", "bkarow;": "\u290d", "blacklozenge;": "\u29eb", "blacksquare;": "\u25aa", "blacktriangle;": "\u25b4", "blacktriangledown;": "\u25be", "blacktriangleleft;": "\u25c2", "blacktriangleright;": "\u25b8", "blank;": "\u2423", "blk12;": "\u2592", "blk14;": "\u2591", "blk34;": "\u2593", "block;": "\u2588", "bne;": "=\u20e5", "bnequiv;": "\u2261\u20e5", "bnot;": "\u2310", "bopf;": "\U0001d553", "bot;": "\u22a5", "bottom;": "\u22a5", "bowtie;": "\u22c8", "boxDL;": "\u2557", "boxDR;": "\u2554", "boxDl;": "\u2556", "boxDr;": "\u2553", "boxH;": "\u2550", "boxHD;": "\u2566", "boxHU;": "\u2569", "boxHd;": "\u2564", "boxHu;": "\u2567", "boxUL;": "\u255d", "boxUR;": "\u255a", "boxUl;": "\u255c", "boxUr;": "\u2559", "boxV;": "\u2551", "boxVH;": "\u256c", "boxVL;": "\u2563", "boxVR;": "\u2560", "boxVh;": "\u256b", "boxVl;": "\u2562", "boxVr;": "\u255f", "boxbox;": "\u29c9", "boxdL;": "\u2555", "boxdR;": "\u2552", "boxdl;": "\u2510", "boxdr;": "\u250c", "boxh;": "\u2500", "boxhD;": "\u2565", "boxhU;": "\u2568", "boxhd;": "\u252c", "boxhu;": "\u2534", "boxminus;": "\u229f", "boxplus;": "\u229e", "boxtimes;": "\u22a0", "boxuL;": "\u255b", "boxuR;": "\u2558", "boxul;": "\u2518", "boxur;": "\u2514", "boxv;": "\u2502", "boxvH;": "\u256a", "boxvL;": "\u2561", "boxvR;": "\u255e", "boxvh;": "\u253c", "boxvl;": "\u2524", "boxvr;": "\u251c", "bprime;": "\u2035", "breve;": "\u02d8", "brvbar": "\xa6", "brvbar;": "\xa6", "bscr;": "\U0001d4b7", "bsemi;": "\u204f", "bsim;": "\u223d", "bsime;": "\u22cd", "bsol;": "\\", "bsolb;": "\u29c5", "bsolhsub;": "\u27c8", "bull;": "\u2022", "bullet;": "\u2022", "bump;": "\u224e", "bumpE;": "\u2aae", "bumpe;": "\u224f", "bumpeq;": "\u224f", "cacute;": "\u0107", "cap;": "\u2229", "capand;": "\u2a44", "capbrcup;": "\u2a49", "capcap;": "\u2a4b", "capcup;": "\u2a47", 
"capdot;": "\u2a40", "caps;": "\u2229\ufe00", "caret;": "\u2041", "caron;": "\u02c7", "ccaps;": "\u2a4d", "ccaron;": "\u010d", "ccedil": "\xe7", "ccedil;": "\xe7", "ccirc;": "\u0109", "ccups;": "\u2a4c", "ccupssm;": "\u2a50", "cdot;": "\u010b", "cedil": "\xb8", "cedil;": "\xb8", "cemptyv;": "\u29b2", "cent": "\xa2", "cent;": "\xa2", "centerdot;": "\xb7", "cfr;": "\U0001d520", "chcy;": "\u0447", "check;": "\u2713", "checkmark;": "\u2713", "chi;": "\u03c7", "cir;": "\u25cb", "cirE;": "\u29c3", "circ;": "\u02c6", "circeq;": "\u2257", "circlearrowleft;": "\u21ba", "circlearrowright;": "\u21bb", "circledR;": "\xae", "circledS;": "\u24c8", "circledast;": "\u229b", "circledcirc;": "\u229a", "circleddash;": "\u229d", "cire;": "\u2257", "cirfnint;": "\u2a10", "cirmid;": "\u2aef", "cirscir;": "\u29c2", "clubs;": "\u2663", "clubsuit;": "\u2663", "colon;": ":", "colone;": "\u2254", "coloneq;": "\u2254", "comma;": ",", "commat;": "@", "comp;": "\u2201", "compfn;": "\u2218", "complement;": "\u2201", "complexes;": "\u2102", "cong;": "\u2245", "congdot;": "\u2a6d", "conint;": "\u222e", "copf;": "\U0001d554", "coprod;": "\u2210", "copy": "\xa9", "copy;": "\xa9", "copysr;": "\u2117", "crarr;": "\u21b5", "cross;": "\u2717", "cscr;": "\U0001d4b8", "csub;": "\u2acf", "csube;": "\u2ad1", "csup;": "\u2ad0", "csupe;": "\u2ad2", "ctdot;": "\u22ef", "cudarrl;": "\u2938", "cudarrr;": "\u2935", "cuepr;": "\u22de", "cuesc;": "\u22df", "cularr;": "\u21b6", "cularrp;": "\u293d", "cup;": "\u222a", "cupbrcap;": "\u2a48", "cupcap;": "\u2a46", "cupcup;": "\u2a4a", "cupdot;": "\u228d", "cupor;": "\u2a45", "cups;": "\u222a\ufe00", "curarr;": "\u21b7", "curarrm;": "\u293c", "curlyeqprec;": "\u22de", "curlyeqsucc;": "\u22df", "curlyvee;": "\u22ce", "curlywedge;": "\u22cf", "curren": "\xa4", "curren;": "\xa4", "curvearrowleft;": "\u21b6", "curvearrowright;": "\u21b7", "cuvee;": "\u22ce", "cuwed;": "\u22cf", "cwconint;": "\u2232", "cwint;": "\u2231", "cylcty;": "\u232d", "dArr;": "\u21d3", "dHar;": "\u2965", "dagger;": "\u2020", "daleth;": "\u2138", "darr;": "\u2193", "dash;": "\u2010", "dashv;": "\u22a3", "dbkarow;": "\u290f", "dblac;": "\u02dd", "dcaron;": "\u010f", "dcy;": "\u0434", "dd;": "\u2146", "ddagger;": "\u2021", "ddarr;": "\u21ca", "ddotseq;": "\u2a77", "deg": "\xb0", "deg;": "\xb0", "delta;": "\u03b4", "demptyv;": "\u29b1", "dfisht;": "\u297f", "dfr;": "\U0001d521", "dharl;": "\u21c3", "dharr;": "\u21c2", "diam;": "\u22c4", "diamond;": "\u22c4", "diamondsuit;": "\u2666", "diams;": "\u2666", "die;": "\xa8", "digamma;": "\u03dd", "disin;": "\u22f2", "div;": "\xf7", "divide": "\xf7", "divide;": "\xf7", "divideontimes;": "\u22c7", "divonx;": "\u22c7", "djcy;": "\u0452", "dlcorn;": "\u231e", "dlcrop;": "\u230d", "dollar;": "$", "dopf;": "\U0001d555", "dot;": "\u02d9", "doteq;": "\u2250", "doteqdot;": "\u2251", "dotminus;": "\u2238", "dotplus;": "\u2214", "dotsquare;": "\u22a1", "doublebarwedge;": "\u2306", "downarrow;": "\u2193", "downdownarrows;": "\u21ca", "downharpoonleft;": "\u21c3", "downharpoonright;": "\u21c2", "drbkarow;": "\u2910", "drcorn;": "\u231f", "drcrop;": "\u230c", "dscr;": "\U0001d4b9", "dscy;": "\u0455", "dsol;": "\u29f6", "dstrok;": "\u0111", "dtdot;": "\u22f1", "dtri;": "\u25bf", "dtrif;": "\u25be", "duarr;": "\u21f5", "duhar;": "\u296f", "dwangle;": "\u29a6", "dzcy;": "\u045f", "dzigrarr;": "\u27ff", "eDDot;": "\u2a77", "eDot;": "\u2251", "eacute": "\xe9", "eacute;": "\xe9", "easter;": "\u2a6e", "ecaron;": "\u011b", "ecir;": "\u2256", "ecirc": "\xea", "ecirc;": "\xea", "ecolon;": "\u2255", "ecy;": 
"\u044d", "edot;": "\u0117", "ee;": "\u2147", "efDot;": "\u2252", "efr;": "\U0001d522", "eg;": "\u2a9a", "egrave": "\xe8", "egrave;": "\xe8", "egs;": "\u2a96", "egsdot;": "\u2a98", "el;": "\u2a99", "elinters;": "\u23e7", "ell;": "\u2113", "els;": "\u2a95", "elsdot;": "\u2a97", "emacr;": "\u0113", "empty;": "\u2205", "emptyset;": "\u2205", "emptyv;": "\u2205", "emsp13;": "\u2004", "emsp14;": "\u2005", "emsp;": "\u2003", "eng;": "\u014b", "ensp;": "\u2002", "eogon;": "\u0119", "eopf;": "\U0001d556", "epar;": "\u22d5", "eparsl;": "\u29e3", "eplus;": "\u2a71", "epsi;": "\u03b5", "epsilon;": "\u03b5", "epsiv;": "\u03f5", "eqcirc;": "\u2256", "eqcolon;": "\u2255", "eqsim;": "\u2242", "eqslantgtr;": "\u2a96", "eqslantless;": "\u2a95", "equals;": "=", "equest;": "\u225f", "equiv;": "\u2261", "equivDD;": "\u2a78", "eqvparsl;": "\u29e5", "erDot;": "\u2253", "erarr;": "\u2971", "escr;": "\u212f", "esdot;": "\u2250", "esim;": "\u2242", "eta;": "\u03b7", "eth": "\xf0", "eth;": "\xf0", "euml": "\xeb", "euml;": "\xeb", "euro;": "\u20ac", "excl;": "!", "exist;": "\u2203", "expectation;": "\u2130", "exponentiale;": "\u2147", "fallingdotseq;": "\u2252", "fcy;": "\u0444", "female;": "\u2640", "ffilig;": "\ufb03", "fflig;": "\ufb00", "ffllig;": "\ufb04", "ffr;": "\U0001d523", "filig;": "\ufb01", "fjlig;": "fj", "flat;": "\u266d", "fllig;": "\ufb02", "fltns;": "\u25b1", "fnof;": "\u0192", "fopf;": "\U0001d557", "forall;": "\u2200", "fork;": "\u22d4", "forkv;": "\u2ad9", "fpartint;": "\u2a0d", "frac12": "\xbd", "frac12;": "\xbd", "frac13;": "\u2153", "frac14": "\xbc", "frac14;": "\xbc", "frac15;": "\u2155", "frac16;": "\u2159", "frac18;": "\u215b", "frac23;": "\u2154", "frac25;": "\u2156", "frac34": "\xbe", "frac34;": "\xbe", "frac35;": "\u2157", "frac38;": "\u215c", "frac45;": "\u2158", "frac56;": "\u215a", "frac58;": "\u215d", "frac78;": "\u215e", "frasl;": "\u2044", "frown;": "\u2322", "fscr;": "\U0001d4bb", "gE;": "\u2267", "gEl;": "\u2a8c", "gacute;": "\u01f5", "gamma;": "\u03b3", "gammad;": "\u03dd", "gap;": "\u2a86", "gbreve;": "\u011f", "gcirc;": "\u011d", "gcy;": "\u0433", "gdot;": "\u0121", "ge;": "\u2265", "gel;": "\u22db", "geq;": "\u2265", "geqq;": "\u2267", "geqslant;": "\u2a7e", "ges;": "\u2a7e", "gescc;": "\u2aa9", "gesdot;": "\u2a80", "gesdoto;": "\u2a82", "gesdotol;": "\u2a84", "gesl;": "\u22db\ufe00", "gesles;": "\u2a94", "gfr;": "\U0001d524", "gg;": "\u226b", "ggg;": "\u22d9", "gimel;": "\u2137", "gjcy;": "\u0453", "gl;": "\u2277", "glE;": "\u2a92", "gla;": "\u2aa5", "glj;": "\u2aa4", "gnE;": "\u2269", "gnap;": "\u2a8a", "gnapprox;": "\u2a8a", "gne;": "\u2a88", "gneq;": "\u2a88", "gneqq;": "\u2269", "gnsim;": "\u22e7", "gopf;": "\U0001d558", "grave;": "`", "gscr;": "\u210a", "gsim;": "\u2273", "gsime;": "\u2a8e", "gsiml;": "\u2a90", "gt": ">", "gt;": ">", "gtcc;": "\u2aa7", "gtcir;": "\u2a7a", "gtdot;": "\u22d7", "gtlPar;": "\u2995", "gtquest;": "\u2a7c", "gtrapprox;": "\u2a86", "gtrarr;": "\u2978", "gtrdot;": "\u22d7", "gtreqless;": "\u22db", "gtreqqless;": "\u2a8c", "gtrless;": "\u2277", "gtrsim;": "\u2273", "gvertneqq;": "\u2269\ufe00", "gvnE;": "\u2269\ufe00", "hArr;": "\u21d4", "hairsp;": "\u200a", "half;": "\xbd", "hamilt;": "\u210b", "hardcy;": "\u044a", "harr;": "\u2194", "harrcir;": "\u2948", "harrw;": "\u21ad", "hbar;": "\u210f", "hcirc;": "\u0125", "hearts;": "\u2665", "heartsuit;": "\u2665", "hellip;": "\u2026", "hercon;": "\u22b9", "hfr;": "\U0001d525", "hksearow;": "\u2925", "hkswarow;": "\u2926", "hoarr;": "\u21ff", "homtht;": "\u223b", "hookleftarrow;": "\u21a9", 
"hookrightarrow;": "\u21aa", "hopf;": "\U0001d559", "horbar;": "\u2015", "hscr;": "\U0001d4bd", "hslash;": "\u210f", "hstrok;": "\u0127", "hybull;": "\u2043", "hyphen;": "\u2010", "iacute": "\xed", "iacute;": "\xed", "ic;": "\u2063", "icirc": "\xee", "icirc;": "\xee", "icy;": "\u0438", "iecy;": "\u0435", "iexcl": "\xa1", "iexcl;": "\xa1", "iff;": "\u21d4", "ifr;": "\U0001d526", "igrave": "\xec", "igrave;": "\xec", "ii;": "\u2148", "iiiint;": "\u2a0c", "iiint;": "\u222d", "iinfin;": "\u29dc", "iiota;": "\u2129", "ijlig;": "\u0133", "imacr;": "\u012b", "image;": "\u2111", "imagline;": "\u2110", "imagpart;": "\u2111", "imath;": "\u0131", "imof;": "\u22b7", "imped;": "\u01b5", "in;": "\u2208", "incare;": "\u2105", "infin;": "\u221e", "infintie;": "\u29dd", "inodot;": "\u0131", "int;": "\u222b", "intcal;": "\u22ba", "integers;": "\u2124", "intercal;": "\u22ba", "intlarhk;": "\u2a17", "intprod;": "\u2a3c", "iocy;": "\u0451", "iogon;": "\u012f", "iopf;": "\U0001d55a", "iota;": "\u03b9", "iprod;": "\u2a3c", "iquest": "\xbf", "iquest;": "\xbf", "iscr;": "\U0001d4be", "isin;": "\u2208", "isinE;": "\u22f9", "isindot;": "\u22f5", "isins;": "\u22f4", "isinsv;": "\u22f3", "isinv;": "\u2208", "it;": "\u2062", "itilde;": "\u0129", "iukcy;": "\u0456", "iuml": "\xef", "iuml;": "\xef", "jcirc;": "\u0135", "jcy;": "\u0439", "jfr;": "\U0001d527", "jmath;": "\u0237", "jopf;": "\U0001d55b", "jscr;": "\U0001d4bf", "jsercy;": "\u0458", "jukcy;": "\u0454", "kappa;": "\u03ba", "kappav;": "\u03f0", "kcedil;": "\u0137", "kcy;": "\u043a", "kfr;": "\U0001d528", "kgreen;": "\u0138", "khcy;": "\u0445", "kjcy;": "\u045c", "kopf;": "\U0001d55c", "kscr;": "\U0001d4c0", "lAarr;": "\u21da", "lArr;": "\u21d0", "lAtail;": "\u291b", "lBarr;": "\u290e", "lE;": "\u2266", "lEg;": "\u2a8b", "lHar;": "\u2962", "lacute;": "\u013a", "laemptyv;": "\u29b4", "lagran;": "\u2112", "lambda;": "\u03bb", "lang;": "\u27e8", "langd;": "\u2991", "langle;": "\u27e8", "lap;": "\u2a85", "laquo": "\xab", "laquo;": "\xab", "larr;": "\u2190", "larrb;": "\u21e4", "larrbfs;": "\u291f", "larrfs;": "\u291d", "larrhk;": "\u21a9", "larrlp;": "\u21ab", "larrpl;": "\u2939", "larrsim;": "\u2973", "larrtl;": "\u21a2", "lat;": "\u2aab", "latail;": "\u2919", "late;": "\u2aad", "lates;": "\u2aad\ufe00", "lbarr;": "\u290c", "lbbrk;": "\u2772", "lbrace;": "{", "lbrack;": "[", "lbrke;": "\u298b", "lbrksld;": "\u298f", "lbrkslu;": "\u298d", "lcaron;": "\u013e", "lcedil;": "\u013c", "lceil;": "\u2308", "lcub;": "{", "lcy;": "\u043b", "ldca;": "\u2936", "ldquo;": "\u201c", "ldquor;": "\u201e", "ldrdhar;": "\u2967", "ldrushar;": "\u294b", "ldsh;": "\u21b2", "le;": "\u2264", "leftarrow;": "\u2190", "leftarrowtail;": "\u21a2", "leftharpoondown;": "\u21bd", "leftharpoonup;": "\u21bc", "leftleftarrows;": "\u21c7", "leftrightarrow;": "\u2194", "leftrightarrows;": "\u21c6", "leftrightharpoons;": "\u21cb", "leftrightsquigarrow;": "\u21ad", "leftthreetimes;": "\u22cb", "leg;": "\u22da", "leq;": "\u2264", "leqq;": "\u2266", "leqslant;": "\u2a7d", "les;": "\u2a7d", "lescc;": "\u2aa8", "lesdot;": "\u2a7f", "lesdoto;": "\u2a81", "lesdotor;": "\u2a83", "lesg;": "\u22da\ufe00", "lesges;": "\u2a93", "lessapprox;": "\u2a85", "lessdot;": "\u22d6", "lesseqgtr;": "\u22da", "lesseqqgtr;": "\u2a8b", "lessgtr;": "\u2276", "lesssim;": "\u2272", "lfisht;": "\u297c", "lfloor;": "\u230a", "lfr;": "\U0001d529", "lg;": "\u2276", "lgE;": "\u2a91", "lhard;": "\u21bd", "lharu;": "\u21bc", "lharul;": "\u296a", "lhblk;": "\u2584", "ljcy;": "\u0459", "ll;": "\u226a", "llarr;": "\u21c7", "llcorner;": 
"\u231e", "llhard;": "\u296b", "lltri;": "\u25fa", "lmidot;": "\u0140", "lmoust;": "\u23b0", "lmoustache;": "\u23b0", "lnE;": "\u2268", "lnap;": "\u2a89", "lnapprox;": "\u2a89", "lne;": "\u2a87", "lneq;": "\u2a87", "lneqq;": "\u2268", "lnsim;": "\u22e6", "loang;": "\u27ec", "loarr;": "\u21fd", "lobrk;": "\u27e6", "longleftarrow;": "\u27f5", "longleftrightarrow;": "\u27f7", "longmapsto;": "\u27fc", "longrightarrow;": "\u27f6", "looparrowleft;": "\u21ab", "looparrowright;": "\u21ac", "lopar;": "\u2985", "lopf;": "\U0001d55d", "loplus;": "\u2a2d", "lotimes;": "\u2a34", "lowast;": "\u2217", "lowbar;": "_", "loz;": "\u25ca", "lozenge;": "\u25ca", "lozf;": "\u29eb", "lpar;": "(", "lparlt;": "\u2993", "lrarr;": "\u21c6", "lrcorner;": "\u231f", "lrhar;": "\u21cb", "lrhard;": "\u296d", "lrm;": "\u200e", "lrtri;": "\u22bf", "lsaquo;": "\u2039", "lscr;": "\U0001d4c1", "lsh;": "\u21b0", "lsim;": "\u2272", "lsime;": "\u2a8d", "lsimg;": "\u2a8f", "lsqb;": "[", "lsquo;": "\u2018", "lsquor;": "\u201a", "lstrok;": "\u0142", "lt": "<", "lt;": "<", "ltcc;": "\u2aa6", "ltcir;": "\u2a79", "ltdot;": "\u22d6", "lthree;": "\u22cb", "ltimes;": "\u22c9", "ltlarr;": "\u2976", "ltquest;": "\u2a7b", "ltrPar;": "\u2996", "ltri;": "\u25c3", "ltrie;": "\u22b4", "ltrif;": "\u25c2", "lurdshar;": "\u294a", "luruhar;": "\u2966", "lvertneqq;": "\u2268\ufe00", "lvnE;": "\u2268\ufe00", "mDDot;": "\u223a", "macr": "\xaf", "macr;": "\xaf", "male;": "\u2642", "malt;": "\u2720", "maltese;": "\u2720", "map;": "\u21a6", "mapsto;": "\u21a6", "mapstodown;": "\u21a7", "mapstoleft;": "\u21a4", "mapstoup;": "\u21a5", "marker;": "\u25ae", "mcomma;": "\u2a29", "mcy;": "\u043c", "mdash;": "\u2014", "measuredangle;": "\u2221", "mfr;": "\U0001d52a", "mho;": "\u2127", "micro": "\xb5", "micro;": "\xb5", "mid;": "\u2223", "midast;": "*", "midcir;": "\u2af0", "middot": "\xb7", "middot;": "\xb7", "minus;": "\u2212", "minusb;": "\u229f", "minusd;": "\u2238", "minusdu;": "\u2a2a", "mlcp;": "\u2adb", "mldr;": "\u2026", "mnplus;": "\u2213", "models;": "\u22a7", "mopf;": "\U0001d55e", "mp;": "\u2213", "mscr;": "\U0001d4c2", "mstpos;": "\u223e", "mu;": "\u03bc", "multimap;": "\u22b8", "mumap;": "\u22b8", "nGg;": "\u22d9\u0338", "nGt;": "\u226b\u20d2", "nGtv;": "\u226b\u0338", "nLeftarrow;": "\u21cd", "nLeftrightarrow;": "\u21ce", "nLl;": "\u22d8\u0338", "nLt;": "\u226a\u20d2", "nLtv;": "\u226a\u0338", "nRightarrow;": "\u21cf", "nVDash;": "\u22af", "nVdash;": "\u22ae", "nabla;": "\u2207", "nacute;": "\u0144", "nang;": "\u2220\u20d2", "nap;": "\u2249", "napE;": "\u2a70\u0338", "napid;": "\u224b\u0338", "napos;": "\u0149", "napprox;": "\u2249", "natur;": "\u266e", "natural;": "\u266e", "naturals;": "\u2115", "nbsp": "\xa0", "nbsp;": "\xa0", "nbump;": "\u224e\u0338", "nbumpe;": "\u224f\u0338", "ncap;": "\u2a43", "ncaron;": "\u0148", "ncedil;": "\u0146", "ncong;": "\u2247", "ncongdot;": "\u2a6d\u0338", "ncup;": "\u2a42", "ncy;": "\u043d", "ndash;": "\u2013", "ne;": "\u2260", "neArr;": "\u21d7", "nearhk;": "\u2924", "nearr;": "\u2197", "nearrow;": "\u2197", "nedot;": "\u2250\u0338", "nequiv;": "\u2262", "nesear;": "\u2928", "nesim;": "\u2242\u0338", "nexist;": "\u2204", "nexists;": "\u2204", "nfr;": "\U0001d52b", "ngE;": "\u2267\u0338", "nge;": "\u2271", "ngeq;": "\u2271", "ngeqq;": "\u2267\u0338", "ngeqslant;": "\u2a7e\u0338", "nges;": "\u2a7e\u0338", "ngsim;": "\u2275", "ngt;": "\u226f", "ngtr;": "\u226f", "nhArr;": "\u21ce", "nharr;": "\u21ae", "nhpar;": "\u2af2", "ni;": "\u220b", "nis;": "\u22fc", "nisd;": "\u22fa", "niv;": "\u220b", "njcy;": "\u045a", 
"nlArr;": "\u21cd", "nlE;": "\u2266\u0338", "nlarr;": "\u219a", "nldr;": "\u2025", "nle;": "\u2270", "nleftarrow;": "\u219a", "nleftrightarrow;": "\u21ae", "nleq;": "\u2270", "nleqq;": "\u2266\u0338", "nleqslant;": "\u2a7d\u0338", "nles;": "\u2a7d\u0338", "nless;": "\u226e", "nlsim;": "\u2274", "nlt;": "\u226e", "nltri;": "\u22ea", "nltrie;": "\u22ec", "nmid;": "\u2224", "nopf;": "\U0001d55f", "not": "\xac", "not;": "\xac", "notin;": "\u2209", "notinE;": "\u22f9\u0338", "notindot;": "\u22f5\u0338", "notinva;": "\u2209", "notinvb;": "\u22f7", "notinvc;": "\u22f6", "notni;": "\u220c", "notniva;": "\u220c", "notnivb;": "\u22fe", "notnivc;": "\u22fd", "npar;": "\u2226", "nparallel;": "\u2226", "nparsl;": "\u2afd\u20e5", "npart;": "\u2202\u0338", "npolint;": "\u2a14", "npr;": "\u2280", "nprcue;": "\u22e0", "npre;": "\u2aaf\u0338", "nprec;": "\u2280", "npreceq;": "\u2aaf\u0338", "nrArr;": "\u21cf", "nrarr;": "\u219b", "nrarrc;": "\u2933\u0338", "nrarrw;": "\u219d\u0338", "nrightarrow;": "\u219b", "nrtri;": "\u22eb", "nrtrie;": "\u22ed", "nsc;": "\u2281", "nsccue;": "\u22e1", "nsce;": "\u2ab0\u0338", "nscr;": "\U0001d4c3", "nshortmid;": "\u2224", "nshortparallel;": "\u2226", "nsim;": "\u2241", "nsime;": "\u2244", "nsimeq;": "\u2244", "nsmid;": "\u2224", "nspar;": "\u2226", "nsqsube;": "\u22e2", "nsqsupe;": "\u22e3", "nsub;": "\u2284", "nsubE;": "\u2ac5\u0338", "nsube;": "\u2288", "nsubset;": "\u2282\u20d2", "nsubseteq;": "\u2288", "nsubseteqq;": "\u2ac5\u0338", "nsucc;": "\u2281", "nsucceq;": "\u2ab0\u0338", "nsup;": "\u2285", "nsupE;": "\u2ac6\u0338", "nsupe;": "\u2289", "nsupset;": "\u2283\u20d2", "nsupseteq;": "\u2289", "nsupseteqq;": "\u2ac6\u0338", "ntgl;": "\u2279", "ntilde": "\xf1", "ntilde;": "\xf1", "ntlg;": "\u2278", "ntriangleleft;": "\u22ea", "ntrianglelefteq;": "\u22ec", "ntriangleright;": "\u22eb", "ntrianglerighteq;": "\u22ed", "nu;": "\u03bd", "num;": "#", "numero;": "\u2116", "numsp;": "\u2007", "nvDash;": "\u22ad", "nvHarr;": "\u2904", "nvap;": "\u224d\u20d2", "nvdash;": "\u22ac", "nvge;": "\u2265\u20d2", "nvgt;": ">\u20d2", "nvinfin;": "\u29de", "nvlArr;": "\u2902", "nvle;": "\u2264\u20d2", "nvlt;": "<\u20d2", "nvltrie;": "\u22b4\u20d2", "nvrArr;": "\u2903", "nvrtrie;": "\u22b5\u20d2", "nvsim;": "\u223c\u20d2", "nwArr;": "\u21d6", "nwarhk;": "\u2923", "nwarr;": "\u2196", "nwarrow;": "\u2196", "nwnear;": "\u2927", "oS;": "\u24c8", "oacute": "\xf3", "oacute;": "\xf3", "oast;": "\u229b", "ocir;": "\u229a", "ocirc": "\xf4", "ocirc;": "\xf4", "ocy;": "\u043e", "odash;": "\u229d", "odblac;": "\u0151", "odiv;": "\u2a38", "odot;": "\u2299", "odsold;": "\u29bc", "oelig;": "\u0153", "ofcir;": "\u29bf", "ofr;": "\U0001d52c", "ogon;": "\u02db", "ograve": "\xf2", "ograve;": "\xf2", "ogt;": "\u29c1", "ohbar;": "\u29b5", "ohm;": "\u03a9", "oint;": "\u222e", "olarr;": "\u21ba", "olcir;": "\u29be", "olcross;": "\u29bb", "oline;": "\u203e", "olt;": "\u29c0", "omacr;": "\u014d", "omega;": "\u03c9", "omicron;": "\u03bf", "omid;": "\u29b6", "ominus;": "\u2296", "oopf;": "\U0001d560", "opar;": "\u29b7", "operp;": "\u29b9", "oplus;": "\u2295", "or;": "\u2228", "orarr;": "\u21bb", "ord;": "\u2a5d", "order;": "\u2134", "orderof;": "\u2134", "ordf": "\xaa", "ordf;": "\xaa", "ordm": "\xba", "ordm;": "\xba", "origof;": "\u22b6", "oror;": "\u2a56", "orslope;": "\u2a57", "orv;": "\u2a5b", "oscr;": "\u2134", "oslash": "\xf8", "oslash;": "\xf8", "osol;": "\u2298", "otilde": "\xf5", "otilde;": "\xf5", "otimes;": "\u2297", "otimesas;": "\u2a36", "ouml": "\xf6", "ouml;": "\xf6", "ovbar;": "\u233d", "par;": 
"\u2225", "para": "\xb6", "para;": "\xb6", "parallel;": "\u2225", "parsim;": "\u2af3", "parsl;": "\u2afd", "part;": "\u2202", "pcy;": "\u043f", "percnt;": "%", "period;": ".", "permil;": "\u2030", "perp;": "\u22a5", "pertenk;": "\u2031", "pfr;": "\U0001d52d", "phi;": "\u03c6", "phiv;": "\u03d5", "phmmat;": "\u2133", "phone;": "\u260e", "pi;": "\u03c0", "pitchfork;": "\u22d4", "piv;": "\u03d6", "planck;": "\u210f", "planckh;": "\u210e", "plankv;": "\u210f", "plus;": "+", "plusacir;": "\u2a23", "plusb;": "\u229e", "pluscir;": "\u2a22", "plusdo;": "\u2214", "plusdu;": "\u2a25", "pluse;": "\u2a72", "plusmn": "\xb1", "plusmn;": "\xb1", "plussim;": "\u2a26", "plustwo;": "\u2a27", "pm;": "\xb1", "pointint;": "\u2a15", "popf;": "\U0001d561", "pound": "\xa3", "pound;": "\xa3", "pr;": "\u227a", "prE;": "\u2ab3", "prap;": "\u2ab7", "prcue;": "\u227c", "pre;": "\u2aaf", "prec;": "\u227a", "precapprox;": "\u2ab7", "preccurlyeq;": "\u227c", "preceq;": "\u2aaf", "precnapprox;": "\u2ab9", "precneqq;": "\u2ab5", "precnsim;": "\u22e8", "precsim;": "\u227e", "prime;": "\u2032", "primes;": "\u2119", "prnE;": "\u2ab5", "prnap;": "\u2ab9", "prnsim;": "\u22e8", "prod;": "\u220f", "profalar;": "\u232e", "profline;": "\u2312", "profsurf;": "\u2313", "prop;": "\u221d", "propto;": "\u221d", "prsim;": "\u227e", "prurel;": "\u22b0", "pscr;": "\U0001d4c5", "psi;": "\u03c8", "puncsp;": "\u2008", "qfr;": "\U0001d52e", "qint;": "\u2a0c", "qopf;": "\U0001d562", "qprime;": "\u2057", "qscr;": "\U0001d4c6", "quaternions;": "\u210d", "quatint;": "\u2a16", "quest;": "?", "questeq;": "\u225f", "quot": "\"", "quot;": "\"", "rAarr;": "\u21db", "rArr;": "\u21d2", "rAtail;": "\u291c", "rBarr;": "\u290f", "rHar;": "\u2964", "race;": "\u223d\u0331", "racute;": "\u0155", "radic;": "\u221a", "raemptyv;": "\u29b3", "rang;": "\u27e9", "rangd;": "\u2992", "range;": "\u29a5", "rangle;": "\u27e9", "raquo": "\xbb", "raquo;": "\xbb", "rarr;": "\u2192", "rarrap;": "\u2975", "rarrb;": "\u21e5", "rarrbfs;": "\u2920", "rarrc;": "\u2933", "rarrfs;": "\u291e", "rarrhk;": "\u21aa", "rarrlp;": "\u21ac", "rarrpl;": "\u2945", "rarrsim;": "\u2974", "rarrtl;": "\u21a3", "rarrw;": "\u219d", "ratail;": "\u291a", "ratio;": "\u2236", "rationals;": "\u211a", "rbarr;": "\u290d", "rbbrk;": "\u2773", "rbrace;": "}", "rbrack;": "]", "rbrke;": "\u298c", "rbrksld;": "\u298e", "rbrkslu;": "\u2990", "rcaron;": "\u0159", "rcedil;": "\u0157", "rceil;": "\u2309", "rcub;": "}", "rcy;": "\u0440", "rdca;": "\u2937", "rdldhar;": "\u2969", "rdquo;": "\u201d", "rdquor;": "\u201d", "rdsh;": "\u21b3", "real;": "\u211c", "realine;": "\u211b", "realpart;": "\u211c", "reals;": "\u211d", "rect;": "\u25ad", "reg": "\xae", "reg;": "\xae", "rfisht;": "\u297d", "rfloor;": "\u230b", "rfr;": "\U0001d52f", "rhard;": "\u21c1", "rharu;": "\u21c0", "rharul;": "\u296c", "rho;": "\u03c1", "rhov;": "\u03f1", "rightarrow;": "\u2192", "rightarrowtail;": "\u21a3", "rightharpoondown;": "\u21c1", "rightharpoonup;": "\u21c0", "rightleftarrows;": "\u21c4", "rightleftharpoons;": "\u21cc", "rightrightarrows;": "\u21c9", "rightsquigarrow;": "\u219d", "rightthreetimes;": "\u22cc", "ring;": "\u02da", "risingdotseq;": "\u2253", "rlarr;": "\u21c4", "rlhar;": "\u21cc", "rlm;": "\u200f", "rmoust;": "\u23b1", "rmoustache;": "\u23b1", "rnmid;": "\u2aee", "roang;": "\u27ed", "roarr;": "\u21fe", "robrk;": "\u27e7", "ropar;": "\u2986", "ropf;": "\U0001d563", "roplus;": "\u2a2e", "rotimes;": "\u2a35", "rpar;": ")", "rpargt;": "\u2994", "rppolint;": "\u2a12", "rrarr;": "\u21c9", "rsaquo;": "\u203a", "rscr;": 
"\U0001d4c7", "rsh;": "\u21b1", "rsqb;": "]", "rsquo;": "\u2019", "rsquor;": "\u2019", "rthree;": "\u22cc", "rtimes;": "\u22ca", "rtri;": "\u25b9", "rtrie;": "\u22b5", "rtrif;": "\u25b8", "rtriltri;": "\u29ce", "ruluhar;": "\u2968", "rx;": "\u211e", "sacute;": "\u015b", "sbquo;": "\u201a", "sc;": "\u227b", "scE;": "\u2ab4", "scap;": "\u2ab8", "scaron;": "\u0161", "sccue;": "\u227d", "sce;": "\u2ab0", "scedil;": "\u015f", "scirc;": "\u015d", "scnE;": "\u2ab6", "scnap;": "\u2aba", "scnsim;": "\u22e9", "scpolint;": "\u2a13", "scsim;": "\u227f", "scy;": "\u0441", "sdot;": "\u22c5", "sdotb;": "\u22a1", "sdote;": "\u2a66", "seArr;": "\u21d8", "searhk;": "\u2925", "searr;": "\u2198", "searrow;": "\u2198", "sect": "\xa7", "sect;": "\xa7", "semi;": ";", "seswar;": "\u2929", "setminus;": "\u2216", "setmn;": "\u2216", "sext;": "\u2736", "sfr;": "\U0001d530", "sfrown;": "\u2322", "sharp;": "\u266f", "shchcy;": "\u0449", "shcy;": "\u0448", "shortmid;": "\u2223", "shortparallel;": "\u2225", "shy": "\xad", "shy;": "\xad", "sigma;": "\u03c3", "sigmaf;": "\u03c2", "sigmav;": "\u03c2", "sim;": "\u223c", "simdot;": "\u2a6a", "sime;": "\u2243", "simeq;": "\u2243", "simg;": "\u2a9e", "simgE;": "\u2aa0", "siml;": "\u2a9d", "simlE;": "\u2a9f", "simne;": "\u2246", "simplus;": "\u2a24", "simrarr;": "\u2972", "slarr;": "\u2190", "smallsetminus;": "\u2216", "smashp;": "\u2a33", "smeparsl;": "\u29e4", "smid;": "\u2223", "smile;": "\u2323", "smt;": "\u2aaa", "smte;": "\u2aac", "smtes;": "\u2aac\ufe00", "softcy;": "\u044c", "sol;": "/", "solb;": "\u29c4", "solbar;": "\u233f", "sopf;": "\U0001d564", "spades;": "\u2660", "spadesuit;": "\u2660", "spar;": "\u2225", "sqcap;": "\u2293", "sqcaps;": "\u2293\ufe00", "sqcup;": "\u2294", "sqcups;": "\u2294\ufe00", "sqsub;": "\u228f", "sqsube;": "\u2291", "sqsubset;": "\u228f", "sqsubseteq;": "\u2291", "sqsup;": "\u2290", "sqsupe;": "\u2292", "sqsupset;": "\u2290", "sqsupseteq;": "\u2292", "squ;": "\u25a1", "square;": "\u25a1", "squarf;": "\u25aa", "squf;": "\u25aa", "srarr;": "\u2192", "sscr;": "\U0001d4c8", "ssetmn;": "\u2216", "ssmile;": "\u2323", "sstarf;": "\u22c6", "star;": "\u2606", "starf;": "\u2605", "straightepsilon;": "\u03f5", "straightphi;": "\u03d5", "strns;": "\xaf", "sub;": "\u2282", "subE;": "\u2ac5", "subdot;": "\u2abd", "sube;": "\u2286", "subedot;": "\u2ac3", "submult;": "\u2ac1", "subnE;": "\u2acb", "subne;": "\u228a", "subplus;": "\u2abf", "subrarr;": "\u2979", "subset;": "\u2282", "subseteq;": "\u2286", "subseteqq;": "\u2ac5", "subsetneq;": "\u228a", "subsetneqq;": "\u2acb", "subsim;": "\u2ac7", "subsub;": "\u2ad5", "subsup;": "\u2ad3", "succ;": "\u227b", "succapprox;": "\u2ab8", "succcurlyeq;": "\u227d", "succeq;": "\u2ab0", "succnapprox;": "\u2aba", "succneqq;": "\u2ab6", "succnsim;": "\u22e9", "succsim;": "\u227f", "sum;": "\u2211", "sung;": "\u266a", "sup1": "\xb9", "sup1;": "\xb9", "sup2": "\xb2", "sup2;": "\xb2", "sup3": "\xb3", "sup3;": "\xb3", "sup;": "\u2283", "supE;": "\u2ac6", "supdot;": "\u2abe", "supdsub;": "\u2ad8", "supe;": "\u2287", "supedot;": "\u2ac4", "suphsol;": "\u27c9", "suphsub;": "\u2ad7", "suplarr;": "\u297b", "supmult;": "\u2ac2", "supnE;": "\u2acc", "supne;": "\u228b", "supplus;": "\u2ac0", "supset;": "\u2283", "supseteq;": "\u2287", "supseteqq;": "\u2ac6", "supsetneq;": "\u228b", "supsetneqq;": "\u2acc", "supsim;": "\u2ac8", "supsub;": "\u2ad4", "supsup;": "\u2ad6", "swArr;": "\u21d9", "swarhk;": "\u2926", "swarr;": "\u2199", "swarrow;": "\u2199", "swnwar;": "\u292a", "szlig": "\xdf", "szlig;": "\xdf", "target;": "\u2316", "tau;": 
"\u03c4", "tbrk;": "\u23b4", "tcaron;": "\u0165", "tcedil;": "\u0163", "tcy;": "\u0442", "tdot;": "\u20db", "telrec;": "\u2315", "tfr;": "\U0001d531", "there4;": "\u2234", "therefore;": "\u2234", "theta;": "\u03b8", "thetasym;": "\u03d1", "thetav;": "\u03d1", "thickapprox;": "\u2248", "thicksim;": "\u223c", "thinsp;": "\u2009", "thkap;": "\u2248", "thksim;": "\u223c", "thorn": "\xfe", "thorn;": "\xfe", "tilde;": "\u02dc", "times": "\xd7", "times;": "\xd7", "timesb;": "\u22a0", "timesbar;": "\u2a31", "timesd;": "\u2a30", "tint;": "\u222d", "toea;": "\u2928", "top;": "\u22a4", "topbot;": "\u2336", "topcir;": "\u2af1", "topf;": "\U0001d565", "topfork;": "\u2ada", "tosa;": "\u2929", "tprime;": "\u2034", "trade;": "\u2122", "triangle;": "\u25b5", "triangledown;": "\u25bf", "triangleleft;": "\u25c3", "trianglelefteq;": "\u22b4", "triangleq;": "\u225c", "triangleright;": "\u25b9", "trianglerighteq;": "\u22b5", "tridot;": "\u25ec", "trie;": "\u225c", "triminus;": "\u2a3a", "triplus;": "\u2a39", "trisb;": "\u29cd", "tritime;": "\u2a3b", "trpezium;": "\u23e2", "tscr;": "\U0001d4c9", "tscy;": "\u0446", "tshcy;": "\u045b", "tstrok;": "\u0167", "twixt;": "\u226c", "twoheadleftarrow;": "\u219e", "twoheadrightarrow;": "\u21a0", "uArr;": "\u21d1", "uHar;": "\u2963", "uacute": "\xfa", "uacute;": "\xfa", "uarr;": "\u2191", "ubrcy;": "\u045e", "ubreve;": "\u016d", "ucirc": "\xfb", "ucirc;": "\xfb", "ucy;": "\u0443", "udarr;": "\u21c5", "udblac;": "\u0171", "udhar;": "\u296e", "ufisht;": "\u297e", "ufr;": "\U0001d532", "ugrave": "\xf9", "ugrave;": "\xf9", "uharl;": "\u21bf", "uharr;": "\u21be", "uhblk;": "\u2580", "ulcorn;": "\u231c", "ulcorner;": "\u231c", "ulcrop;": "\u230f", "ultri;": "\u25f8", "umacr;": "\u016b", "uml": "\xa8", "uml;": "\xa8", "uogon;": "\u0173", "uopf;": "\U0001d566", "uparrow;": "\u2191", "updownarrow;": "\u2195", "upharpoonleft;": "\u21bf", "upharpoonright;": "\u21be", "uplus;": "\u228e", "upsi;": "\u03c5", "upsih;": "\u03d2", "upsilon;": "\u03c5", "upuparrows;": "\u21c8", "urcorn;": "\u231d", "urcorner;": "\u231d", "urcrop;": "\u230e", "uring;": "\u016f", "urtri;": "\u25f9", "uscr;": "\U0001d4ca", "utdot;": "\u22f0", "utilde;": "\u0169", "utri;": "\u25b5", "utrif;": "\u25b4", "uuarr;": "\u21c8", "uuml": "\xfc", "uuml;": "\xfc", "uwangle;": "\u29a7", "vArr;": "\u21d5", "vBar;": "\u2ae8", "vBarv;": "\u2ae9", "vDash;": "\u22a8", "vangrt;": "\u299c", "varepsilon;": "\u03f5", "varkappa;": "\u03f0", "varnothing;": "\u2205", "varphi;": "\u03d5", "varpi;": "\u03d6", "varpropto;": "\u221d", "varr;": "\u2195", "varrho;": "\u03f1", "varsigma;": "\u03c2", "varsubsetneq;": "\u228a\ufe00", "varsubsetneqq;": "\u2acb\ufe00", "varsupsetneq;": "\u228b\ufe00", "varsupsetneqq;": "\u2acc\ufe00", "vartheta;": "\u03d1", "vartriangleleft;": "\u22b2", "vartriangleright;": "\u22b3", "vcy;": "\u0432", "vdash;": "\u22a2", "vee;": "\u2228", "veebar;": "\u22bb", "veeeq;": "\u225a", "vellip;": "\u22ee", "verbar;": "|", "vert;": "|", "vfr;": "\U0001d533", "vltri;": "\u22b2", "vnsub;": "\u2282\u20d2", "vnsup;": "\u2283\u20d2", "vopf;": "\U0001d567", "vprop;": "\u221d", "vrtri;": "\u22b3", "vscr;": "\U0001d4cb", "vsubnE;": "\u2acb\ufe00", "vsubne;": "\u228a\ufe00", "vsupnE;": "\u2acc\ufe00", "vsupne;": "\u228b\ufe00", "vzigzag;": "\u299a", "wcirc;": "\u0175", "wedbar;": "\u2a5f", "wedge;": "\u2227", "wedgeq;": "\u2259", "weierp;": "\u2118", "wfr;": "\U0001d534", "wopf;": "\U0001d568", "wp;": "\u2118", "wr;": "\u2240", "wreath;": "\u2240", "wscr;": "\U0001d4cc", "xcap;": "\u22c2", "xcirc;": "\u25ef", "xcup;": "\u22c3", 
"xdtri;": "\u25bd", "xfr;": "\U0001d535", "xhArr;": "\u27fa", "xharr;": "\u27f7", "xi;": "\u03be", "xlArr;": "\u27f8", "xlarr;": "\u27f5", "xmap;": "\u27fc", "xnis;": "\u22fb", "xodot;": "\u2a00", "xopf;": "\U0001d569", "xoplus;": "\u2a01", "xotime;": "\u2a02", "xrArr;": "\u27f9", "xrarr;": "\u27f6", "xscr;": "\U0001d4cd", "xsqcup;": "\u2a06", "xuplus;": "\u2a04", "xutri;": "\u25b3", "xvee;": "\u22c1", "xwedge;": "\u22c0", "yacute": "\xfd", "yacute;": "\xfd", "yacy;": "\u044f", "ycirc;": "\u0177", "ycy;": "\u044b", "yen": "\xa5", "yen;": "\xa5", "yfr;": "\U0001d536", "yicy;": "\u0457", "yopf;": "\U0001d56a", "yscr;": "\U0001d4ce", "yucy;": "\u044e", "yuml": "\xff", "yuml;": "\xff", "zacute;": "\u017a", "zcaron;": "\u017e", "zcy;": "\u0437", "zdot;": "\u017c", "zeetrf;": "\u2128", "zeta;": "\u03b6", "zfr;": "\U0001d537", "zhcy;": "\u0436", "zigrarr;": "\u21dd", "zopf;": "\U0001d56b", "zscr;": "\U0001d4cf", "zwj;": "\u200d", "zwnj;": "\u200c", } replacementCharacters = { 0x0: "\uFFFD", 0x0d: "\u000D", 0x80: "\u20AC", 0x81: "\u0081", 0x81: "\u0081", 0x82: "\u201A", 0x83: "\u0192", 0x84: "\u201E", 0x85: "\u2026", 0x86: "\u2020", 0x87: "\u2021", 0x88: "\u02C6", 0x89: "\u2030", 0x8A: "\u0160", 0x8B: "\u2039", 0x8C: "\u0152", 0x8D: "\u008D", 0x8E: "\u017D", 0x8F: "\u008F", 0x90: "\u0090", 0x91: "\u2018", 0x92: "\u2019", 0x93: "\u201C", 0x94: "\u201D", 0x95: "\u2022", 0x96: "\u2013", 0x97: "\u2014", 0x98: "\u02DC", 0x99: "\u2122", 0x9A: "\u0161", 0x9B: "\u203A", 0x9C: "\u0153", 0x9D: "\u009D", 0x9E: "\u017E", 0x9F: "\u0178", } encodings = { '437': 'cp437', '850': 'cp850', '852': 'cp852', '855': 'cp855', '857': 'cp857', '860': 'cp860', '861': 'cp861', '862': 'cp862', '863': 'cp863', '865': 'cp865', '866': 'cp866', '869': 'cp869', 'ansix341968': 'ascii', 'ansix341986': 'ascii', 'arabic': 'iso8859-6', 'ascii': 'ascii', 'asmo708': 'iso8859-6', 'big5': 'big5', 'big5hkscs': 'big5hkscs', 'chinese': 'gbk', 'cp037': 'cp037', 'cp1026': 'cp1026', 'cp154': 'ptcp154', 'cp367': 'ascii', 'cp424': 'cp424', 'cp437': 'cp437', 'cp500': 'cp500', 'cp775': 'cp775', 'cp819': 'windows-1252', 'cp850': 'cp850', 'cp852': 'cp852', 'cp855': 'cp855', 'cp857': 'cp857', 'cp860': 'cp860', 'cp861': 'cp861', 'cp862': 'cp862', 'cp863': 'cp863', 'cp864': 'cp864', 'cp865': 'cp865', 'cp866': 'cp866', 'cp869': 'cp869', 'cp936': 'gbk', 'cpgr': 'cp869', 'cpis': 'cp861', 'csascii': 'ascii', 'csbig5': 'big5', 'cseuckr': 'cp949', 'cseucpkdfmtjapanese': 'euc_jp', 'csgb2312': 'gbk', 'cshproman8': 'hp-roman8', 'csibm037': 'cp037', 'csibm1026': 'cp1026', 'csibm424': 'cp424', 'csibm500': 'cp500', 'csibm855': 'cp855', 'csibm857': 'cp857', 'csibm860': 'cp860', 'csibm861': 'cp861', 'csibm863': 'cp863', 'csibm864': 'cp864', 'csibm865': 'cp865', 'csibm866': 'cp866', 'csibm869': 'cp869', 'csiso2022jp': 'iso2022_jp', 'csiso2022jp2': 'iso2022_jp_2', 'csiso2022kr': 'iso2022_kr', 'csiso58gb231280': 'gbk', 'csisolatin1': 'windows-1252', 'csisolatin2': 'iso8859-2', 'csisolatin3': 'iso8859-3', 'csisolatin4': 'iso8859-4', 'csisolatin5': 'windows-1254', 'csisolatin6': 'iso8859-10', 'csisolatinarabic': 'iso8859-6', 'csisolatincyrillic': 'iso8859-5', 'csisolatingreek': 'iso8859-7', 'csisolatinhebrew': 'iso8859-8', 'cskoi8r': 'koi8-r', 'csksc56011987': 'cp949', 'cspc775baltic': 'cp775', 'cspc850multilingual': 'cp850', 'cspc862latinhebrew': 'cp862', 'cspc8codepage437': 'cp437', 'cspcp852': 'cp852', 'csptcp154': 'ptcp154', 'csshiftjis': 'shift_jis', 'csunicode11utf7': 'utf-7', 'cyrillic': 'iso8859-5', 'cyrillicasian': 'ptcp154', 'ebcdiccpbe': 'cp500', 'ebcdiccpca': 
'cp037', 'ebcdiccpch': 'cp500', 'ebcdiccphe': 'cp424', 'ebcdiccpnl': 'cp037', 'ebcdiccpus': 'cp037', 'ebcdiccpwt': 'cp037', 'ecma114': 'iso8859-6', 'ecma118': 'iso8859-7', 'elot928': 'iso8859-7', 'eucjp': 'euc_jp', 'euckr': 'cp949', 'extendedunixcodepackedformatforjapanese': 'euc_jp', 'gb18030': 'gb18030', 'gb2312': 'gbk', 'gb231280': 'gbk', 'gbk': 'gbk', 'greek': 'iso8859-7', 'greek8': 'iso8859-7', 'hebrew': 'iso8859-8', 'hproman8': 'hp-roman8', 'hzgb2312': 'hz', 'ibm037': 'cp037', 'ibm1026': 'cp1026', 'ibm367': 'ascii', 'ibm424': 'cp424', 'ibm437': 'cp437', 'ibm500': 'cp500', 'ibm775': 'cp775', 'ibm819': 'windows-1252', 'ibm850': 'cp850', 'ibm852': 'cp852', 'ibm855': 'cp855', 'ibm857': 'cp857', 'ibm860': 'cp860', 'ibm861': 'cp861', 'ibm862': 'cp862', 'ibm863': 'cp863', 'ibm864': 'cp864', 'ibm865': 'cp865', 'ibm866': 'cp866', 'ibm869': 'cp869', 'iso2022jp': 'iso2022_jp', 'iso2022jp2': 'iso2022_jp_2', 'iso2022kr': 'iso2022_kr', 'iso646irv1991': 'ascii', 'iso646us': 'ascii', 'iso88591': 'windows-1252', 'iso885910': 'iso8859-10', 'iso8859101992': 'iso8859-10', 'iso885911987': 'windows-1252', 'iso885913': 'iso8859-13', 'iso885914': 'iso8859-14', 'iso8859141998': 'iso8859-14', 'iso885915': 'iso8859-15', 'iso885916': 'iso8859-16', 'iso8859162001': 'iso8859-16', 'iso88592': 'iso8859-2', 'iso885921987': 'iso8859-2', 'iso88593': 'iso8859-3', 'iso885931988': 'iso8859-3', 'iso88594': 'iso8859-4', 'iso885941988': 'iso8859-4', 'iso88595': 'iso8859-5', 'iso885951988': 'iso8859-5', 'iso88596': 'iso8859-6', 'iso885961987': 'iso8859-6', 'iso88597': 'iso8859-7', 'iso885971987': 'iso8859-7', 'iso88598': 'iso8859-8', 'iso885981988': 'iso8859-8', 'iso88599': 'windows-1254', 'iso885991989': 'windows-1254', 'isoceltic': 'iso8859-14', 'isoir100': 'windows-1252', 'isoir101': 'iso8859-2', 'isoir109': 'iso8859-3', 'isoir110': 'iso8859-4', 'isoir126': 'iso8859-7', 'isoir127': 'iso8859-6', 'isoir138': 'iso8859-8', 'isoir144': 'iso8859-5', 'isoir148': 'windows-1254', 'isoir149': 'cp949', 'isoir157': 'iso8859-10', 'isoir199': 'iso8859-14', 'isoir226': 'iso8859-16', 'isoir58': 'gbk', 'isoir6': 'ascii', 'koi8r': 'koi8-r', 'koi8u': 'koi8-u', 'korean': 'cp949', 'ksc5601': 'cp949', 'ksc56011987': 'cp949', 'ksc56011989': 'cp949', 'l1': 'windows-1252', 'l10': 'iso8859-16', 'l2': 'iso8859-2', 'l3': 'iso8859-3', 'l4': 'iso8859-4', 'l5': 'windows-1254', 'l6': 'iso8859-10', 'l8': 'iso8859-14', 'latin1': 'windows-1252', 'latin10': 'iso8859-16', 'latin2': 'iso8859-2', 'latin3': 'iso8859-3', 'latin4': 'iso8859-4', 'latin5': 'windows-1254', 'latin6': 'iso8859-10', 'latin8': 'iso8859-14', 'latin9': 'iso8859-15', 'ms936': 'gbk', 'mskanji': 'shift_jis', 'pt154': 'ptcp154', 'ptcp154': 'ptcp154', 'r8': 'hp-roman8', 'roman8': 'hp-roman8', 'shiftjis': 'shift_jis', 'tis620': 'cp874', 'unicode11utf7': 'utf-7', 'us': 'ascii', 'usascii': 'ascii', 'utf16': 'utf-16', 'utf16be': 'utf-16-be', 'utf16le': 'utf-16-le', 'utf8': 'utf-8', 'windows1250': 'cp1250', 'windows1251': 'cp1251', 'windows1252': 'cp1252', 'windows1253': 'cp1253', 'windows1254': 'cp1254', 'windows1255': 'cp1255', 'windows1256': 'cp1256', 'windows1257': 'cp1257', 'windows1258': 'cp1258', 'windows936': 'gbk', 'x-x-big5': 'big5'} tokenTypes = { "Doctype": 0, "Characters": 1, "SpaceCharacters": 2, "StartTag": 3, "EndTag": 4, "EmptyTag": 5, "Comment": 6, "ParseError": 7 } tagTokenTypes = frozenset([tokenTypes["StartTag"], tokenTypes["EndTag"], tokenTypes["EmptyTag"]]) prefixes = dict([(v, k) for k, v in namespaces.items()]) prefixes["http://www.w3.org/1998/Math/MathML"] = "math" class 
DataLossWarning(UserWarning): pass class ReparseException(Exception): pass
zwChan/VATEC
~/eb-virt/Lib/site-packages/pip/_vendor/html5lib/constants.py
Python
apache-2.0
86,873
[ "Bowtie" ]
0792cdd8330ffba944a7dc2938de1e717dcac74d67fdc6cc8ee19de80b5e8b11
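The tables above (entities, replacementCharacters, encodings, tokenTypes) are plain module-level dictionaries, so downstream code can consume them with ordinary lookups. The sketch below is illustrative only: it assumes this vendored module is importable as pip._vendor.html5lib.constants (matching the path recorded above), and the helper names and the label-stripping rule in normalize_encoding_label are assumptions of this example, not part of the library.

# Minimal sketch: direct lookups in the dictionaries defined in constants.py.
# Assumptions: the import path below matches the vendored copy; the helper
# names and the label-normalization rule are illustrative, not library API.
from pip._vendor.html5lib.constants import entities, encodings, replacementCharacters

def decode_named_entity(name):
    # Keys carrying a trailing ';' are the standard forms (e.g. "rarr;");
    # a handful of legacy names (e.g. "amp", "copy") also appear without it.
    return entities.get(name)

def normalize_encoding_label(label):
    # The encodings table is keyed by lower-case labels with '-' and '_'
    # removed (e.g. "iso88591", "windows1252"); this strip rule is a guess
    # at how a caller might build such a key.
    key = label.strip().lower().replace("-", "").replace("_", "")
    return encodings.get(key)

print(decode_named_entity("rarr;"))             # u'\u2192' (rightwards arrow)
print(normalize_encoding_label("ISO-8859-1"))   # 'windows-1252'
print(replacementCharacters.get(0x80))          # u'\u20ac' (euro sign)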
#!/usr/bin/env python """ Generate a moltemplate (.lt) file containing a definition of a polymer molecule whose monomers are located at the positions specified in "coords.raw" (a 3-column text file). Monomers will be rotated so that they point in the direction connecting neighbors (r[i+1]-r[i]). The user can specify the subunits to use when building the polymer, the atoms used to build bonds (and angles, and dihedrals) between monomers, and the helical pitch of the polymer. The output of this program is a text file in moltemplate (.lt) format containing the sequence of moltemplate commands needed to build this polymer molecule(s). (One must then run moltemplate on this file to build the LAMMPS simulation files.) Multiple Polymers: To make it easier to create polymer melts, multiple polymers can be created from coordinates in the same file by using the "-cuts" command line argument. Encapsulation: If the "-polymer-name PolyName" command line option is given, then these moltemplate commands will be nested within the definition of a moltemplate object (named "PolyName" in this example). Later in your moltemplate files, you must remember to instantiate a copy of this moltemplate object using a command like "polymer = new PolyName". Atoms within this object will share the same molecule-ID number. If multiple polymers are requested, then each of them will have their own polymer object. """ g_usage_msg = """ Usage: genpoly_lt.py \\ [-bond btype a1 a2] \\ [-helix deltaphi] \\ [-axis x,y,z] \\ [-circular yes/no/connected] \\ [-dir-indices ia ib] \\ [-angle atype a1 a2 a3 i1 i2 i3] \\ [-dihedral dtype a1 a2 a3 a4 i1 i2 i3 i4] \\ [-improper itype a1 a2 a3 a4 i1 i2 i3 i4] \\ [-monomer-name mname] \\ [-sequence sequence.txt] \\ [-polymer-name pname] \\ [-inherits ForceFieldObject] \\ [-header "import \"monomer.lt\""] \\ [-cuts cuts.txt] \\ [-box paddingX,paddingY,paddingZ] \\ < coords.raw > polymer.lt """ import sys import random from math import * class InputError(Exception): """ A generic exception object containing a string for error reporting. (Raising this exception implies that the caller has provided a faulty input file or argument.)
""" def __init__(self, err_msg): self.err_msg = err_msg def __str__(self): return self.err_msg def __repr__(self): return str(self) class GPSettings(object): def __init__(self): self.direction_orig = [1.0, 0.0, 0.0] self.is_circular = False self.connect_ends = False self.delta_phi = 0.0 self.header = 'import \"forcefield.lt\"' self.name_monomer = 'Monomer' self.name_polymer = '' self.inherits = '' self.name_sequence = [] self.dir_index_offsets = (-1,1) self.cuts = [] self.box_padding = None self.bonds_name = [] self.bonds_type = [] self.bonds_atoms = [] self.bonds_index_offsets = [] self.angles_name = [] self.angles_type = [] self.angles_atoms = [] self.angles_index_offsets = [] self.dihedrals_name = [] self.dihedrals_type = [] self.dihedrals_atoms = [] self.dihedrals_index_offsets = [] self.impropers_name = [] self.impropers_type = [] self.impropers_atoms = [] self.impropers_index_offsets = [] def ParseArgs(self, argv): i = 1 while i < len(argv): #sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n') if argv[i].lower() == '-bond': if i + 3 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by 4 strings.\n') # self.bonds_name.append(argv[i+1]) self.bonds_type.append(argv[i + 1]) self.bonds_atoms.append((argv[i + 2], argv[i + 3])) self.bonds_index_offsets.append((0, 1)) del(argv[i:i + 4]) elif argv[i].lower() == '-angle': if i + 7 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by 5 strings and 3 integers.\n') # self.angles_name.append(argv[i+1]) self.angles_type.append(argv[i + 1]) self.angles_atoms.append((argv[i + 2], argv[i + 3], argv[i + 4])) self.angles_index_offsets.append((int(argv[i + 5]), int(argv[i + 6]), int(argv[i + 7]))) if ((self.angles_index_offsets[-1][0] < 0) or (self.angles_index_offsets[-1][1] < 0) or (self.angles_index_offsets[-1][2] < 0)): raise InputError( 'Error: ' + argv[i] + ' indices (i1 i2 i3) must be >= 0\n') del(argv[i:i + 8]) elif argv[i].lower() == '-dihedral': if i + 9 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by 6 strings and 4 integers.\n') # self.dihedrals_name.append(argv[i+1]) self.dihedrals_type.append(argv[i + 1]) self.dihedrals_atoms.append((argv[i + 2], argv[i + 3], argv[i + 4], argv[i + 5])) self.dihedrals_index_offsets.append((int(argv[i + 6]), int(argv[i + 7]), int(argv[i + 8]), int(argv[i + 9]))) if ((self.dihedrals_index_offsets[-1][0] < 0) or (self.dihedrals_index_offsets[-1][1] < 0) or (self.dihedrals_index_offsets[-1][2] < 0) or (self.dihedrals_index_offsets[-1][3] < 0)): raise InputError( 'Error: ' + argv[i] + ' indices (i1 i2 i3 i4) must be >= 0\n') del(argv[i:i + 10]) elif argv[i].lower() == '-improper': if i + 9 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by 6 strings and 4 integers.\n') # self.impropers_name.append(argv[i+1]) self.impropers_type.append(argv[i + 1]) self.impropers_atoms.append((argv[i + 2], argv[i + 3], argv[i + 4], argv[i + 5])) self.impropers_index_offsets.append((int(argv[i + 6]), int(argv[i + 7]), int(argv[i + 8]), int(argv[i + 9]))) if ((self.impropers_index_offsets[-1][0] < 0) or (self.impropers_index_offsets[-1][1] < 0) or (self.impropers_index_offsets[-1][2] < 0) or (self.impropers_index_offsets[-1][3] < 0)): raise InputError( 'Error: ' + argv[i] + ' indices (i1 i2 i3 i4) must be >= 0\n') del(argv[i:i + 10]) elif (argv[i].lower() == '-monomer-name'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a string\n') self.name_monomer = 
argv[i + 1] del(argv[i:i + 2]) elif (argv[i].lower() == '-sequence'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a file name\n') try: f = open(argv[i + 1], "r") except IOError: raise InputError( 'Error: file ' + argv[i + 1] + ' could not be opened for reading\n') self.name_sequence = [] for line_orig in f: line = line_orig.strip() ic = line.find('#') if ic != -1: line = line[:ic] else: line = line.strip() if len(line) > 0: self.name_sequence.append(line) del(argv[i:i + 2]) elif (argv[i].lower() == '-cuts'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a file name\n') try: f = open(argv[i + 1], "r") except IOError: raise InputError( 'Error: file ' + argv[i + 1] + ' could not be opened for reading\n') self.name_sequence = [] for line_orig in f: line = line_orig.strip() ic = line.find('#') if ic != -1: line = line[:ic] else: line = line.strip() if len(line) > 0: try: self.cuts.append(int(line)) except ValueError: raise InputError( 'Error: file ' + argv[i + 1] + ' should contain only nonnegative integers.\n') del(argv[i:i + 2]) elif (argv[i].lower() == '-polymer-name'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a string\n') self.name_polymer = argv[i + 1] del(argv[i:i + 2]) elif (argv[i].lower() == '-inherits'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a string\n') self.inherits = argv[i + 1] if self.inherits.find('inherits ') == 0: self.inherits = ' ' + self.inherits else: self.inherits = ' inherits ' + self.inherits if self.name_polymer == '': self.name_polymer = 'Polymer' # supply a default name del(argv[i:i + 2]) elif (argv[i].lower() == '-header'): if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a string (usually in quotes)\n') self.header = argv[i + 1] del(argv[i:i + 2]) elif argv[i].lower() == '-axis': if i + 1 >= len(argv): raise InputError('Error: ' + argv[i] + ' flag should be followed ' + 'by 3 numbers separated by commas (no spaces)\n') self.direction_orig = map(float, argv[i + 1].split(',')) del(argv[i:i + 2]) elif argv[i].lower() == '-circular': if i + 1 >= len(argv): raise InputError('Error: ' + argv[i] + ' flag should be followed by an argument\n' + ' ("yes", "no", or "connected")\n') if argv[i + 1].lower() == 'yes': self.connect_ends = True self.is_circular = True elif argv[i + 1].lower() == 'connected': self.connect_ends = True self.is_circular = False elif argv[i + 1].lower() == 'no': self.connect_ends = False self.is_circular = False else: raise InputError('Error: ' + argv[i] + ' flag should be followed by an argument\n' + ' ("yes", "no", or "connected")\n') del(argv[i:i + 2]) elif argv[i].lower() == '-helix': if i + 1 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by a number (angle in degrees)\n') self.delta_phi = float(argv[i + 1]) del(argv[i:i + 2]) elif (argv[i].lower() == '-dir-indices'): if i + 2 >= len(argv): raise InputError( 'Error: ' + argv[i] + ' flag should be followed by two integers\n') self.dir_index_offsets = (int(argv[i + 1]), int(argv[i + 2])) if self.dir_index_offsets[0] == self.dir_index_offsets[1]: raise InputError( 'Error: The two numbers following ' + argv[i] + ' must not be equal.\n') del(argv[i:i + 3]) elif (argv[i].lower() == '-box'): if i + 1 >= len(argv): raise InputError('Error: ' + argv[i] + ' flag should be followed ' + 'by 3 numbers separated by commas (no spaces)\n') 
self.box_padding = map(float, argv[i + 1].split(',')) if len(self.box_padding) == 1: self.box_padding = self.box_padding * 3 del(argv[i:i + 2]) # elif ((argv[i][0] == '-') and (__name__ == '__main__')): # # raise InputError('Error('+g_program_name+'):\n'+\ # 'Unrecogized command line argument \"'+argv[i]+\ # '\"\n\n'+\ # __doc__) else: i += 1 for b in range(0, len(self.bonds_type)): if len(self.bonds_type) > 1: self.bonds_name.append('genpoly' + str(b + 1) + '_') else: self.bonds_name.append('genpoly') for b in range(0, len(self.angles_type)): if len(self.angles_type) > 1: self.angles_name.append('genpoly' + str(b + 1) + '_') else: self.angles_name.append('genpoly') for b in range(0, len(self.dihedrals_type)): if len(self.dihedrals_type) > 1: self.dihedrals_name.append('genpoly' + str(b + 1) + '_') else: self.dihedrals_name.append('genpoly') for b in range(0, len(self.impropers_type)): if len(self.impropers_type) > 1: self.impropers_name.append('genpoly' + str(b + 1) + '_') else: self.impropers_name.append('genpoly') class WrapPeriodic(object): """ Wrap() calculates the remainder of i % N. It turns out to be convenient to do this multiple times and later query whether i/N != 0 in any of them once (by checking bounds_err). """ bounds_err = False @classmethod def Wrap(obj, i, N): if i / N != 0: obj.bounds_err = True return i % N def WrapF(obj, x, L): i = floor(x / L) if i != 0: obj.bounds_err = True return x - i * L class GenPoly(object): """ Read coordinates from a file, and generate a list of \"new\" commands in moltemplate format with the position of each monomer located at these positions, oriented appropriately, with bonds (and angles, dihedrals, etc...) connecting successive monomers together. By default (if settings.cuts==False) only a single polymer is created. However this class can create multiple polymers of different lengths. The list of coordinates for each polymer are saved separately within the "self.coords_multi" member. """ def __init__(self): self.settings = GPSettings() self.coords_multi = [] # a list-of-list-of-lists of numbers Nxnx3 self.direction_vects = [] self.box_bounds_min = [0.0, 0.0, 0.0] self.box_bounds_max = [0.0, 0.0, 0.0] self.N = 0 def ParseArgs(self, argv): # The command above will remove arguments from argv which are # understood by GPSettings.ParseArgs(argv). # The remaining arguments will be handled below. self.settings.ParseArgs(argv) def ReadCoords(self, infile): coords = [] lines = infile.readlines() for i in range(0, len(lines)): tokens = lines[i].strip().split() if (len(tokens) == 3): coords.append(map(float, tokens)) self.N = len(coords) if self.N < 2: raise InputError( "Error: Coordinate file must have at least 2 positions.\n") # Now generate self.settings.name_sequence: if len(self.settings.name_sequence) != self.N: self.settings.name_sequence = [self.settings.name_monomer] * self.N self.settings.cuts.append(self.N + 1) self.settings.cuts.sort() i = 0 for j in self.settings.cuts: self.coords_multi.append(coords[i:j]) i = j def ChooseDirections(self, coords): """ Calculate the direction each monomer subunit should be pointing at: """ self.N = len(coords) self.direction_vects = [[0.0, 0.0, 0.0] for i in range(0, self.N + 1)] if self.settings.is_circular: for i in range(0, self.N): # By default, the direction that monomer "i" is pointing is # determined by the position of the monomers before and after it # (at index i-1, and i+1). 
More generally, we allow the user # to choose what these offsets are ("dir_index_offsets[") ia = WrapPeriodic.Wrap(i + self.settings.dir_index_offsets[0], self.N) ib = WrapPeriodic.Wrap(i + self.settings.dir_index_offsets[1], self.N) for d in range(0, 3): self.direction_vects[i][d] = coords[ ib][d] - coords[ia][d] else: for i in range(1, self.N - 1): for d in range(0, 3): self.direction_vects[i][d] = coords[ i + self.settings.dir_index_offsets[1]][d] - coords[ i + self.settings.dir_index_offsets[0]][d] for d in range(0, 3): self.direction_vects[0][d] = coords[1][d] - coords[0][d] self.direction_vects[ self.N - 1][d] = coords[self.N - 1][d] - coords[self.N - 2][d] # Optional: normalize the direction vectors for i in range(0, self.N): direction_len = 0.0 for d in range(0, 3): direction_len += (self.direction_vects[i][d])**2 direction_len = sqrt(direction_len) for d in range(0, 3): self.direction_vects[i][d] /= direction_len # Special case: self.direction_vects[-1] is the direction that the original monomer # in "monomer.lt" was pointing. (By default, 1,0,0 <--> the "x" # direction) self.direction_vects[-1] = self.settings.direction_orig def WriteLTFile(self, outfile): """ Write an moltemplate (.lt) file containing the definition of this polymer object. (If multiple polymer objects were requested by the user (using the -cuts argument), then their definitions will appear nested within this object, and each of them will be instantiated once when the parent object is instantiated.) """ outfile.write(self.settings.header + "\n\n\n") if len(self.coords_multi) == 1: self.WritePolymer(outfile, self.settings.name_polymer + self.settings.inherits, self.coords_multi[0]) else: if self.settings.name_polymer != '': outfile.write(self.settings.name_polymer + " {\n\n") outfile.write('# Definitions of individual polymers to follow\n\n') for i in range(0, len(self.coords_multi)): self.WritePolymer(outfile, self.settings.name_polymer + '_sub' + str(i + 1) + self.settings.inherits, self.coords_multi[i]) outfile.write('\n\n' '# Now instantiate all the polymers (once each)\n\n') for i in range(0, len(self.coords_multi)): outfile.write('polymers[' + str(i) + '] = new ' + self.settings.name_polymer + '_sub' + str(i + 1) + '\n') if self.settings.name_polymer != '': outfile.write('\n\n' '} # ' + self.settings.name_polymer + '\n\n') if self.settings.box_padding != None: for i in range(0, len(self.coords_multi)): # calculate the box big enough to collectively enclose # all of the coordinates (even multiple coordinate sets) self.CalcBoxBoundaries(self.coords_multi[i]) self.WriteBoxBoundaries(outfile) def WritePolymer(self, outfile, name_polymer, coords): """ Write a single polymer object to a file. This function is invoked by WriteLTFile() """ self.ChooseDirections(coords) if name_polymer != '': outfile.write(name_polymer + ' {\n' '\n\n\n' 'create_var {$mol}\n' '# The line above forces all monomer subunits to share the same molecule-ID\n' '# (Note: Setting the molecule-ID number is optional and is usually ignored.)\n\n\n\n') outfile.write(""" # ------------ List of Monomers: ------------ # # (Note: move(), rot(), and rotvv() commands control the position # of each monomer. (See the moltemplate manual for an explanation # of what they do.) Commands enclosed in push() are cumulative # and remain in effect until removed by pop().) 
""" ) outfile.write("push(move(0,0,0))\n") for i in range(0, self.N): #im1 = i-1 # if im1 < 0 or self.settings.connect_ends: # if im1 < 0: # im1 += self.N outfile.write("pop()\n") outfile.write("push(rotvv(" + str(self.direction_vects[i - 1][0]) + "," + str(self.direction_vects[i - 1][1]) + "," + str(self.direction_vects[i - 1][2]) + "," + str(self.direction_vects[i][0]) + "," + str(self.direction_vects[i][1]) + "," + str(self.direction_vects[i][2]) + "))\n") # Recall that self.direction_vects[-1] = # self.settings.direction_orig (usually 1,0,0) outfile.write("push(move(" + str(coords[i][0]) + "," + str(coords[i][1]) + "," + str(coords[i][2]) + "))\n") outfile.write("mon[" + str(i) + "] = new " + self.settings.name_sequence[i] + ".rot(" + str(self.settings.delta_phi * i) + ",1,0,0)\n") assert(len(self.settings.bonds_name) == len(self.settings.bonds_type) == len(self.settings.bonds_atoms) == len(self.settings.bonds_index_offsets)) if len(self.settings.bonds_type) > 0: outfile.write("\n" "\n" "write(\"Data Bonds\") {\n") WrapPeriodic.bounds_err = False for i in range(0, self.N): test = False for b in range(0, len(self.settings.bonds_type)): I = i + self.settings.bonds_index_offsets[b][0] J = i + self.settings.bonds_index_offsets[b][1] I = WrapPeriodic.Wrap(I, self.N) J = WrapPeriodic.Wrap(J, self.N) if WrapPeriodic.bounds_err: WrapPeriodic.bounds_err = False if not self.settings.connect_ends: continue outfile.write( " $bond:" + self.settings.bonds_name[b] + str(i + 1)) if len(self.settings.bonds_type) > 1: outfile.write("_" + str(b + 1)) outfile.write(" @bond:" + self.settings.bonds_type[b] + " $atom:mon[" + str(I) + "]/" + self.settings.bonds_atoms[ b][0] + " $atom:mon[" + str(J) + "]/" + self.settings.bonds_atoms[b][1] + "\n") if len(self.settings.bonds_type) > 0: outfile.write("} # write(\"Data Bonds\") {...\n\n\n") assert(len(self.settings.angles_name) == len(self.settings.angles_type) == len(self.settings.angles_atoms) == len(self.settings.angles_index_offsets)) if len(self.settings.angles_type) > 0: outfile.write("\n" "\n" "write(\"Data Angles\") {\n") for i in range(0, self.N): for b in range(0, len(self.settings.angles_type)): I = i + self.settings.angles_index_offsets[b][0] J = i + self.settings.angles_index_offsets[b][1] K = i + self.settings.angles_index_offsets[b][2] I = WrapPeriodic.Wrap(I, self.N) J = WrapPeriodic.Wrap(J, self.N) K = WrapPeriodic.Wrap(K, self.N) if WrapPeriodic.bounds_err: WrapPeriodic.bounds_err = False if not self.settings.connect_ends: continue outfile.write( " $angle:" + self.settings.angles_name[b] + str(i + 1)) if len(self.settings.angles_type) > 1: outfile.write("_" + str(b + 1)) outfile.write(" @angle:" + self.settings.angles_type[b] + " $atom:mon[" + str(I) + "]/" + self.settings.angles_atoms[b][0] + " $atom:mon[" + str(J) + "]/" + self.settings.angles_atoms[b][1] + " $atom:mon[" + str(K) + "]/" + self.settings.angles_atoms[b][2] + "\n") if len(self.settings.angles_type) > 0: outfile.write("} # write(\"Data Angles\") {...\n\n\n") assert(len(self.settings.dihedrals_name) == len(self.settings.dihedrals_type) == len(self.settings.dihedrals_atoms) == len(self.settings.dihedrals_index_offsets)) if len(self.settings.dihedrals_type) > 0: outfile.write("\n" "\n" "write(\"Data Dihedrals\") {\n") for i in range(0, self.N): for b in range(0, len(self.settings.dihedrals_type)): I = i + self.settings.dihedrals_index_offsets[b][0] J = i + self.settings.dihedrals_index_offsets[b][1] K = i + self.settings.dihedrals_index_offsets[b][2] L = i + 
self.settings.dihedrals_index_offsets[b][3] I = WrapPeriodic.Wrap(I, self.N) J = WrapPeriodic.Wrap(J, self.N) K = WrapPeriodic.Wrap(K, self.N) L = WrapPeriodic.Wrap(L, self.N) if WrapPeriodic.bounds_err: WrapPeriodic.bounds_err = False if not self.settings.connect_ends: continue outfile.write(" $dihedral:" + self.settings.dihedrals_name[b] + str(i + 1)) if len(self.settings.dihedrals_type) > 1: outfile.write("_" + str(b + 1)) outfile.write(" @dihedral:" + self.settings.dihedrals_type[b] + " $atom:mon[" + str(I) + "]/" + self.settings.dihedrals_atoms[b][0] + " $atom:mon[" + str(J) + "]/" + self.settings.dihedrals_atoms[b][1] + " $atom:mon[" + str(K) + "]/" + self.settings.dihedrals_atoms[b][2] + " $atom:mon[" + str(L) + "]/" + self.settings.dihedrals_atoms[b][3] + "\n") if len(self.settings.dihedrals_type) > 0: outfile.write("} # write(\"Data Dihedrals\") {...\n\n\n") assert(len(self.settings.impropers_name) == len(self.settings.impropers_type) == len(self.settings.impropers_atoms) == len(self.settings.impropers_index_offsets)) if len(self.settings.impropers_type) > 0: outfile.write("\n" "\n" "write(\"Data Impropers\") {\n") for i in range(0, self.N): for b in range(0, len(self.settings.impropers_type)): I = i + self.settings.impropers_index_offsets[b][0] J = i + self.settings.impropers_index_offsets[b][1] K = i + self.settings.impropers_index_offsets[b][2] L = i + self.settings.impropers_index_offsets[b][3] I = WrapPeriodic.Wrap(I, self.N) J = WrapPeriodic.Wrap(J, self.N) K = WrapPeriodic.Wrap(K, self.N) L = WrapPeriodic.Wrap(L, self.N) if WrapPeriodic.bounds_err: WrapPeriodic.bounds_err = False if not self.settings.connect_ends: continue outfile.write(" $improper:" + self.settings.impropers_name[b] + str(i + 1)) if len(self.settings.impropers_type) > 1: outfile.write("_" + str(b + 1)) outfile.write(" @improper:" + self.settings.impropers_type[b] + " $atom:mon[" + str(I) + "]/" + self.settings.impropers_atoms[b][0] + " $atom:mon[" + str(J) + "]/" + self.settings.impropers_atoms[b][1] + " $atom:mon[" + str(K) + "]/" + self.settings.impropers_atoms[b][2] + " $atom:mon[" + str(L) + "]/" + self.settings.impropers_atoms[b][3] + "\n") if len(self.settings.impropers_type) > 0: outfile.write("} # write(\"Data Impropers\") {...\n\n\n") if name_polymer != '': outfile.write("} # " + name_polymer + "\n\n\n\n") def CalcBoxBoundaries(self, coords): N = len(coords) for i in range(0, N): for d in range(0, 3): if not self.box_bounds_min: assert(not self.box_bounds_max) self.box_bounds_min = [xd for xd in coords[i]] self.box_bounds_max = [xd for xd in coords[i]] else: if coords[i][d] > self.box_bounds_max[d]: self.box_bounds_max[d] = coords[i][d] if coords[i][d] < self.box_bounds_min[d]: self.box_bounds_min[d] = coords[i][d] def WriteBoxBoundaries(self, outfile): for d in range(0, 3): self.box_bounds_min[d] -= self.settings.box_padding[d] self.box_bounds_max[d] += self.settings.box_padding[d] outfile.write("\n# ---------------- simulation box -----------------\n" "# Now define a box big enough to hold a polymer with this (initial) shape\n" "\n\n" "write_once(\"Data Boundary\") {\n" + str(self.box_bounds_min[0]) + " " + str(self.box_bounds_max[0]) + " xlo xhi\n" + str(self.box_bounds_min[1]) + " " + str(self.box_bounds_max[1]) + " ylo yhi\n" + str(self.box_bounds_min[2]) + " " + str(self.box_bounds_max[2]) + " zlo zhi\n" "}\n\n\n") def main(): try: g_program_name = __file__.split('/')[-1] g_version_str = '0.0.5' g_date_str = '2017-4-14' sys.stderr.write(g_program_name + ' v' + g_version_str + ' ' + 
g_date_str + '\n') argv = [arg for arg in sys.argv] infile = sys.stdin outfile = sys.stdout genpoly = GenPoly() genpoly.ParseArgs(argv) # Any remaining arguments? if len(argv) > 1: raise InputError('Error(' + g_program_name + '):\n' + 'Unrecognized command line argument \"' + argv[1] + '\"\n\n' + g_usage_msg) genpoly.ReadCoords(infile) genpoly.WriteLTFile(outfile) except (ValueError, InputError) as err: sys.stderr.write('\n' + str(err) + '\n') sys.exit(-1) return if __name__ == '__main__': main()
ramisetti/lammps
tools/moltemplate/moltemplate/genpoly_lt.py
Python
gpl-2.0
34,035
[ "LAMMPS" ]
6679d3c6daed7906664504260bf33c48284a90d161716917c7ed6d562f3e2768
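The genpoly_lt.py record above builds bonds, angles, dihedrals, and impropers by offsetting each monomer index and wrapping the result through WrapPeriodic.Wrap; any interaction that would cross the chain ends is dropped unless connect_ends is set. The standalone sketch below reproduces only that wrap-and-skip pattern; wrap and bond_indices are hypothetical helpers, not part of moltemplate.

def wrap(i, n):
    # Wrap index i into [0, n) and report whether wrapping occurred,
    # approximating what WrapPeriodic.Wrap signals via its bounds_err flag.
    out_of_bounds = (i < 0) or (i >= n)
    return i % n, out_of_bounds

def bond_indices(n_monomers, offsets, connect_ends=False):
    # Yield (I, J) monomer index pairs for one bond type with the given offsets.
    for i in range(n_monomers):
        I, wrapped_I = wrap(i + offsets[0], n_monomers)
        J, wrapped_J = wrap(i + offsets[1], n_monomers)
        if (wrapped_I or wrapped_J) and not connect_ends:
            continue  # skip interactions that would cross the open chain ends
        yield I, J

print(list(bond_indices(5, (0, 1))))        # linear chain: [(0, 1), (1, 2), (2, 3), (3, 4)]
print(list(bond_indices(5, (0, 1), True)))  # ring polymer: adds the closing bond (4, 0)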
# Parallel IO # # Written by Konrad Hinsen <hinsen@cnrs-orleans.fr> # last revision: 2002-3-22 # """This module provides parallel acces to netCDF files. One netCDF dimension is defined for splitting the data among processors such that each processor is responsible for one slice of the file along that dimension. Since netCDF files can be very big, the distribution algorithm gives priority to memory efficiency over CPU time efficiency. The processor that handles the file treats only one slice per superstep, which means that at no time more than one slice must be stored in any processor. """ from Scientific.IO.NetCDF import NetCDFFile from core import ParClass, ParBase, ParInvalid, is_invalid import Numeric class _ParNetCDFFile(ParBase): """Distributed netCDF file Constructor: ParNetCDFFile(|filename|, |split_dimension|, |mode|='r', |local_access| = 0) Arguments: |filename| -- the name of the netCDF file |split_dimension| -- the name of the dimension along which the data is distributed over the processors |mode| -- read ('r'), write ('w'), or append ('a'). Default is 'r'. |local_access| -- if 0 (default), processor 0 is the only one to access the file, all others communicate with processor 0. If 1 (only for reading), each processor accesses the file directly. In the latter case, the file must be accessible on all processors under the same name. A third mode is 'auto', which uses some heuristics to decide if the file is accessible everywhere: it checks for existence of the file, then compares the size on all processors, and finally verifies that the same variables exist everywhere, with identical names, types, and sizes. A ParNetCDFFile object acts as much as possible like a NetCDFFile object. Variables become ParNetCDFVariable objects, which behave like distributed sequences. Variables that use the dimension named by |split_dimension| are automatically distributed among the processors such that each treats only one slice of the whole file. 
""" def __parinit__(self, pid, nprocs, filename, split_dimension, mode = 'r', local_access = 0): if mode != 'r': local_access = 0 self.pid = pid self.nprocs = nprocs self.filename = filename self.split = split_dimension self.local_access = local_access self.read_only = mode == 'r' if local_access or pid == 0: self.file = NetCDFFile(filename, mode) try: length = self.file.dimensions[split_dimension] if length is None: length = -1 except KeyError: length = None vars = {} for name, var in self.file.variables.items(): vars[name] = (name, var.dimensions) if length < 0 and split_dimension in var.dimensions: index = list(var.dimensions).index(split_dimension) length = var.shape[index] else: self.file = None self.split = split_dimension length = None vars = None if not local_access: length = self.broadcast(length) vars = self.broadcast(vars) if length is not None: self._divideData(length) self.variables = {} for name, var in vars.items(): self.variables[name] = _ParNetCDFVariable(self, var[0], var[1], split_dimension) def __repr__(self): return repr(self.filename) def close(self): if self.local_access or self.pid == 0: self.file.close() def createDimension(self, name, length): if name == self.split: if length is None: raise ValueError, "Split dimension cannot be unlimited" self._divideData(length) if self.pid == 0: self.file.createDimension(name, length) def createVariable(self, name, typecode, dimensions): if self.pid == 0: var = self.file.createVariable(name, typecode, dimensions) dim = var.dimensions else: dim = 0 name, dim = self.broadcast((name, dim)) self.variables[name] = _ParNetCDFVariable(self, name, dim, self.split) return self.variables[name] def _divideData(self, length): chunk = (length+self.nprocs-1)/self.nprocs self.first = min(self.pid*chunk, length) self.last = min(self.first+chunk, length) if (not self.local_access) and self.pid == 0: self.parts = [] for pid in range(self.nprocs): first = pid*chunk last = min(first+chunk, length) self.parts.append((first, last)) def sync(self): if self.pid == 0: self.file.sync() flush = sync class _ParNetCDFVariable(ParBase): def __init__(self, file, name, dimensions, split_dimension): self.file = file self.pid = file.pid self.nprocs = file.nprocs self.name = name self.dimensions = dimensions self.value = self self.attributes = {} try: self.index = list(dimensions).index(split_dimension) except ValueError: self.index = None def __repr__(self): return repr(self.name) def __getitem__(self, item): item = self._prepareIndices(item) if self.file.local_access : data = self._readData(item, self.file.first, self.file.last) elif self.pid == 0: for pid in range(1, self.nprocs): first, last = self.file.parts[pid] data = self._readData(item, first, last) self.put(data, [pid]) data = self._readData(item, self.file.first, self.file.last) else: for pid in range(1, self.nprocs): messages = self.put(None, []) if messages: data = messages[0] if data is None: return ParInvalid else: return data def __getslice__(self, first, last): return self.__getitem__(slice(first, last)) def __setitem__(self, item, value): item = self._prepareIndices(item) if is_invalid(value): value = None if self.pid == 0: if value is not None: self._writeData(item, value, self.file.first, self.file.last) if self.index is not None: for pid in range(1, self.nprocs): first, last = self.file.parts[pid] data = self.put(None, []) if data and data[0] is not None: self._writeData(item, data[0], first, last) else: if self.index is not None: for pid in range(1, self.nprocs): if pid == self.pid: 
self.put(value, [0]) else: self.put(None, []) def __setslice__(self, first, last, value): self.__setitem__(slice(first, last), value) def _prepareIndices(self, item): if not hasattr(item, 'is_parindex'): if type(item) != type(()): item = (item,) item = item + (len(self.dimensions)-len(item))*(slice(None),) return item def _readData(self, item, part_first, part_last): item = self._indices(item, part_first, part_last) if item is None: return None else: return self.file.file.variables[self.name][item] def _writeData(self, item, data, part_first, part_last): try: if len(data) == 0: return except TypeError: pass item = self._indices(item, part_first, part_last) if item is not None: try: self.file.file.variables[self.name][item] = Numeric.array(data) except: print self.file.file.variables[self.name].shape print item print Numeric.array(data).shape raise def _indices(self, item, part_first, part_last): if hasattr(item, 'is_parindex'): if not item.valid: return None if item.skip == 0: return item.start+part_first else: return slice(item.start+part_first, item.stop+part_first, item.skip) if self.index is not None: split = item[self.index] if type(split) == type(0): raise ValueError, "Must use slice along split dimension" first, last, skip = split.start, split.stop, split.step if first is None: first = 0 if skip is None: skip = 1 n1 = max(0, (part_first-first+skip-1)/skip) first = first + n1*skip if last is None: last = part_last last = min(last, part_last) item = item[:self.index] + (slice(first, last, skip),) + \ item[self.index+1:] return item def __getattr__(self, attr): if self.file.local_access: return getattr(self.file.file.variables[self.name], attr) try: return self.attributes[attr] except KeyError: pass if self.pid == 0: value = getattr(self.file.file.variables[self.name], attr) else: value = None value = self.broadcast(value) if self.file.read_only: self.attributes[attr] = value return value def __len__(self): return self.file.last - self.file.first ParNetCDFVariable = ParClass(_ParNetCDFVariable) ParNetCDFFile = ParClass(_ParNetCDFFile)
OS2World/DEV-PYTHON-UTIL-ScientificPython
src/Lib/site-packages/Scientific/BSP/IO.py
Python
isc
10,176
[ "NetCDF" ]
70205a4b4a2c848792c2043825927a159bbede5249668ee6dcda8dfe60202236
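The Scientific.BSP IO record above distributes a netCDF file by giving each processor one contiguous slice of the split dimension; _ParNetCDFFile._divideData computes the per-processor (first, last) ranges with a ceiling-division chunk size. A standalone sketch of the same chunking scheme follows; divide is a hypothetical helper, not part of the module.

def divide(length, nprocs):
    # One contiguous (first, last) slice per processor along the split dimension.
    chunk = (length + nprocs - 1) // nprocs   # ceiling division
    return [(min(pid * chunk, length), min(pid * chunk + chunk, length))
            for pid in range(nprocs)]

print(divide(10, 4))   # [(0, 3), (3, 6), (6, 9), (9, 10)]
print(divide(3, 4))    # [(0, 1), (1, 2), (2, 3), (3, 3)] -- last processor gets an empty slice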
#!/usr/bin/python # # This source file is part of appleseed. # Visit https://appleseedhq.net/ for additional information and resources. # # This software is released under the MIT license. # # Copyright (c) 2010-2013 Francois Beaune, Jupiter Jazz Limited # Copyright (c) 2014-2018 Francois Beaune, The appleseedhq Organization # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # from __future__ import print_function import argparse import datetime import os import re import subprocess import sys from utils import print_runtime_details # local module # ------------------------------------------------------------------------------------------------- # Constants. # ------------------------------------------------------------------------------------------------- VERSION = "1.0" # ------------------------------------------------------------------------------------------------- # Utility functions. # ------------------------------------------------------------------------------------------------- def safe_make_directory(path): if not os.path.isdir(path): os.makedirs(path) # ------------------------------------------------------------------------------------------------- # Logger. # ------------------------------------------------------------------------------------------------- class Logger: def __init__(self, directory): now = datetime.datetime.now() self.filename = now.strftime("benchmark.%Y%m%d.%H%M%S.txt") self.filepath = os.path.join(directory, self.filename) self.file = open(self.filepath, "w", 0) # 0: no buffering def get_log_file_path(self): return self.filepath def write(self, s=""): self.file.write(s + "\n") print(s) # ------------------------------------------------------------------------------------------------- # Benchmarking and reporting code. 
# ------------------------------------------------------------------------------------------------- def benchmark_projects(appleseed_path, appleseed_args, logger): for dirpath, dirnames, filenames in os.walk("."): if dirpath.endswith(".skip"): continue for filename in filenames: if os.path.splitext(filename)[1] == ".appleseed": benchmark_project(os.path.join(dirpath, filename), appleseed_path, appleseed_args, logger) def benchmark_project(project_path, appleseed_path, appleseed_args, logger): project_name = os.path.splitext(os.path.split(project_path)[1])[0] logger.write("Benchmarking {0} scene...".format(project_name)) command_line = [appleseed_path, project_path] + appleseed_args command_line += ["--benchmark-mode"] command_line += ["-o", os.path.join("renders", project_name + ".png")] output = subprocess.check_output(command_line, stderr=subprocess.STDOUT) if was_successful(output): process_output(output, logger) else: logger.write(output) def was_successful(output): return get_value(output, "result") == "success" def process_output(output, logger): setup_time = float(get_value(output, "setup_time")) render_time = float(get_value(output, "render_time")) total_time = float(get_value(output, "total_time")) logger.write(" Setup Time : {0} seconds".format(setup_time)) logger.write(" Render Time : {0} seconds".format(render_time)) logger.write(" Total Time : {0} seconds".format(total_time)) logger.write() def get_value(output, key): pattern = r"^{0}=(.*)[\r\n]+$".format(key) match = re.search(pattern, output, re.MULTILINE) return match.group(1) if match else None # ------------------------------------------------------------------------------------------------- # Entry point. # ------------------------------------------------------------------------------------------------- def print_configuration(appleseed_path, appleseed_args, logger): logger.write("Configuration:") logger.write(" Log file : {0}".format(logger.get_log_file_path())) logger.write(" Path to appleseed : {0}".format(appleseed_path)) logger.write(" appleseed command line : {0}".format(" ".join(appleseed_args))) logger.write() def main(): parser = argparse.ArgumentParser(description="benchmark appleseed") parser.add_argument("appleseed_path", help="set the path to the appleseed.cli tool") parser.add_argument("appleseed_args", nargs=argparse.REMAINDER, help="forward additional arguments to appleseed") args = parser.parse_args() appleseed_path = args.appleseed_path appleseed_args = args.appleseed_args safe_make_directory("logs") logger = Logger("logs") print_runtime_details("appleseed.benchmark", VERSION, os.path.realpath(__file__), print_function=logger.write) print_configuration(appleseed_path, appleseed_args, logger) safe_make_directory("renders") start_time = datetime.datetime.now() benchmark_projects(appleseed_path, appleseed_args, logger) elapsed_time = datetime.datetime.now() - start_time logger.write("\nTotal suite time: {0}\n".format(elapsed_time)) if __name__ == "__main__": main()
est77/appleseed
scripts/appleseed.benchmark.py
Python
mit
6,209
[ "VisIt" ]
445e6208731d46e924f5a1802fbcb31fe40734e329995136bcf9a8d7cca857c1
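The appleseed.benchmark record above runs appleseed.cli with --benchmark-mode and scrapes key=value lines (result, setup_time, render_time, total_time) out of the captured output. Below is a simplified, self-contained variant of that parsing; the sample output string is invented, and this pattern omits the trailing line-break handling of the script's own get_value.

import re

def get_value(output, key):
    # Match a single "key=value" line anywhere in the captured output.
    match = re.search(r"^{0}=(.*)$".format(key), output, re.MULTILINE)
    return match.group(1) if match else None

sample = "result=success\nsetup_time=1.25\nrender_time=42.0\ntotal_time=43.25\n"
print(get_value(sample, "result"))               # success
print(float(get_value(sample, "render_time")))   # 42.0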
"""Generate example cube plots for dissertation Author ------ Shankar Kulumani GWU skulumani@gwu.edu """ import os import numpy as np from point_cloud import wavefront from visualization import graphics view = {'azimuth': 16.944197132093564, 'elevation': 66.34177792039738, 'distance': 2.9356815748114435, 'focalpoint': np.array([0.20105769, -0.00420018, -0.016934045])} def plot_data(pt, v, f, D, P, V, E, F, string='Closest Primitive', radius=0.1, size=(800*1.6, 800)): # draw the mayavi figure mfig = graphics.mayavi_figure(size=size) graphics.mayavi_addMesh(mfig, v, f) graphics.mayavi_addPoint(mfig, pt, radius=radius, color=(0, 1, 0)) if P.any(): graphics.mayavi_points3d(mfig, P, scale_factor=radius, color=(1, 0, 0)) # different color for each face if F.size: try: _ = iter(F[0]) for f_list in F: for f_ind in f_list: face_verts = v[f[f_ind,:],:] graphics.mayavi_addMesh(mfig, face_verts, [(0, 1, 2)], color=tuple(np.random.rand(3))) except IndexError as err: face_verts = v[f[F,:],:] graphics.mayavi_addMesh(mfig, face_verts, [(0, 1, 2)], color=tuple(np.random.rand(3))) except (TypeError,) as err: for f_ind in F: face_verts = v[f[f_ind,:],:] graphics.mayavi_addMesh(mfig, face_verts, [(0, 1, 2)], color=tuple(np.random.rand(3))) # draw the points which make up the edges and draw a line for the edge if V.size: try: _ = iter(V) for v_ind in V: graphics.mayavi_addPoint(mfig, v[v_ind,:], radius=radius, color=(0, 0, 1)) except TypeError as err: graphics.mayavi_addPoint(mfig, v[V, :], radius=radius, color=(0, 0, 1)) # draw edges if E.size: try: _ = iter(E[0][0]) for e_list in E: for e_ind in e_list: graphics.mayavi_addLine(mfig, v[e_ind[0],:], v[e_ind[1], :], color=(0, 0, 0)) except IndexError as err: graphics.mayavi_addLine(mfig, v[E[0],:], v[E[1], :], color=(0, 0, 0)) except (TypeError,) as err: for e_ind in E: graphics.mayavi_addLine(mfig, v[e_ind[0],:], v[e_ind[1], :], color=(0, 0, 0)) graphics.mayavi_addTitle(mfig, string, color=(0, 0, 0), size=0.5) return mfig def cube_mesh_with_vertices_edges_faces(img_path): """Plot the example cube with all faces, vertices, and edges """ filename = os.path.join(img_path,'cube_mesh.jpg') size = (800*1.618, 800) # read the cube v, f = wavefront.read_obj('./integration/cube.obj') # create the figure mfig = graphics.mayavi_figure(size=size, bg=(1, 1, 1)) # draw the mesh mesh = graphics.mayavi_addMesh(mfig, v, f, color=(0.5, 0.5, 0.5), representation='surface') # draw all the vertices points = graphics.mayavi_points3d(mfig, v, scale_factor=0.1, color=(0, 0, 1)) # draw the edges mesh_edges = graphics.mayavi_addMesh(mfig, v, f, color=(1, 0, 0), representation='mesh') graphics.mlab.view(azimuth=view['azimuth'], elevation=view['elevation'], distance=view['distance'], focalpoint=view['focalpoint'], figure=mfig) # save the figure to eps graphics.mlab.savefig(filename, magnification=4) def cube_closest_vertex(img_path): filename = os.path.join(img_path, 'cube_closest_vertex.jpg') size = (800*1.618, 800) pt = np.array([0.7, 0.7, 0.7]) v, f = wavefront.read_obj('./integration/cube.obj') mesh_parameters = wavefront.polyhedron_parameters(v, f) edge_vertex_map = mesh_parameters.edge_vertex_map edge_face_map = mesh_parameters.edge_face_map normal_face = mesh_parameters.normal_face vf_map = mesh_parameters.vertex_face_map D, P, V, E, F = wavefront.distance_to_vertices(pt, v, f, normal_face, edge_vertex_map, edge_face_map, vf_map) mfig = plot_data(pt, v, f, D, P, V, E, F, '') graphics.mlab.view(azimuth=view['azimuth'], elevation=view['elevation'], distance=view['distance'], 
focalpoint=view['focalpoint'], figure=mfig) graphics.mlab.savefig(filename, magnification=4) return mfig def cube_closest_edge(img_path): filename = os.path.join(img_path, 'cube_closest_edge.jpg') size = (800*1.618, 800) pt = np.array([0.7, 0.7, 0]) v, f = wavefront.read_obj('./integration/cube.obj') mesh_parameters = wavefront.polyhedron_parameters(v, f) edge_vertex_map = mesh_parameters.edge_vertex_map edge_face_map = mesh_parameters.edge_face_map normal_face = mesh_parameters.normal_face vf_map = mesh_parameters.vertex_face_map D, P, V, E, F = wavefront.distance_to_edges(pt, v, f, normal_face, edge_vertex_map, edge_face_map, vf_map) mfig = plot_data(pt, v, f, D, P, V, E, F, '') graphics.mlab.view(azimuth=view['azimuth'], elevation=view['elevation'], distance=view['distance'], focalpoint=view['focalpoint'], figure=mfig) graphics.mlab.savefig(filename, magnification=4) return mfig def cube_closest_face(img_path): filename = os.path.join(img_path, 'cube_closest_face.jpg') size = (800*1.618, 800) pt = np.array([0.7, 0.2, 0]) v, f = wavefront.read_obj('./integration/cube.obj') mesh_parameters = wavefront.polyhedron_parameters(v, f) edge_vertex_map = mesh_parameters.edge_vertex_map edge_face_map = mesh_parameters.edge_face_map normal_face = mesh_parameters.normal_face vf_map = mesh_parameters.vertex_face_map D, P, V, E, F = wavefront.distance_to_faces(pt, v, f, normal_face, edge_vertex_map, edge_face_map, vf_map) mfig = plot_data(pt, v, f, D, P, V, E, F, '') graphics.mlab.view(azimuth=view['azimuth'], elevation=view['elevation'], distance=view['distance'], focalpoint=view['focalpoint'], figure=mfig) graphics.mlab.savefig(filename, magnification=4) return mfig if __name__ == "__main__": img_path = '/tmp/mayavi_figure' if not os.path.exists(img_path): os.makedirs(img_path) cube_mesh_with_vertices_edges_faces(img_path) cube_closest_vertex(img_path) cube_closest_edge(img_path) cube_closest_face(img_path)
skulumani/asteroid_dumbbell
dissertation/mesh_update.py
Python
gpl-3.0
7,250
[ "Mayavi" ]
bd323f21e9070711a88cc00c722be1a85a99eea02a9f4912fc22a309775b9e5d
import numpy as np import h5py, warnings, sys from .util import ProgressBar, Progress from .config import * try: import cv2 except: cv2 = None from skimage import transform as sktf from skimage.feature import match_template def motion_correct(mov, max_iters=5, shift_threshold=1., reslice=slice(None,None), in_place=True, verbose=True, compute_kwargs={}, apply_kwargs={}): """Perform motion correction using template matching. max_iters : int maximum number of iterations shift_threshold : float absolute max shift value below which to exit reslice : slice used to reslice movie, example: slice(1,None,2) gives every other frame starting from 2nd frame in_place : bool perform on same memory as supplied verbose : bool show status compute_kwargs : dict kwargs for compute_motion_correction apply_kwargs : dict kwargs for apply_motion_correction Returns -------- corrected movie, template, values Note that this function is a convenience function for calling compute_motion_correction followed by apply_motion_correction, multiple times and combining results """ if not in_place: mov = mov.copy() mov = mov[reslice] all_vals = [] for it in range(max_iters): if verbose: print('Iteration {}'.format(it)); sys.stdout.flush() template,vals = compute_motion(mov, **compute_kwargs) mov = apply_motion_correction(mov, vals, **apply_kwargs) maxshifts = np.abs(vals[:,[0,1]]).max(axis=0) all_vals.append(vals) if verbose: print('Shifts: {}'.format(str(maxshifts))); sys.stdout.flush() if np.all(maxshifts < shift_threshold): break # combine values from iterations all_vals = np.array(all_vals) return_vals = np.empty([all_vals.shape[1],all_vals.shape[2]]) return_vals[:,[0,1]] = all_vals[:,:,[0,1]].sum(axis=0) return_vals[:,2] = all_vals[-1,:,2] return mov,template,return_vals def retrieve_motion_correction_data(datafile, filename): with h5py.File(datafile) as mc: shifts_local = np.asarray(mc[filename]['shifts']) shifts_global = np.asarray(mc['global_shifts']) global_names = mc['global_shifts'].attrs['filenames'] maxshift = mc.attrs['max_shift'] global_names = [i.decode('UTF8') for i in global_names] shift_global = shifts_global[global_names.index(filename)] shifts = (shifts_local+shift_global)[:,:2] return shifts, maxshift def apply_motion_correction(mov, shifts, interpolation=None, crop=None, in_place=False, verbose=True): """Apply shifts to mov in order to correct motion Parameters ---------- mov : pyfluo.Movie input movie shifts : np.ndarray obtained from the function compute_motion, list of [x_shift, y_shift] for each frame. if more than 2 columns, assumes first 2 are the desired ones interpolation : def interpolation flag for cv2.warpAffine, defaults to cv2.INTER_LINEAR crop : bool / int whether to crop image to borders of correction. if True, crops to maximum adjustments. if int, crops that number of pixels off all sides in_place : bool in place verbose : bool display progress This supports the correction of single frames as well, given a single shift """ if interpolation is None and cv2 is not None: interpolation = cv2.INTER_LINEAR if not in_place: mov=mov.copy() if mov.ndim==2: mov = mov[None,...] if type(shifts) in [str] and mov.filename: shifts,crop_ = retrieve_motion_correction_data(shifts, mov.filename) if crop is None: crop = crop_ if shifts.ndim==1: shifts = shifts[None,...] 
if shifts.ndim==2 and shifts.shape[1]==3: shifts = shifts[:,:2] assert shifts.ndim==2 and shifts.shape[1]==2 t,h,w=mov.shape if verbose: print('Applying shifts:') pbar = ProgressBar(maxval=len(mov)).start() for i,frame in enumerate(mov): sh_x_n, sh_y_n = shifts[i] if cv2 is not None: M = np.float32([[1,0,sh_x_n],[0,1,sh_y_n]]) mov[i] = cv2.warpAffine(frame,M,(w,h),flags=interpolation) elif cv2 is None: M = np.float32([[1,0,sh_y_n],[0,1,sh_x_n],[0,0,1]]) transform = sktf.AffineTransform(matrix=M) mov[i] = sktf.warp(frame, transform) if verbose: pbar.update(i) if verbose: pbar.finish() if crop: if crop == True: ymax = int(min([0, min(shifts[:,0])]) or None) xmax = int(min([0, min(shifts[:,1])]) or None) ymin = int(max(shifts[:,0])) xmin = int(max(shifts[:,1])) elif isinstance(crop, PF_numeric_types): crop = int(crop) ymax,xmax = -crop,-crop ymin,xmin = crop,crop mov = mov[:, ymin:ymax, xmin:xmax] return mov.squeeze() def compute_motion(mov, max_shift=(25,25), template=np.median, template_matching_method=None, resample=4, verbose=True): """Compute template, shifts, and correlations associated with template-matching-based motion correction Parameters ---------- mov : pyfluo.Movie input movie max_shift : int / list-like maximum number of pixels to shift frame on each iteration (by axis if list-like) template : np.ndarray / def if array, template to be used. if function, that used to compute template (defaults to np.median) template_matching_method : opencv constant method parameter for cv2.matchTemplate resample : int avg every n frames before computing template verbose : bool show progress details, defaults to True Returns ------- template: np.ndarray the template used shifts : np.ndarray one row per frame, (y, x, metric) """ if template_matching_method is None and cv2 is not None: template_matching_method = cv2.TM_CCORR_NORMED # Parse movie mov = mov.astype(np.float32) n_frames,h_i, w_i = mov.shape # Parse max_shift param if type(max_shift) in [int,float]: ms_h = max_shift ms_w = max_shift elif type(max_shift) in [tuple, list, np.ndarray]: ms_h,ms_w = max_shift else: raise Exception('Max shift should be given as value or 2-item list') # Parse/generate template if callable(template): movr = mov.resample(resample) with Progress(msg='Computing template', verbose=verbose): template=template(movr,axis=0) elif not isinstance(template, np.ndarray): raise Exception('Template parameter should be an array or function') template_uncropped = template.astype(np.float32) template=template_uncropped[ms_h:h_i-ms_h,ms_w:w_i-ms_w] h,w = template.shape vals = np.zeros([n_frames,3]) if verbose: print('Computing shifts:'); sys.stdout.flush() pbar = ProgressBar(maxval=n_frames).start() for i,frame in enumerate(mov): if verbose: pbar.update(i) if cv2 is not None: res = cv2.matchTemplate(frame, template, template_matching_method) avg_metric = np.mean(res) top_left = cv2.minMaxLoc(res)[3] elif cv2 is None: res = match_template(frame, template) avg_metric = np.mean(res) top_left = np.unravel_index(np.argmax(res), res.shape) ## from hereon in, x and y are reversed in naming convention sh_y,sh_x = top_left if (0 < top_left[1] < 2 * ms_h-1) & (0 < top_left[0] < 2 * ms_w-1): # if max is internal, check for subpixel shift using gaussian peak registration log_xm1_y = np.log(res[sh_x-1,sh_y]) log_xp1_y = np.log(res[sh_x+1,sh_y]) log_x_ym1 = np.log(res[sh_x,sh_y-1]) log_x_yp1 = np.log(res[sh_x,sh_y+1]) four_log_xy = 4*np.log(res[sh_x,sh_y]) sh_x_n = -(sh_x - ms_h + (log_xm1_y - log_xp1_y) / (2 * log_xm1_y - four_log_xy + 2 * 
log_xp1_y)) sh_y_n = -(sh_y - ms_w + (log_x_ym1 - log_x_yp1) / (2 * log_x_ym1 - four_log_xy + 2 * log_x_yp1)) else: sh_x_n = -(sh_x - ms_h) sh_y_n = -(sh_y - ms_w) # NOTE: to correct for reversal in naming convention, vals are placed y, x -- but their meaning is x,y vals[i,:] = [sh_y_n, sh_x_n, avg_metric] # X , Y if verbose: pbar.finish() return template_uncropped, vals
bensondaled/pyfluo
pyfluo/motion.py
Python
bsd-2-clause
9,112
[ "Gaussian" ]
fbf157ee2aafac3840ec42bf3f29e224e9ab3514cf280f3c820d5b84d19f88ab
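The pyfluo motion-correction record above refines the integer template-matching peak to sub-pixel precision by fitting a Gaussian (a quadratic in log space) through the correlation values on either side of the maximum. The numpy sketch below reproduces that refinement on a synthetic correlation surface; subpixel_peak is a hypothetical helper, not part of pyfluo.

import numpy as np

def subpixel_peak(res):
    # Integer argmax, then log-Gaussian interpolation along each axis.
    r, c = np.unravel_index(np.argmax(res), res.shape)
    if 0 < r < res.shape[0] - 1 and 0 < c < res.shape[1] - 1:
        lr = np.log(res[[r - 1, r, r + 1], c])
        lc = np.log(res[r, [c - 1, c, c + 1]])
        dr = (lr[0] - lr[2]) / (2 * lr[0] - 4 * lr[1] + 2 * lr[2])
        dc = (lc[0] - lc[2]) / (2 * lc[0] - 4 * lc[1] + 2 * lc[2])
        return float(r + dr), float(c + dc)
    return float(r), float(c)

# synthetic Gaussian correlation bump with its true centre at (10.3, 7.6)
y, x = np.mgrid[0:21, 0:15]
res = np.exp(-((y - 10.3) ** 2 + (x - 7.6) ** 2) / 4.0)
print(subpixel_peak(res))   # recovers (10.3, 7.6)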
from __future__ import division import numpy as np, numpy.random as nr, numpy.linalg as nlg class GaussianRandomFeatures: """ Class to store Gaussian Random Features. """ def __init__(self, dim, rn, gammak=1.0, sine=False): """ Initialize with dim of input space, dim of random feature space and bandwidth of the RBF kernel. """ self.dim = dim self.rn = rn self.gammak = gammak self.sine = sine self.generateCoefficients() def generateCoefficients (self): """ Generate coefficients for GFF. """ self.ws = [] if not self.sine: self.bs = [] mean = np.zeros(self.dim) cov = np.eye(self.dim)*(2*self.gammak) if self.sine: for _ in range(self.rn): self.ws.append(nr.multivariate_normal(mean, cov)) else: for _ in range(self.rn): self.ws.append(nr.multivariate_normal(mean, cov)) self.bs.append(nr.uniform(0.0, 2*np.pi)) def computeRandomFeatures (self, f): """ Projects onto fourier feature space. """ f = np.array(f) #f = np.atleast_2d(f) ws = np.array(self.ws) if self.sine: rf_cos = (np.cos(ws.dot(f))*np.sqrt(1/self.rn)).tolist() rf_sin = (np.sin(ws.dot(f))*np.sqrt(1/self.rn)).tolist() return np.array(rf_cos + rf_sin) else: bs = np.array(self.bs) rf = np.cos(ws.dot(f) + bs[:,None])*np.sqrt(2/self.rn) return rf def RBFKernel(self, f1, f2, gammak=None): """ Computes RBF Kernel. """ if gammak is None: gammak = self.gammak f1 = np.array(f1) f2 = np.array(f2) return np.exp(-gammak*(nlg.norm(f1 - f2)**2)) def LinearRandomKernel(self, f1, f2): """ Computes Linear Kernel after projecting onto fourier space. """ rf1 = self.computeRandomFeatures(f1) rf2 = self.computeRandomFeatures(f2) return np.squeeze(rf1).dot(np.squeeze(rf2)) class RandomFeaturesConverter: def __init__(self, dim, rn, gammak, sine=False, feature_generator=None): """ dim --> dimension of input space rn --> number of random features gammak --> bandwidth of rbf kernel sine --> use sin in the random fourier features """ self.dim = dim self.rn = rn self.gammak = gammak if feature_generator is None: self.feature_generator = GaussianRandomFeatures(self.dim, self.rn, self.gammak, sine=sine) else: self.feature_generator = feature_generator def getFeatureGenerator(self): """ Get stored feature generator. """ return self.feature_generator def getData (self, fs): """ Gets the projected features. """ assert len(fs[0]) == self.dim rfs = [] for f in fs: rfs.append(self.feature_generator.computeRandomFeatures(f)) return rfs
AutonlabCMU/ActiveSearch
python/gaussianRandomFeatures.py
Python
mit
2,600
[ "Gaussian" ]
b2ec99e94abe461078eed1eeafa39b477c25c9d6d77c4844b730242f4cbf2daa
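The gaussianRandomFeatures record above relies on the random-Fourier-feature identity: with w drawn from N(0, 2*gammak*I) and b uniform on [0, 2*pi), the features sqrt(2/rn)*cos(w.x + b) have an inner product whose expectation is the RBF kernel exp(-gammak*||x - y||^2). Below is a quick numerical check of that identity on synthetic vectors; W, b, and phi are local names for this sketch, not the class API.

import numpy as np

rng = np.random.default_rng(0)
dim, rn, gammak = 5, 20000, 0.7

W = rng.multivariate_normal(np.zeros(dim), 2 * gammak * np.eye(dim), size=rn)
b = rng.uniform(0.0, 2 * np.pi, size=rn)

def phi(x):
    # Random Fourier features in the cosine-plus-offset form used by computeRandomFeatures.
    return np.sqrt(2.0 / rn) * np.cos(W @ x + b)

x, y = rng.standard_normal(dim), rng.standard_normal(dim)
print(np.exp(-gammak * np.linalg.norm(x - y) ** 2))   # exact RBF kernel value
print(phi(x) @ phi(y))                                # RFF estimate, close for large rn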
#!/usr/bin/env python import io import netCDF4 import numpy import m6plot import m6toolbox import matplotlib.pyplot as plt import os import sys import warnings def run(): try: import argparse except: raise Exception('This version of python is not new enough. python 2.7 or newer is required.') parser = argparse.ArgumentParser(description='''Script for plotting plotting poleward heat transport.''') parser.add_argument('infile', type=str, help='''Annually-averaged file containing 3D 'T_ady_2d' and 'T_diffy_2d'.''') parser.add_argument('-l','--label', type=str, default='', help='''Label to add to the plot.''') parser.add_argument('-s','--suptitle', type=str, default='', help='''Super-title for experiment. Default is to read from netCDF file.''') parser.add_argument('-o','--outdir', type=str, default='.', help='''Directory in which to place plots.''') parser.add_argument('-g','--gridspec', type=str, required=True, help='''Directory or tarfile containing mosaic/grid-spec files (ocean_hgrid.nc and ocean_mask.nc).''') cmdLineArgs = parser.parse_args() main(cmdLineArgs) def main(cmdLineArgs,stream=False): if not os.path.exists(cmdLineArgs.gridspec): raise ValueError('Specified gridspec directory/tar file does not exist.') if os.path.isdir(cmdLineArgs.gridspec): x = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['x'][::2,::2] xcenter = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['x'][1::2,1::2] y = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['y'][::2,::2] ycenter = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['y'][1::2,1::2] msk = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_mask.nc').variables['mask'][:] area = msk*netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_hgrid.nc').variables['area'][:,:].reshape([msk.shape[0], 2, msk.shape[1], 2]).sum(axis=-3).sum(axis=-1) depth = netCDF4.Dataset(cmdLineArgs.gridspec+'/ocean_topog.nc').variables['depth'][:] try: basin_code = netCDF4.Dataset(cmdLineArgs.gridspec+'/basin_codes.nc').variables['basin'][:] except: basin_code = m6toolbox.genBasinMasks(xcenter, ycenter, depth) elif os.path.isfile(cmdLineArgs.gridspec): x = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','x')[::2,::2] xcenter = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','x')[1::2,1::2] y = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','y')[::2,::2] ycenter = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','y')[1::2,1::2] msk = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_mask.nc','mask')[:] area = msk*m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_hgrid.nc','area')[:,:].reshape([msk.shape[0], 2, msk.shape[1], 2]).sum(axis=-3).sum(axis=-1) depth = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'ocean_topog.nc','depth')[:] try: basin_code = m6toolbox.readNCFromTar(cmdLineArgs.gridspec,'basin_codes.nc','basin')[:] except: basin_code = m6toolbox.genBasinMasks(xcenter, ycenter, depth) else: raise ValueError('Unable to extract grid information from gridspec directory/tar file.') rootGroup = netCDF4.MFDataset( cmdLineArgs.infile ) if 'T_ady_2d' in rootGroup.variables: varName = 'T_ady_2d' if len(rootGroup.variables[varName].shape)==3: advective = rootGroup.variables[varName][:].mean(axis=0).filled(0.) else: advective = rootGroup.variables[varName][:].filled(0.) 
else: raise Exception('Could not find "T_ady_2d" in file "%s"'%(cmdLineArgs.infile)) if 'T_diffy_2d' in rootGroup.variables: varName = 'T_diffy_2d' if len(rootGroup.variables[varName].shape)==3: diffusive = rootGroup.variables[varName][:].mean(axis=0).filled(0.) else: diffusive = rootGroup.variables[varName][:].filled(0.) else: diffusive = None warnings.warn('Diffusive temperature term not found. This will result in an underestimation of the heat transport.') def heatTrans(advective, diffusive=None, vmask=None): """Converts vertically integrated temperature advection into heat transport""" rho0 = 1.035e3; Cp = 3989. if diffusive != None: HT = advective + diffusive else: HT = advective HT = HT * (rho0 * Cp); HT = HT * 1.e-15 # convert to PW if vmask != None: HT = HT*vmask HT = HT.sum(axis=-1); HT = HT.squeeze() # sum in x-direction return HT def plotHeatTrans(y, HT, title, xlim=(-80,90)): plt.plot(y, y*0., 'k', linewidth=0.5) plt.plot(y, HT, 'r', linewidth=1.5,label='Model') plt.xlim(xlim); plt.ylim(-2.5,3.0) plt.title(title) plt.grid(True) def annotatePlot(label): fig = plt.gcf() #fig.text(0.1,0.85,label) fig.text(0.535,0.12,label) def annotateObs(): fig = plt.gcf() fig.text(0.1,0.85,r"Trenberth, K. E. and J. M. Caron, 2001: Estimates of Meridional Atmosphere and Ocean Heat Transports. J.Climate, 14, 3433-3443.", fontsize=8) fig.text(0.1,0.825,r"Ganachaud, A. and C. Wunsch, 2000: Improved estimates of global ocean circulation, heat transport and mixing from hydrographic data.", fontsize=8) fig.text(0.13,0.8,r"Nature, 408, 453-457", fontsize=8) m6plot.setFigureSize(npanels=1) # Load Observations fObs = netCDF4.Dataset('/archive/John.Krasting/obs/TC2001/Trenberth_and_Caron_Heat_Transport.nc') #Trenberth and Caron yobs = fObs.variables['ylat'][:] NCEP = {}; NCEP['Global'] = fObs.variables['OTn'] NCEP['Atlantic'] = fObs.variables['ATLn'][:]; NCEP['IndoPac'] = fObs.variables['INDPACn'][:] ECMWF = {}; ECMWF['Global'] = fObs.variables['OTe'][:] ECMWF['Atlantic'] = fObs.variables['ATLe'][:]; ECMWF['IndoPac'] = fObs.variables['INDPACe'][:] #G and W Global = {} Global['lat'] = numpy.array([-30., -19., 24., 47.]) Global['trans'] = numpy.array([-0.6, -0.8, 1.8, 0.6]) Global['err'] = numpy.array([0.3, 0.6, 0.3, 0.1]) Atlantic = {} Atlantic['lat'] = numpy.array([-45., -30., -19., -11., -4.5, 7.5, 24., 47.]) Atlantic['trans'] = numpy.array([0.66, 0.35, 0.77, 0.9, 1., 1.26, 1.27, 0.6]) Atlantic['err'] = numpy.array([0.12, 0.15, 0.2, 0.4, 0.55, 0.31, 0.15, 0.09]) IndoPac = {} IndoPac['lat'] = numpy.array([-30., -18., 24., 47.]) IndoPac['trans'] = numpy.array([-0.9, -1.6, 0.52, 0.]) IndoPac['err'] = numpy.array([0.3, 0.6, 0.2, 0.05,]) GandW = {} GandW['Global'] = Global GandW['Atlantic'] = Atlantic GandW['IndoPac'] = IndoPac def plotGandW(lat,trans,err): low = trans - err high = trans + err for n in range(0,len(low)): if n == 0: plt.plot([lat[n],lat[n]], [low[n],high[n]], 'c', linewidth=2.0, label='G&W') else: plt.plot([lat[n],lat[n]], [low[n],high[n]], 'c', linewidth=2.0) plt.scatter(lat,trans,marker='s',facecolor='cyan') if cmdLineArgs.suptitle != '': suptitle = cmdLineArgs.suptitle + ' ' + cmdLineArgs.label else: suptitle = rootGroup.title + ' ' + cmdLineArgs.label imgbufs = [] # Global Heat Transport HTplot = heatTrans(advective,diffusive) yy = y[1:,:].max(axis=-1) plotHeatTrans(yy,HTplot,title='Global Y-Direction Heat Transport [PW]') plt.plot(yobs,NCEP['Global'],'k--',linewidth=0.5,label='NCEP') plt.plot(yobs,ECMWF['Global'],'k.',linewidth=0.5,label='ECMWF') 
plotGandW(GandW['Global']['lat'],GandW['Global']['trans'],GandW['Global']['err']) plt.xlabel(r'Latitude [$\degree$N]') plt.suptitle(suptitle) plt.legend(loc=0,fontsize=10) annotateObs() if diffusive is None: annotatePlot('Warning: Diffusive component of transport is missing.') if stream is True: objOut = io.BytesIO() else: objOut = cmdLineArgs.outdir+'/HeatTransport_global.png' plt.savefig(objOut) if stream is True: imgbufs.append(objOut) # Atlantic Heat Transport plt.clf() m = 0*basin_code; m[(basin_code==2) | (basin_code==4) | (basin_code==6) | (basin_code==7) | (basin_code==8)] = 1 HTplot = heatTrans(advective, diffusive, vmask=m*numpy.roll(m,-1,axis=-2)) yy = y[1:,:].max(axis=-1) HTplot[yy<-34] = numpy.nan plotHeatTrans(yy,HTplot,title='Atlantic Y-Direction Heat Transport [PW]') plt.plot(yobs,NCEP['Atlantic'],'k--',linewidth=0.5,label='NCEP') plt.plot(yobs,ECMWF['Atlantic'],'k.',linewidth=0.5,label='ECMWF') plotGandW(GandW['Atlantic']['lat'],GandW['Atlantic']['trans'],GandW['Atlantic']['err']) plt.xlabel(r'Latitude [$\degree$N]') plt.suptitle(suptitle) plt.legend(loc=0,fontsize=10) annotateObs() if diffusive is None: annotatePlot('Warning: Diffusive component of transport is missing.') if stream is True: objOut = io.BytesIO() else: objOut = cmdLineArgs.outdir+'/HeatTransport_Atlantic.png' plt.savefig(objOut) if stream is True: imgbufs.append(objOut) # Indo-Pacific Heat Transport plt.clf() m = 0*basin_code; m[(basin_code==3) | (basin_code==5)] = 1 HTplot = heatTrans(advective, diffusive, vmask=m*numpy.roll(m,-1,axis=-2)) yy = y[1:,:].max(axis=-1) HTplot[yy<-34] = numpy.nan plotHeatTrans(yy,HTplot,title='Indo-Pacific Y-Direction Heat Transport [PW]') plt.plot(yobs,NCEP['IndoPac'],'k--',linewidth=0.5,label='NCEP') plt.plot(yobs,ECMWF['IndoPac'],'k.',linewidth=0.5,label='ECMWF') plotGandW(GandW['IndoPac']['lat'],GandW['IndoPac']['trans'],GandW['IndoPac']['err']) plt.xlabel(r'Latitude [$\degree$N]') annotateObs() if diffusive is None: annotatePlot('Warning: Diffusive component of transport is missing.') plt.suptitle(suptitle) plt.legend(loc=0,fontsize=10) if stream is True: objOut = io.BytesIO() else: objOut = cmdLineArgs.outdir+'/HeatTransport_IndoPac.png' plt.savefig(objOut) if stream is True: imgbufs.append(objOut) if stream is True: return imgbufs if __name__ == '__main__': run()
nicjhan/MOM6-examples
tools/analysis/poleward_heat_transport.py
Python
gpl-3.0
9,681
[ "NetCDF" ]
45c92f691c07d47e4849706c02966d53b0e3c4d098a9f34aff9c99971b5e988b
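In the MOM6 poleward_heat_transport record above, heatTrans() turns the vertically integrated temperature transport T_ady_2d (degC m^3 s^-1 per cell) into a heat transport in petawatts by multiplying by rho0*Cp and 1e-15, then summing in the zonal direction. A short sanity check of that conversion with made-up numbers:

import numpy as np

rho0, Cp = 1.035e3, 3989.0                       # kg m^-3 and J kg^-1 K^-1, as in the script
t_ady_row = np.array([1.2e5, 0.8e5, 1.5e5])      # hypothetical zonal row of T_ady_2d values

heat_PW = t_ady_row.sum() * rho0 * Cp * 1.0e-15  # sum in x, convert W to PW
print(heat_PW)                                   # about 0.0014 PW for this made-up row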
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'DataTable' db.create_table('neuroelectro_datatable', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('link', self.gf('django.db.models.fields.CharField')(max_length=1000)), ('table_html', self.gf('picklefield.fields.PickledObjectField')(null=True)), ('table_text', self.gf('django.db.models.fields.CharField')(max_length=10000, null=True)), ('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['neuroelectro.Article'])), )) db.send_create_signal('neuroelectro', ['DataTable']) def backwards(self, orm): # Deleting model 'DataTable' db.delete_table('neuroelectro_datatable') models = { 'neuroelectro.article': { 'Meta': {'object_name': 'Article'}, 'abstract': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}), 'full_text_link': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Journal']", 'null': 'True'}), 'pmid': ('django.db.models.fields.IntegerField', [], {}), 'substances': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.Substance']", 'null': 'True', 'symmetrical': 'False'}), 'terms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.MeshTerm']", 'null': 'True', 'symmetrical': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.articlefulltext': { 'Meta': {'object_name': 'ArticleFullText'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}), 'full_text': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'neuroelectro.brainregion': { 'Meta': {'object_name': 'BrainRegion'}, 'abbrev': ('django.db.models.fields.CharField', [], {'max_length': '10'}), 'allenid': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}), 'color': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isallen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'treedepth': ('django.db.models.fields.IntegerField', [], {'null': 'True'}) }, 'neuroelectro.datatable': { 'Meta': {'object_name': 'DataTable'}, 'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['neuroelectro.Article']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '1000'}), 'table_html': ('picklefield.fields.PickledObjectField', [], {'null': 'True'}), 'table_text': ('django.db.models.fields.CharField', [], {'max_length': '10000', 'null': 'True'}) }, 'neuroelectro.insituexpt': { 'Meta': {'object_name': 'InSituExpt'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'imageseriesid': ('django.db.models.fields.IntegerField', [], {}), 'plane': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'regionexprs': ('django.db.models.fields.related.ManyToManyField', [], {'to': 
"orm['neuroelectro.RegionExpr']", 'null': 'True', 'symmetrical': 'False'}), 'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'neuroelectro.journal': { 'Meta': {'object_name': 'Journal'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.meshterm': { 'Meta': {'object_name': 'MeshTerm'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.neuron': { 'Meta': {'object_name': 'Neuron'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '500'}), 'nlex_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}), 'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.BrainRegion']", 'null': 'True', 'symmetrical': 'False'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.NeuronSyn']", 'null': 'True', 'symmetrical': 'False'}) }, 'neuroelectro.neuronsyn': { 'Meta': {'object_name': 'NeuronSyn'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.protein': { 'Meta': {'object_name': 'Protein'}, 'allenid': ('django.db.models.fields.IntegerField', [], {}), 'common_name': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True'}), 'entrezid': ('django.db.models.fields.IntegerField', [], {}), 'gene': ('django.db.models.fields.CharField', [], {'max_length': '20'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'in_situ_expts': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.InSituExpt']", 'null': 'True', 'symmetrical': 'False'}), 'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '400'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'}) }, 'neuroelectro.proteinsyn': { 'Meta': {'object_name': 'ProteinSyn'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.regionexpr': { 'Meta': {'object_name': 'RegionExpr'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'region': ('django.db.models.fields.related.ForeignKey', [], {'default': '0', 'to': "orm['neuroelectro.BrainRegion']"}), 'val': ('django.db.models.fields.FloatField', [], {}) }, 'neuroelectro.species': { 'Meta': {'object_name': 'Species'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'specie': ('django.db.models.fields.CharField', [], {'max_length': '500'}) }, 'neuroelectro.substance': { 'Meta': {'object_name': 'Substance'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'term': ('django.db.models.fields.CharField', [], {'max_length': '300'}) }, 'neuroelectro.superprotein': { 'Meta': {'object_name': 'SuperProtein'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_channel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], 
{'max_length': '400'}), 'synonyms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['neuroelectro.ProteinSyn']", 'null': 'True', 'symmetrical': 'False'}) } } complete_apps = ['neuroelectro']
lessc0de/neuroelectro_org
neuroelectro/south_migrations/0011_auto__add_datatable.py
Python
gpl-2.0
8,732
[ "NEURON" ]
7d477cbe6dcbf5a803a3665960969607a7243ef3354b3e11d83f5f68711d413c
# # This file is part of the CCP1 Graphical User Interface (ccp1gui) # # (C) 2002-2005 CCLRC Daresbury Laboratory # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. # """Storage and Management of Gaussian basis sets Still really a prototype implementation We have one python module per library basis not classes right now, just functions .. this way allows different basis sets to be stored and implemented differently for each basis set module must provide three functions valid_elements() get_basis(element) get_ecp(element) no modules for code internal library basis sets (these are handled by special cases .. search for keyword currently there is a dummy module?? probably a mistake Further developments: types of expansions (currently we only have storage of segmented basis sets ... need ANOs etc) The storage of ECPs and basis sets is rather inconsistent exponents first for basis sets, last for ECPs """ import basis.basismanager import basis.sto3g import basis.lanl2dz import string class AtomBasis: """Hold a single basis set A basis here means a list of shells associated with a z value, a basis set label, and descriptive text """ def __init__(self,z=0,name='UNK',text="Unassigned Basis"): self.z=z self.text=text self.name=name self.shells=[] self.label='unassigned' def load_from_list(self,data): """load basis from a nested python list structure, e.g. 
[ ['S', [ 2.23100000, -0.49005890 ], [ 0.47200000, 1.25426840 ] ], ['S', [ 0.16310000, 1.00000000 ] ], ['P', [ 6.29600000, -0.06356410 ], [ 0.63330000, 1.01413550 ] ], ['P', [ 0.18190000, 1.00000000 ] ] ] """ for shell in data: s = BasisShell() self.shells.append(s) s.type = shell[0] for contr in shell[1:]: if len(contr) == 2: s.expansion.append((float(contr[0]),float(contr[1]))) else: s.expansion.append((float(contr[0]),float(contr[1]),float(contr[2]))) def load_from_file(self,file): """pull in a single atomic basis from a file file format: S 71.616837 0.15432897 13.045096 0.53532814 3.5305122 0.44463454 L 2.9412494 -0.09996723 0.15591627 0.6834831 0.39951283 0.60768372 0.2222899 0.70011547 0.39195739 """ f = open(file) while 1: line = f.readline() if not line: break words = string.split(line) txt = string.upper(words[0]) if words[0] == 'S' or words[0] == 'P' or words[0] == 'L' \ or words[0] == 'D' or words[0] == 'F' or words[0] == 'G': s = BasisShell() s.type = words[0] self.shells.append(s) else: n = len(words) if n == 2: s.expansion.append((float(words[0]),float(words[1]))) else: s.expansion.append((float(words[0]),float(words[1]),float(words[2]))) f.close() def __str__(self): return self.label + ' Basis ' + self.name + ' for z= ' + str(self.z) + ', '+ str(len(self.shells)) + ' shells' def __repr__(self): return self.label + ' Basis ' + self.name + ' for z= ' + str(self.z) + ', '+ str(len(self.shells)) + ' shells' def list(self): """human readable output of the basis set""" print self.name, 'Z=',self.z for shell in self.shells: shell.list() class KeywordAtomBasis(AtomBasis): """A Keyword basis holds metadata relating to a basis set which is to be taken from a code's internal library """ def __str__(self): return self.label + ' Basis ' + self.name + ' for z= ' + str(self.z) + ', (Internal)' def __repr__(self): return self.__str__() def list(self): """human readable output of the basis set""" print self.label, self.name, 'Z=',self.z, '(Internal)' class BasisShell: """Storage for a basis shell Each shell has a type (S,L,P,....G) and an expansion The expansion is a list structure of the form [ [ exp coef ] [ exp coef ] .... ] or for L shells [ [ exp coef_s coef_p ] [ exp coef_s coef_p] .... ] """ def __init__(self): self.type='S' self.expansion=[] def __repr__(self): txt = '' for p in self.expansion: txt = txt + str(p) + '\n' return self.type + ':' + txt def __str__(self): return 'Shell type ' + self.type + ', ' + str(len(self.expansion)) + ' primitives' def list(self): print self.type for p in self.expansion: if len(p) == 2: print '%12.8f %8.4f' % (p[0],p[1]) elif len(p) == 3: print '%12.8f %8.4f %8.4f' % (p[0],p[1],p[2]) class AtomECP: """Storage of ECP data for one element""" def __init__(self,z=-1,name='UNK',ncore=-1,lmax=-1): self.z = z self.lmax = lmax self.ncore = ncore self.expansion=[] self.name=name self.shells=[] self.label='unassigned' def __str__(self): return self.label + ' ECP ' + self.name + ' for z= ' + str(self.z) + ', '+ str(len(self.shells)) + ' terms' def __repr__(self): return self.label + ' ECP ' + self.name + ' for z= ' + str(self.z) + ', '+ str(len(self.shells)) + ' terms' def load_from_list(self,data): """load ecp from a nested python list structure structure required: [ [2, 10], ! L_max, N_core [3, "f potential", ! L , Label [1 , -10.00000000, 94.81300000 ] , ! 
rexp , coeff , exp [2 , 66.27291700, 165.64400000 ] , [2 , -28.96859500, 30.83170000 ] , [2 , -12.86633700, 10.58410000 ] , [2 , -1.71021700, 3.77040000 ] ] , [0, "s-f potential", [0 , 3.00000000, 128.83910000 ] , [1 , 12.85285100, 120.37860000 ] , [2 , 275.67239800, 63.56220000 ] , [2 , 115.67771200, 18.06950000 ] , [2 , 35.06060900, 3.81420000 ] ] , [1, "p-f potential", [0 , 5.00000000, 216.52630000 ] , [1 , 7.47948600, 46.57230000 ] , [2 , 613.03200000, 147.46850000 ] , [2 , 280.80068500, 48.98690000 ] , [2 , 107.87882400, 13.20960000 ] , [2 , 15.34395600, 3.18310000 ] ] ] """ self.lmax = data[0][0] self.ncore = data[0][1] shells = data[1:] for shell in shells: s = EcpShell() s.type = shell[0] s.desc = shell[1] self.shells.append(s) for c in shell[2:]: s.expansion.append((float(c[0]),float(c[1]),float(c[2]))) def list(self): """human readable output of the basis set""" print self.name, 'Z=',self.z,' ncore=',self.ncore,' lmax= ',self.lmax for shell in self.shells: shell.list() class KeywordAtomECP(AtomECP): """Storage for ECPs to be taken from internal code libraries""" def __str__(self): return self.label + ' ECP ' + self.name + ' for z= ' + str(self.z) + ', (Internal)' def __repr__(self): return self.__str__() def list(self): """human readable output of the basis set""" print self.label, self.name, 'Z=',self.z, '(Internal)' class EcpShell: """Storage for a component of an ECP Each shell has a type (l) (0,1,2), a description, and an expansion The expansion is a list structure of the form [ [ rexp coef exp ] [ rexp coef exp ] .... ] rexp is the integer power of r. """ def __init__(self): self.type='0' self.desc='unk' self.expansion=[] def __repr__(self): txt = '' for p in self.expansion: txt = txt + str(p) + '\n' return self.type + ':' + txt def __str__(self): return 'ECP Shell L=' + self.type + ', ' + str(len(self.expansion)) + ' primitives' def list(self): print self.type,self.desc for p in self.expansion: print '%d %12.8f %8.4f' % (p[0],p[1],p[2])
alexei-matveev/ccp1gui
basis/__init__.py
Python
gpl-2.0
9,197
[ "Gaussian" ]
2ba115ebb63a3808d362bc1021617e37123021d34f018f85656e35782ed0d0af
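The ccp1gui basis record above documents the nested-list layout that AtomBasis.load_from_list expects: each shell is a list whose first element is the shell type and whose remaining elements are (exponent, coefficient) primitives. The loop below walks data in that layout (the numbers come from the module's own docstring example); it is an illustration, not the AtomBasis class itself.

basis_data = [
    ['S', [2.231, -0.4900589], [0.472, 1.2542684]],
    ['S', [0.1631, 1.0]],
    ['P', [6.296, -0.0635641], [0.6333, 1.0141355]],
    ['P', [0.1819, 1.0]],
]

for shell in basis_data:
    shell_type, primitives = shell[0], shell[1:]
    # Each primitive is (exponent, coefficient); L shells would carry a third entry.
    print(shell_type, [(float(e), float(c)) for e, c in primitives])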
from __future__ import absolute_import import os.path as op from sfepy.base.testing import TestCommon def get_volume(el, nd): from sfepy.mesh.mesh_tools import elems_q2t from sfepy.base.compat import factorial import numpy as nm from sfepy.linalg.utils import dets_fast dim = nd.shape[1] nnd = el.shape[1] etype = '%d_%d' % (dim, nnd) if etype == '2_4' or etype == '3_8': el = elems_q2t(el) nel = el.shape[0] mul = 1.0 / factorial(dim) if dim == 3: mul *= -1.0 mtx = nm.ones((nel, dim + 1, dim + 1), dtype=nm.double) mtx[:,:,:-1] = nd[el,:] vols = mul * dets_fast(mtx) vol = vols.sum() return vol class Test(TestCommon): @staticmethod def from_conf(conf, options): test = Test(conf=conf, options=options) return test def test_mesh_smoothing(self): from sfepy.mesh.mesh_tools import smooth_mesh from sfepy.discrete.fem.mesh import Mesh from sfepy import data_dir mesh = Mesh.from_file(data_dir + '/meshes/3d/cylinder.vtk') conn = mesh.get_conn('3_4') vol0 = get_volume(conn, mesh.coors) mesh.coors[:] = smooth_mesh(mesh, n_iter=10) vol1 = get_volume(conn, mesh.coors) filename = op.join(self.options.out_dir, 'smoothed_cylinder.vtk') mesh.write(filename) frac = vol1 / vol0 if (frac < 0.967) and (frac > 0.966): self.report('mesh smoothed') return True else: self.report('mesh smoothed, volume mismatch!') return False
rc/sfepy
tests/test_mesh_smoothing.py
Python
bsd-3-clause
1,592
[ "VTK" ]
a8829128326859187d35014cda82920969b0e3ebe69beadbcd6ff9939fdaa234
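The sfepy test record above judges smoothing quality by comparing mesh volumes before and after; its get_volume splits hexahedra into tetrahedra and evaluates each simplex volume as det([coords | 1]) / dim!, with the sign flipped in 3D for the node ordering it assumes. A standalone numpy check of that formula on the unit tetrahedron:

import numpy as np
from math import factorial

nodes = np.array([[0.0, 0.0, 0.0],
                  [1.0, 0.0, 0.0],
                  [0.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0]])
dim = nodes.shape[1]

mtx = np.ones((dim + 1, dim + 1))
mtx[:, :-1] = nodes                            # rows are [x, y, z, 1]
vol = -np.linalg.det(mtx) / factorial(dim)     # 3D sign convention from get_volume
print(vol)                                     # 1/6 for this node ordering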
import difflib from test.support import run_unittest, findfile import unittest import doctest import sys class TestWithAscii(unittest.TestCase): def test_one_insert(self): sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('insert', 0, 0, 0, 1), ('equal', 0, 100, 1, 101)]) self.assertEqual(sm.bpopular, set()) sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50) self.assertAlmostEqual(sm.ratio(), 0.995, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 50, 0, 50), ('insert', 50, 50, 50, 51), ('equal', 50, 100, 51, 101)]) self.assertEqual(sm.bpopular, set()) def test_one_delete(self): sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40) self.assertAlmostEqual(sm.ratio(), 0.994, places=3) self.assertEqual(list(sm.get_opcodes()), [ ('equal', 0, 40, 0, 40), ('delete', 40, 41, 40, 40), ('equal', 41, 81, 40, 80)]) def test_bjunk(self): sm = difflib.SequenceMatcher(isjunk=lambda x: x == ' ', a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40) self.assertEqual(sm.bjunk, set()) sm = difflib.SequenceMatcher(isjunk=lambda x: x == ' ', a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20) self.assertEqual(sm.bjunk, {' '}) sm = difflib.SequenceMatcher(isjunk=lambda x: x in [' ', 'b'], a='a' * 40 + 'b' * 40, b='a' * 44 + 'b' * 40 + ' ' * 20) self.assertEqual(sm.bjunk, {' ', 'b'}) class TestAutojunk(unittest.TestCase): """Tests for the autojunk parameter added in 2.7""" def test_one_insert_homogenous_sequence(self): # By default autojunk=True and the heuristic kicks in for a sequence # of length 200+ seq1 = 'b' * 200 seq2 = 'a' + 'b' * 200 sm = difflib.SequenceMatcher(None, seq1, seq2) self.assertAlmostEqual(sm.ratio(), 0, places=3) self.assertEqual(sm.bpopular, {'b'}) # Now turn the heuristic off sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False) self.assertAlmostEqual(sm.ratio(), 0.9975, places=3) self.assertEqual(sm.bpopular, set()) class TestSFbugs(unittest.TestCase): def test_ratio_for_null_seqn(self): # Check clearing of SF bug 763023 s = difflib.SequenceMatcher(None, [], []) self.assertEqual(s.ratio(), 1) self.assertEqual(s.quick_ratio(), 1) self.assertEqual(s.real_quick_ratio(), 1) def test_comparing_empty_lists(self): # Check fix for bug #979794 group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes() self.assertRaises(StopIteration, next, group_gen) diff_gen = difflib.unified_diff([], []) self.assertRaises(StopIteration, next, diff_gen) def test_matching_blocks_cache(self): # Issue #21635 s = difflib.SequenceMatcher(None, "abxcd", "abcd") first = s.get_matching_blocks() second = s.get_matching_blocks() self.assertEqual(second[0].size, 2) self.assertEqual(second[1].size, 2) self.assertEqual(second[2].size, 0) def test_added_tab_hint(self): # Check fix for bug #1488943 diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"])) self.assertEqual("- \tI am a buggy", diff[0]) self.assertEqual("? --\n", diff[1]) self.assertEqual("+ \t\tI am a bug", diff[2]) self.assertEqual("? +\n", diff[3]) patch914575_from1 = """ 1. Beautiful is beTTer than ugly. 2. Explicit is better than implicit. 3. Simple is better than complex. 4. Complex is better than complicated. """ patch914575_to1 = """ 1. Beautiful is better than ugly. 3. Simple is better than complex. 4. Complicated is better than complex. 5. Flat is better than nested. 
""" patch914575_from2 = """ \t\tLine 1: preceeded by from:[tt] to:[ssss] \t\tLine 2: preceeded by from:[sstt] to:[sssst] \t \tLine 3: preceeded by from:[sstst] to:[ssssss] Line 4: \thas from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end\t """ patch914575_to2 = """ Line 1: preceeded by from:[tt] to:[ssss] \tLine 2: preceeded by from:[sstt] to:[sssst] Line 3: preceeded by from:[sstst] to:[ssssss] Line 4: has from:[sst] to:[sss] after : Line 5: has from:[t] to:[ss] at end """ patch914575_from3 = """line 0 1234567890123456789012345689012345 line 1 line 2 line 3 line 4 changed line 5 changed line 6 changed line 7 line 8 subtracted line 9 1234567890123456789012345689012345 short line just fits in!! just fits in two lines yup!! the end""" patch914575_to3 = """line 0 1234567890123456789012345689012345 line 1 line 2 added line 3 line 4 chanGEd line 5a chanGed line 6a changEd line 7 line 8 line 9 1234567890 another long line that needs to be wrapped just fitS in!! just fits in two lineS yup!! the end""" class TestSFpatches(unittest.TestCase): def test_html_diff(self): # Check SF patch 914575 for generating HTML differences f1a = ((patch914575_from1 + '123\n'*10)*3) t1a = (patch914575_to1 + '123\n'*10)*3 f1b = '456\n'*10 + f1a t1b = '456\n'*10 + t1a f1a = f1a.splitlines() t1a = t1a.splitlines() f1b = f1b.splitlines() t1b = t1b.splitlines() f2 = patch914575_from2.splitlines() t2 = patch914575_to2.splitlines() f3 = patch914575_from3 t3 = patch914575_to3 i = difflib.HtmlDiff() j = difflib.HtmlDiff(tabsize=2) k = difflib.HtmlDiff(wrapcolumn=14) full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5) tables = '\n'.join( [ '<h2>Context (first diff within numlines=5(default))</h2>', i.make_table(f1a,t1a,'from','to',context=True), '<h2>Context (first diff after numlines=5(default))</h2>', i.make_table(f1b,t1b,'from','to',context=True), '<h2>Context (numlines=6)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=6), '<h2>Context (numlines=0)</h2>', i.make_table(f1a,t1a,'from','to',context=True,numlines=0), '<h2>Same Context</h2>', i.make_table(f1a,f1a,'from','to',context=True), '<h2>Same Full</h2>', i.make_table(f1a,f1a,'from','to',context=False), '<h2>Empty Context</h2>', i.make_table([],[],'from','to',context=True), '<h2>Empty Full</h2>', i.make_table([],[],'from','to',context=False), '<h2>tabsize=2</h2>', j.make_table(f2,t2), '<h2>tabsize=default</h2>', i.make_table(f2,t2), '<h2>Context (wrapcolumn=14,numlines=0)</h2>', k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0), '<h2>wrapcolumn=14,splitlines()</h2>', k.make_table(f3.splitlines(),t3.splitlines()), '<h2>wrapcolumn=14,splitlines(True)</h2>', k.make_table(f3.splitlines(True),t3.splitlines(True)), ]) actual = full.replace('</body>','\n%s\n</body>' % tables) # temporarily uncomment next two lines to baseline this test #with open('test_difflib_expect.html','w') as fp: # fp.write(actual) with open(findfile('test_difflib_expect.html')) as fp: self.assertEqual(actual, fp.read()) def test_recursion_limit(self): # Check if the problem described in patch #1413711 exists. 
limit = sys.getrecursionlimit() old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)] new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)] difflib.SequenceMatcher(None, old, new).get_opcodes() class TestOutputFormat(unittest.TestCase): def test_tab_delimiter(self): args = ['one', 'two', 'Original', 'Current', '2005-01-26 23:30:50', '2010-04-02 10:20:52'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], [ "--- Original\t2005-01-26 23:30:50", "+++ Current\t2010-04-02 10:20:52"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], [ "*** Original\t2005-01-26 23:30:50", "--- Current\t2010-04-02 10:20:52"]) def test_no_trailing_tab_on_empty_filedate(self): args = ['one', 'two', 'Original', 'Current'] ud = difflib.unified_diff(*args, lineterm='') self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"]) cd = difflib.context_diff(*args, lineterm='') self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"]) def test_range_format_unified(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ Each <range> field shall be of the form: %1d", <beginning line number> if the range contains exactly one line, and: "%1d,%1d", <beginning line number>, <number of lines> otherwise. If a range is empty, its beginning line number shall be the number of the line just before the range, or 0 if the empty range starts the file. ''' fmt = difflib._format_range_unified self.assertEqual(fmt(3,3), '3,0') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,2') self.assertEqual(fmt(3,6), '4,3') self.assertEqual(fmt(0,0), '0,0') def test_range_format_context(self): # Per the diff spec at http://www.unix.org/single_unix_specification/ spec = '''\ The range of lines in file1 shall be written in the following format if the range contains two or more lines: "*** %d,%d ****\n", <beginning line number>, <ending line number> and the following format otherwise: "*** %d ****\n", <ending line number> The ending line number of an empty range shall be the number of the preceding line, or 0 if the range is at the start of the file. Next, the range of lines in file2 shall be written in the following format if the range contains two or more lines: "--- %d,%d ----\n", <beginning line number>, <ending line number> and the following format otherwise: "--- %d ----\n", <ending line number> ''' fmt = difflib._format_range_context self.assertEqual(fmt(3,3), '3') self.assertEqual(fmt(3,4), '4') self.assertEqual(fmt(3,5), '4,5') self.assertEqual(fmt(3,6), '4,6') self.assertEqual(fmt(0,0), '0') class TestJunkAPIs(unittest.TestCase): def test_is_line_junk_true(self): for line in ['#', ' ', ' #', '# ', ' # ', '']: self.assertTrue(difflib.IS_LINE_JUNK(line), repr(line)) def test_is_line_junk_false(self): for line in ['##', ' ##', '## ', 'abc ', 'abc #', 'Mr. 
Moose is up!']: self.assertFalse(difflib.IS_LINE_JUNK(line), repr(line)) @unittest.skip("very slow - even on CPython 3.4.4") def test_is_line_junk_REDOS(self): evil_input = ('\t' * 1000000) + '##' self.assertFalse(difflib.IS_LINE_JUNK(evil_input)) def test_is_character_junk_true(self): for char in [' ', '\t']: self.assertTrue(difflib.IS_CHARACTER_JUNK(char), repr(char)) def test_is_character_junk_false(self): for char in ['a', '#', '\n', '\f', '\r', '\v']: self.assertFalse(difflib.IS_CHARACTER_JUNK(char), repr(char)) def test_main(): difflib.HtmlDiff._default_prefix = 0 Doctests = doctest.DocTestSuite(difflib) run_unittest( TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs, TestOutputFormat, TestJunkAPIs, Doctests) if __name__ == '__main__': test_main()
IronLanguages/ironpython3
Src/StdLib/Lib/test/test_difflib.py
Python
apache-2.0
12,307
[ "MOOSE" ]
e1aaef3746027b1accc15dae5fb92d389bc306c967c0fb2a82c1aac72746cdaa
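The test file above exercises difflib.SequenceMatcher ratios and opcodes plus the unified diff formatter. For orientation, here is a minimal, self-contained sketch of those standard-library calls; the input strings and line lists are invented purely for illustration and are not taken from the tests.

import difflib

# Ratio and opcodes for two short strings (invented inputs).
sm = difflib.SequenceMatcher(None, "abxcd", "abcd")
print(sm.ratio())        # similarity in [0, 1]
print(sm.get_opcodes())  # tuples of ('equal'/'delete'/'insert'/'replace', i1, i2, j1, j2)

# Unified diff between two line lists, the format checked in TestOutputFormat above.
old = ["one\n", "two\n", "three\n"]
new = ["one\n", "2\n", "three\n"]
print("".join(difflib.unified_diff(old, new, fromfile="Original", tofile="Current")))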
# Copyright (C) 2019-2021 The Software Heritage developers # See the AUTHORS file at the top-level directory of this distribution # License: GNU Affero General Public License version 3, or any later version # See top-level LICENSE file for more information from datetime import datetime, timedelta, timezone import json import pytest from swh.auth.django.utils import oidc_user_from_profile from swh.web.auth.utils import SWH_AMBASSADOR_PERMISSION from swh.web.common.models import SaveOriginRequest from swh.web.common.origin_save import SAVE_REQUEST_ACCEPTED, SAVE_TASK_SUCCEEDED from swh.web.common.utils import reverse from swh.web.tests.utils import check_http_get_response VISIT_TYPES = ("git", "svn", "hg") PRIVILEGED_VISIT_TYPES = tuple(list(VISIT_TYPES) + ["archives"]) def test_old_save_url_redirection(client): url = reverse("browse-origin-save") redirect_url = reverse("origin-save") resp = check_http_get_response(client, url, status_code=302) assert resp["location"] == redirect_url def test_save_types_list_default(client): """Unprivileged listing should display default list of visit types. """ url = reverse("origin-save-types-list") resp = check_http_get_response(client, url, status_code=200) actual_response = resp.json() assert set(actual_response) == set(VISIT_TYPES) @pytest.mark.django_db def test_save_types_list_privileged(client, keycloak_oidc): """Privileged listing should display all visit types. """ keycloak_oidc.realm_permissions = [SWH_AMBASSADOR_PERMISSION] client.login(code="", code_verifier="", redirect_uri="") url = reverse("origin-save-types-list") resp = check_http_get_response(client, url, status_code=200) actual_response = resp.json() assert set(actual_response) == set(PRIVILEGED_VISIT_TYPES) @pytest.mark.django_db def test_save_origin_requests_list(client, mocker): nb_origins_per_type = 10 for visit_type in VISIT_TYPES: for i in range(nb_origins_per_type): SaveOriginRequest.objects.create( request_date=datetime.now(tz=timezone.utc), visit_type=visit_type, origin_url=f"https://{visit_type}.example.org/project{i}", status=SAVE_REQUEST_ACCEPTED, visit_date=datetime.now(tz=timezone.utc) + timedelta(hours=1), loading_task_id=i, loading_task_status=SAVE_TASK_SUCCEEDED, ) mock_scheduler = mocker.patch("swh.web.common.origin_save.scheduler") mock_scheduler.get_tasks.return_value = [] mock_scheduler.get_task_runs.return_value = [] # retrieve all save requests in 3 pages, sorted in descending order # of request creation for i, visit_type in enumerate(reversed(VISIT_TYPES)): url = reverse( "origin-save-requests-list", url_args={"status": "all"}, query_params={ "draw": i + 1, "search[value]": "", "order[0][column]": "0", "columns[0][name]": "request_date", "order[0][dir]": "desc", "length": nb_origins_per_type, "start": i * nb_origins_per_type, }, ) resp = check_http_get_response( client, url, status_code=200, content_type="application/json" ) sors = json.loads(resp.content.decode("utf-8")) assert sors["draw"] == i + 1 assert sors["recordsFiltered"] == len(VISIT_TYPES) * nb_origins_per_type assert sors["recordsTotal"] == len(VISIT_TYPES) * nb_origins_per_type assert len(sors["data"]) == nb_origins_per_type assert all(d["visit_type"] == visit_type for d in sors["data"]) # retrieve save requests filtered by visit type in a single page for i, visit_type in enumerate(reversed(VISIT_TYPES)): url = reverse( "origin-save-requests-list", url_args={"status": "all"}, query_params={ "draw": i + 1, "search[value]": visit_type, "order[0][column]": "0", "columns[0][name]": "request_date", 
"order[0][dir]": "desc", "length": nb_origins_per_type, "start": 0, }, ) resp = check_http_get_response( client, url, status_code=200, content_type="application/json" ) sors = json.loads(resp.content.decode("utf-8")) assert sors["draw"] == i + 1 assert sors["recordsFiltered"] == nb_origins_per_type assert sors["recordsTotal"] == len(VISIT_TYPES) * nb_origins_per_type assert len(sors["data"]) == nb_origins_per_type assert all(d["visit_type"] == visit_type for d in sors["data"]) @pytest.mark.django_db def test_save_origin_requests_list_user_filter(client, mocker, keycloak_oidc): # anonymous user created a save request sor = SaveOriginRequest.objects.create( request_date=datetime.now(tz=timezone.utc), visit_type="svn", origin_url="https://svn.example.org/user/project", status=SAVE_REQUEST_ACCEPTED, visit_date=datetime.now(tz=timezone.utc) + timedelta(hours=1), loading_task_id=1, loading_task_status=SAVE_TASK_SUCCEEDED, ) # authenticated user created a save request user = oidc_user_from_profile(keycloak_oidc, keycloak_oidc.login()) client.login(code="", code_verifier="", redirect_uri="") sor = SaveOriginRequest.objects.create( request_date=datetime.now(tz=timezone.utc), visit_type="git", origin_url="https://git.example.org/user/project", status=SAVE_REQUEST_ACCEPTED, visit_date=datetime.now(tz=timezone.utc) + timedelta(hours=1), loading_task_id=2, loading_task_status=SAVE_TASK_SUCCEEDED, user_ids=f'"{user.id}"', ) # filter save requests according to user id url = reverse( "origin-save-requests-list", url_args={"status": "all"}, query_params={ "draw": 1, "search[value]": "", "order[0][column]": "0", "columns[0][name]": "request_date", "order[0][dir]": "desc", "length": 10, "start": "0", "user_requests_only": "1", }, ) resp = check_http_get_response( client, url, status_code=200, content_type="application/json" ) sors = json.loads(resp.content.decode("utf-8")) assert sors["recordsFiltered"] == 1 assert sors["recordsTotal"] == 2 assert sors["data"][0] == sor.to_dict()
SoftwareHeritage/swh-web-ui
swh/web/tests/misc/test_origin_save.py
Python
agpl-3.0
6,658
[ "VisIt" ]
ab523a67aafe02df2af84d3ca7c7a0500456af0ec0af52843e221efc7152b936
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.

import nltk

from pypln.backend.workers.bigrams import Bigrams
from utils import TaskTest

bigram_measures = nltk.collocations.BigramAssocMeasures()


class TestBigramWorker(TaskTest):
    def test_bigrams_should_return_correct_score(self):
        # We need this list comprehension because we need to save the word list
        # in MongoDict (thus, it needs to be pickleable). Also, a list is what
        # will be available to the worker in real situations.
        tokens = [w for w in nltk.corpus.genesis.words('english-web.txt')]

        self.document['tokens'] = tokens
        bigram_finder = nltk.collocations.BigramCollocationFinder.from_words(tokens)
        expected = bigram_finder.score_ngram(bigram_measures.chi_sq, u',', u'which')

        Bigrams().delay(self.fake_id)
        bigram_rank = self.document['bigram_rank']
        result = bigram_rank[0][1][0]

        self.assertEqual(result, expected)
fccoelho/pypln.backend
tests/test_worker_bigrams.py
Python
gpl-3.0
1,685
[ "NAMD" ]
e3d04baad67cb93c16d511c53b0afafdef82a8152a5f56ec7146b063f511d094
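The test above leans on NLTK's collocation scoring. The following minimal sketch shows the same chi-squared bigram scoring on a tiny invented token list, so it runs without downloading the genesis corpus used by the test; the tokens and the bigram queried are illustrative assumptions only.

import nltk

tokens = "the quick brown fox jumps over the lazy dog the quick fox".split()
measures = nltk.collocations.BigramAssocMeasures()
finder = nltk.collocations.BigramCollocationFinder.from_words(tokens)

# Chi-squared score for one specific bigram, mirroring the score_ngram() call in the test.
print(finder.score_ngram(measures.chi_sq, "the", "quick"))

# Top-ranked bigrams by the same measure.
print(finder.nbest(measures.chi_sq, 3))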
"""An example of the colorbar display on the scatter plot.""" import ternary import matplotlib.pyplot as plt def _en_to_enth(energy, concs, A, B, C): """Converts an energy to an enthalpy. Converts energy to enthalpy using the following formula: Enthalpy = energy - (energy contribution from A) - (energy contribution from B) - (energy contribution from C) An absolute value is taken afterward for convenience. Parameters ---------- energy : float The energy of the structure concs : list of floats The concentrations of each element A : float The energy of pure A B : float The energy of pure B C : float The energy of pure C Returns ------- enth : float The enthalpy of formation. """ enth = abs(energy - concs[0]*A - concs[1] * B - concs[2] * C) return enth def _energy_to_enthalpy(energy): """Converts energy to enthalpy. This function take the energies stored in the energy array and converts them to formation enthalpy. Parameters --------- energy : list of lists of floats Returns ------- enthalpy : list of lists containing the enthalpies. """ pureA = [energy[0][0], energy[0][1]] pureB = [energy[1][0], energy[1][1]] pureC = [energy[2][0], energy[2][1]] enthalpy = [] for en in energy: c = en[2] conc = [float(i) / sum(c) for i in c] CE = _en_to_enth(en[0], conc, pureA[0], pureB[0], pureC[0]) VASP = _en_to_enth(en[1], conc, pureA[1], pureB[1], pureC[1]) enthalpy.append([CE, VASP, c]) return enthalpy def _find_error(vals): """Find the errors in the energy values. This function finds the errors in the enthalpys. Parameters ---------- vals : list of lists of floats Returns ------- err_vals : list of lists containing the errors. """ err_vals = [] for en in vals: c = en[2] conc = [float(i) / sum(c) for i in c] err = abs(en[0] - en[1]) err_vals.append([conc, err]) return err_vals def _read_data(fname): """Reads data from file. Reads the data in 'fname' into a list where each list entry contains [energy predicted, energy calculated, list of concentrations]. Parameters ---------- fname : str The name and path to the data file. Returns ------- energy : list of lists of floats A list of the energies and the concentrations. """ energy = [] with open(fname,'r') as f: for line in f: CE = abs(float(line.strip().split()[0])) VASP = abs(float(line.strip().split()[1])) conc = [i for i in line.strip().split()[2:]] conc_f = [] for c in conc: if '[' in c and ']' in c: conc_f.append(int(c[1:-1])) elif '[' in c: conc_f.append(int(c[1:-1])) elif ']' in c or ',' in c: conc_f.append(int(c[:-1])) else: conc_f.append(int(c)) energy.append([CE, VASP, conc_f]) return energy def conc_err_plot(fname): """Plots the error in the CE data. This plots the error in the CE predictions within a ternary concentration diagram. Parameters ---------- fname : string containing the input file name. """ energies = _read_data(fname) enthalpy = _energy_to_enthalpy(energies) this_errors = _find_error(enthalpy) points = [] colors = [] for er in this_errors: concs = er[0] points.append((concs[0] * 100, concs[1] * 100, concs[2] * 100)) colors.append(er[1]) scale = 100 figure, tax = ternary.figure(scale=scale) tax.boundary(linewidth=1.0) tax.set_title("Errors in Convex Hull Predictions.", fontsize=20) tax.gridlines(multiple=10, color="blue") tax.scatter(points, vmax=max(colors), colormap=plt.cm.viridis, colorbar=True, c=colors, cmap=plt.cm.viridis) tax.show() if __name__ == "__main__": conc_err_plot('sample_data/scatter_colorbar.txt')
marcharper/python-ternary
examples/scatter_colorbar.py
Python
mit
4,211
[ "VASP" ]
d3fb4e17b114ebb5c18accee4f979407c421eb72b97ea798a1fefdc9770b8d0f
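The docstrings in the example above describe the formation enthalpy as the absolute difference between a structure's energy and the concentration-weighted energies of the pure elements. That reduces to a one-line computation; the numbers below are invented solely to illustrate the arithmetic.

# Formation enthalpy as |E - cA*EA - cB*EB - cC*EC| (illustrative values only).
energy = -3.20                  # energy of the mixed structure
concs = [0.5, 0.25, 0.25]       # concentrations of A, B, C
EA, EB, EC = -3.0, -2.8, -3.4   # energies of the pure elements

enthalpy = abs(energy - concs[0] * EA - concs[1] * EB - concs[2] * EC)
print(enthalpy)  # about 0.15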
import argparse
import sys

from ya_courier_helpers.util import orders_batch_upload

DELIMITER = '\t'
LINE_FORMAT = '<order_number>{}<service_duration_in_seconds>'.format(DELIMITER)


def parse_args():
    parser = argparse.ArgumentParser(usage=usage())
    parser.add_argument('--company-id', required=True, type=int, help='Your company ID in Ya.Courier')
    parser.add_argument('--token', required=True, help='Your Oauth token in Ya.Courier')
    return parser.parse_args()


def usage():
    return '\n\tcat input.txt | ya-courier-service-duration-uploader \\\n' + \
           '\t--token <YA.COURIER TOKEN> --company-id <YOUR COMPANY ID>\n\n' + \
           'This tool gets from stdin a list of orders with service duration in secs and uploads it to Ya.Courier.\n\n' + \
           'Input file format for each line:\n' + \
           '{}\n\n'.format(LINE_FORMAT) + \
           'For Ya.Courier API documentation visit https://courier.yandex.ru/api/v1/howto\n\n'


def get_request_data(stream):
    number_duration_pairs = [line.split(DELIMITER) for line in stream]
    request_data = []
    for i, pair in enumerate(number_duration_pairs):
        if len(pair) == 2:
            request_data.append({'number': pair[0], 'service_duration_s': pair[1]})
        else:
            print('Line {} has incorrect format and is skipped. Format: {}'.format(i, LINE_FORMAT))
    return request_data


def main():
    args = parse_args()
    request_data = get_request_data(sys.stdin)
    if request_data:
        response = orders_batch_upload(args.company_id, args.token, request_data)
        if response.status_code == 200:
            print('Data uploaded successfully:\n\tOrders updated: {updated}'.format(**response.json()))
        else:
            print('Nothing was changed. Error uploading data:')
            response_text = response.text
            print(response_text)
            if 'psycopg2.IntegrityError' in response_text:
                print('Most probably some orders were not created in Ya.Courier')
    else:
        print('Empty input data. No data was uploaded.')


if __name__ == '__main__':
    main()
roschupkin/ya.courier.helpers
ya_courier_helpers/order_service_duration_uploader.py
Python
apache-2.0
2,118
[ "VisIt" ]
05092fa8268c45e9953a7902527f4e341da4a1174dc7d140250f981376585d65
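The uploader above expects tab-separated <order_number>\t<service_duration_in_seconds> lines on stdin. A minimal sketch of that parsing step follows; the sample lines stand in for stdin and are invented, and the trailing-newline stripping is added here for clarity rather than copied from the tool.

DELIMITER = '\t'

# Two well-formed lines and one malformed line, standing in for sys.stdin.
sample_lines = ["ORDER-1\t600\n", "ORDER-2\t900\n", "broken line\n"]

request_data = []
for i, line in enumerate(sample_lines):
    pair = line.rstrip("\n").split(DELIMITER)
    if len(pair) == 2:
        request_data.append({"number": pair[0], "service_duration_s": pair[1]})
    else:
        print("Line {} has incorrect format and is skipped.".format(i))

print(request_data)
# [{'number': 'ORDER-1', 'service_duration_s': '600'}, {'number': 'ORDER-2', 'service_duration_s': '900'}]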
#!/usr/bin/env python3

from distutils.core import setup

setup(
    name='ProteinFeatureAnalyzer',
    version='0.0.0',
    author='Xingjie Pan',
    author_email='xingjiepan@gmail.com',
    url='https://github.com/Kortemme-Lab/protein_feature_analysis',
    packages=[
        'ProteinFeatureAnalyzer',
    ],
    install_requires=[
        'numpy',
        'scipy',
        'matplotlib',
        'biopython',
        'networkx',
        'docopt',
        'pandas',
        'flufl.lock',
        'sklearn',
        'pytest',
        'cylinder_fitting',
    ],
    entry_points={
        'console_scripts': [
        ],
    },
    description='ProteinFeatureAnalyzer extracts, analyzes and visualizes features from protein structures.',
    long_description=open('README.rst').read(),
    classifiers=[
        'Programming Language :: Python :: 3',
        'Intended Audience :: Science/Research',
    ],
)
Kortemme-Lab/protein_feature_analysis
setup.py
Python
mit
909
[ "Biopython" ]
f48f68050bba7df3b039d22891453165a8e1abead0948d66e4b2f68ff6e11912
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import rdkit
from rdkit.Chem import AllChem
from rdkit import DataStructs

__license__ = "X11"

METADATA = {
    "id": "method_rdkit_ecfp2_1024_tanimoto",
    "representation": "ecfp2_1024",
    "similarity": "tanimoto"
}


def _compute_fingerprint(molecule):
    return AllChem.GetMorganFingerprintAsBitVect(molecule, 1, nBits=1024)


def _compute_similarity(left, right):
    return DataStructs.TanimotoSimilarity(left, right)


def create_model(train_ligands, train_decoys):
    model = []
    for molecule in train_ligands:
        model.append({
            "name": molecule.GetProp("_Name"),
            "fingerprint": _compute_fingerprint(molecule)
        })
    model_information = {}
    return model, model_information


def compute_score(model, molecule):
    fingerprint = _compute_fingerprint(molecule)
    similarities = [_compute_similarity(fingerprint, item["fingerprint"])
                    for item in model]
    max_score = max(similarities)
    index_of_max_score = similarities.index(max_score)
    closest_molecule = model[index_of_max_score]
    return {
        "value": max_score,
        "info": {
            "closest": closest_molecule["name"]
        }
    }


def compute_similarity(left, right):
    return _compute_similarity(_compute_fingerprint(left), _compute_fingerprint(right))
skodapetr/lbvs-environment
methods/ecfp/ecfp2_1024_tanimoto.py
Python
mit
1,396
[ "RDKit" ]
ea89cc73d1299af3fd5f3c960ab3ef5de1e7c029257e2bac65075c6ccdd0707c
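The helpers above wrap two RDKit calls: a radius-1 (ECFP2-like) Morgan fingerprint folded to 1024 bits and a Tanimoto comparison. The self-contained sketch below shows the same calls on two small molecules; the SMILES strings are chosen only for illustration.

from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

# Two small molecules used purely as examples.
ethanol = Chem.MolFromSmiles("CCO")
propanol = Chem.MolFromSmiles("CCCO")

# Radius-1 Morgan fingerprints folded to 1024 bits, as in _compute_fingerprint().
fp_a = AllChem.GetMorganFingerprintAsBitVect(ethanol, 1, nBits=1024)
fp_b = AllChem.GetMorganFingerprintAsBitVect(propanol, 1, nBits=1024)

print(DataStructs.TanimotoSimilarity(fp_a, fp_b))  # value in [0, 1]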
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# PMDA
# Copyright (c) 2019 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
"""
pmda.rms
========

Ready to use root-mean-square analyses built via MDAnalysis with dask.

The full documentation can be found at https://www.mdanalysis.org/pmda/.
"""
from __future__ import absolute_import

from .rmsd import RMSD
from .rmsf import RMSF

__all__ = ["RMSD", "RMSF"]
MDAnalysis/pmda
pmda/rms/__init__.py
Python
gpl-2.0
613
[ "MDAnalysis" ]
f79bf5e673fa03ca599a1a3e92e74dd71da360e94100aa3a4b2c9ec9ae9b5193
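The package module above only re-exports the RMSD and RMSF analysis classes. A hedged usage sketch follows, assuming pmda's RMSD takes a mobile and a reference AtomGroup in the same way as MDAnalysis.analysis.rms.RMSD and that run() accepts an n_jobs argument, as the pmda documentation describes; the file names are placeholders, not real data files.

import MDAnalysis as mda
from pmda.rms import RMSD

# Placeholder file names; substitute a real topology/trajectory pair.
u = mda.Universe("topology.psf", "trajectory.dcd")
ref = mda.Universe("topology.psf", "trajectory.dcd")

ca = u.select_atoms("name CA")
ca_ref = ref.select_atoms("name CA")

# Runs the RMSD calculation in parallel over trajectory blocks via dask.
result = RMSD(ca, ca_ref).run(n_jobs=2)
print(result.rmsd)  # one row per frame: frame index, time, RMSD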
# -*- coding: UTF-8 -*- from gettext import bindtextdomain, install, textdomain, translation from locale import Error as LocaleError, LC_ALL, LC_COLLATE, LC_CTYPE, LC_MESSAGES, LC_MONETARY, LC_NUMERIC, LC_TIME, setlocale, getlocale from os import environ, listdir from os.path import isdir from six import PY2 from subprocess import Popen, PIPE from time import localtime, strftime, time from Tools.CountryCodes import setISO3166 from Tools.Directories import SCOPE_LANGUAGE, resolveFilename PACKAGER = "/usr/bin/opkg" PACKAGE_TEMPLATE = "enigma2-locale-%s" PERMANENT_LOCALES = ["de_DE", "en_US", "fr_FR"] languagePath = resolveFilename(SCOPE_LANGUAGE) try: if PY2: install("enigma2", languagePath, unicode=False, codeset="UTF-8", names=("ngettext", "pgettext")) else: install("enigma2", languagePath, names=("ngettext", "pgettext")) except UnicodeDecodeError: print("[International] Error: The language translation data in '%s' has failed to initialise! Translations are not possible." % languagePath) install("enigma2", "/", names=("ngettext", "pgettext")) bindtextdomain("enigma2", languagePath) textdomain("enigma2") LANG_NAME = 0 LANG_TRANSLATED = 1 LANG_NATIVE = 2 LANG_ENCODING = 3 LANG_COUNTRYCODES = 4 LANG_MAX = 4 # In this code the following meanings are used: # Country: An official country as recognised by ISO, eg "AU" for Australia. # Language: An official language as recognised by ISO, eg "en" for English. # Locale: An official language as spoken in a country, eg "en_AU" for English (Australian). LANGUAGE_DATA = { # DEVELOPER NOTE: # # Should this language table include the ISO three letter code for use in the subtitle code? # Perhaps also have a flag to indicate that the language should be listed in the subtitle list? # # Fields: English Name, Translated Name, Localised Name, Encoding # Character Set, (Tuple of ISO-3166 Alpha2 Country Codes). # NOTE: The first item of the tuple should be the # default or commonly known country for the language. # To make managing this list easier please keep languages in ISO # 639-2 Code order. Language codes should be in lower case and # country codes should be in upper case. Be careful not to # confuse / mix the language and country! # # The Character Set entry is only used to set a shell variable used # by Gstreamer. # # As noted above, if a language is used in more than one country then # the default locale contry should be listed first. 
# # https://www.loc.gov/standards/iso639-2/php/code_list.php # https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes # https://lh.2xlibre.net/locales/ "aa": ("Afar", _("Afar"), "Afaraf", "UTF-8", ("DJ", "ER", "ET")), "ab": ("Abkhazian", _("Abkhazian"), "Аҧсуа Бызшәа / Аҧсшәа", "UTF-8", ()), "ae": ("Avestan", _("Avestan"), "Avesta", "UTF-8", ()), "af": ("Afrikaans", _("Afrikaans"), "Afrikaans", "UTF-8", ("ZA",)), "ak": ("Akan", _("Akan"), "Akan", "UTF-8", ("GH",)), "am": ("Amharic", _("Amharic"), "አማርኛ", "UTF-8", ("ET",)), "an": ("Aragonese", _("Aragonese"), "Aragonés", "UTF-8", ("ES",)), "ar": ("Arabic", _("Arabic"), "العربية", "ISO-8859-15", ("AE", "BH", "DZ", "EG", "IN", "IQ", "JO", "KW", "LB", "LY", "MA", "OM", "QA", "SA", "SD", "SS", "SY", "TN", "YE")), "as": ("Assamese", _("Assamese"), "অসমীয়া", "UTF-8", ("IN",)), "av": ("Avaric", _("Avaric"), "Авар мацӀ / МагӀарул мацӀ", "UTF-8", ()), "ay": ("Aymara", _("Aymara"), "Aymar Aru", "UTF-8", ("PE",)), "az": ("Azerbaijani", _("Azerbaijani"), "Azərbaycan Dili", "UTF-8", ("AZ", "IR")), "ba": ("Bashkir", _("Bashkir"), "башҡорт теле", "UTF-8", ()), "be": ("Belarusian", _("Belarusian"), "беларуская мова", "UTF-8", ("BY",)), "bg": ("Bulgarian", _("Bulgarian"), "български език", "ISO-8859-15", ("BG",)), "bh": ("Bihari languages", _("Bihari languages"), "भोजपुरी", "UTF-8", ()), "bi": ("Bislama", _("Bislama"), "Bislama", "UTF-8", ("TV", "VU")), "bm": ("Bambara", _("Bambara"), "Bamanankan", "UTF-8", ("ML",)), "bn": ("Bengali", _("Bengali"), "বাংলা", "UTF-8", ("BD", "IN")), "bo": ("Tibetan", _("Tibetan"), "བོད་ཡིག", "UTF-8", ("CN", "IN")), "br": ("Breton", _("Breton"), "Brezhoneg", "UTF-8", ("FR",)), "bs": ("Bosnian", _("Bosnian"), "Bosanski Jezik", "UTF-8", ("BA",)), "ca": ("Catalan / Valencian", _("Catalan / Valencian"), "Català / Valencià", "ISO-8859-15", ("AD", "ES", "FR", "IT")), "ce": ("Chechen", _("Chechen"), "Нохчийн Мотт", "UTF-8", ("RU",)), "ch": ("Chamorro", _("Chamorro"), "Chamoru", "UTF-8", ()), "co": ("Corsican", _("Corsican"), "Corsu, Lingua Corsa", "UTF-8", ()), "cr": ("Cree", _("Cree"), "ᓀᐦᐃᔭᐍᐏᐣ", "UTF-8", ()), "cs": ("Czech", _("Czech"), "Čeština / Český Jazyk", "ISO-8859-15", ("CZ",)), "cu": ("Church Slavic", _("Church Slavic"), "Ѩзыкъ Словѣньскъ", "UTF-8", ()), "cv": ("Chuvash", _("Chuvash"), "Чӑваш Чӗлхи", "UTF-8", ("RU",)), "cy": ("Welsh", _("Welsh"), "Cymraeg", "UTF-8", ("GB",)), "da": ("Danish", _("Danish"), "Dansk", "ISO-8859-15", ("DK",)), "de": ("German", _("German"), "Deutsch", "ISO-8859-15", ("DE", "AT", "BE", "CH", "IT", "LI", "LU")), "dv": ("Divehi / Dhivehi / Maldivian", _("Divehi / Dhivehi / Maldivian"), "ދިވެހި", "UTF-8", ("MV",)), "dz": ("Dzongkha", _("Dzongkha"), "རྫོང་ཁ", "UTF-8", ("BT",)), "ee": ("Ewe", _("Ewe"), "Eʋegbe", "UTF-8", ()), "el": ("Greek", _("Greek"), "Ελληνικά", "ISO-8859-7", ("GR", "CY")), "en": ("English", _("English"), "English", "ISO-8859-15", ("US", "AG", "AU", "BW", "BZ", "CA", "DK", "GB", "HK", "IE", "IL", "IN", "JM", "KH", "NG", "NZ", "PH", "SC", "SG", "TT", "ZA", "ZM", "ZW")), "eo": ("Esperanto", _("Esperanto"), "Esperanto", "UTF-8", ()), "es": ("Spanish / Castilian", _("Spanish / Castilian"), "Español", "ISO-8859-15", ("ES", "AR", "BO", "CL", "CO", "CR", "CU", "DO", "EC", "GT", "HN", "MX", "NI", "PA", "PE", "PR", "PY", "SV", "US", "UY", "VE")), "et": ("Estonian", _("Estonian"), "Eesti / Eesti keel", "ISO-8859-15", ("EE",)), "eu": ("Basque", _("Basque"), "Euskara / Euskera", "UTF-8", ("ES",)), "fa": ("Farsi / Persian", _("Farsi / Persian"), "فارسی", "ISO-8859-15", ("IR",)), "ff": ("Fulah", 
_("Fulah"), "Fulfulde / Pulaar / Pular", "UTF-8", ("SN",)), "fi": ("Finnish", _("Finnish"), "Suomi / Suomen kieli", "ISO-8859-15", ("FI",)), "fj": ("Fijian", _("Fijian"), "Vosa Vakaviti", "UTF-8", ()), "fo": ("Faroese", _("Faroese"), "Føroyskt", "UTF-8", ("FO",)), "fr": ("French", _("French"), "Français", "ISO-8859-15", ("FR", "AG", "AI", "BE", "BB", "BS", "CA", "CG", "CH", "CI", "CM", "CU", "DO", "DM", "GD", "GY", "HT", "JM", "KN", "LC", "LU", "MA", "MC", "ML", "MQ", "PR", "SN", "SR", "SX", "TT", "VC", "VI")), "fy": ("Western Frisian", _("Western Frisian"), "Frysk", "ISO-8859-15", ("NL", "DE")), "ga": ("Irish", _("Irish"), "Gaeilge", "UTF-8", ("IE",)), "gd": ("Gaelic", _("Gaelic"), "Gàidhlig", "UTF-8", ("GB",)), "gl": ("Galician", _("Galician"), "Galego", "UTF-8", ("ES-GA",)), "gn": ("Guarani", _("Guarani"), "Avañe'ẽ", "UTF-8", ("PY",)), "gu": ("Gujarati", _("Gujarati"), "ગુજરાતી", "UTF-8", ("IN",)), "gv": ("Manx", _("Manx"), "Gaelg / Gailck", "UTF-8", ("GB",)), "ha": ("Hausa", _("Hausa"), "هَوُسَ", "UTF-8", ("NG",)), "he": ("Hebrew", _("Hebrew"), "עברית‎", "ISO-8859-15", ("IL",)), "hi": ("Hindi", _("Hindi"), "हिन्दी / हिंदी", "UTF-8", ("IN",)), "ho": ("Hiri Motu", _("Hiri Motu"), "Hiri Motu", "UTF-8", ()), "hr": ("Croatian", _("Croatian"), "Hrvatski Jezik", "ISO-8859-15", ("HR",)), "ht": ("Haitian / Haitian Creole", _("Haitian / Haitian Creole"), "Kreyòl ayisyen", "UTF-8", ("HT",)), "hu": ("Hungarian", _("Hungarian"), "Magyar", "ISO-8859-15", ("HU",)), "hy": ("Armenian", _("Armenian"), "Հայերեն", "UTF-8", ("AM",)), "hz": ("Herero", _("Herero"), "Otjiherero", "UTF-8", ()), "ia": ("Interlingua", _("Interlingua"), "Interlingua", "UTF-8", ("FR",)), "id": ("Indonesian", _("Indonesian"), "Bahasa Indonesia", "ISO-8859-15", ("ID",)), "ie": ("Interlingue / Occidental", _("Interlingue / Occidental"), "Interlingue", "UTF-8", ()), "ig": ("Igbo", _("Igbo"), "Asụsụ Igbo", "UTF-8", ("NG",)), "ii": ("Sichuan Yi / Nuosu", _("Sichuan Yi / Nuosu"), "ꆈꌠ꒿ Nuosuhxop", "UTF-8", ()), "ik": ("Inupiaq", _("Inupiaq"), "Iñupiaq / Iñupiatun", "UTF-8", ("CA",)), "io": ("Ido", _("Ido"), "Ido", "UTF-8", ()), "is": ("Icelandic", _("Icelandic"), "Íslenska", "ISO-8859-15", ("IS",)), "it": ("Italian", _("Italian"), "Italiano", "ISO-8859-15", ("IT", "CH")), "iu": ("Inuktitut", _("Inuktitut"), "ᐃᓄᒃᑎᑐᑦ", "UTF-8", ("CA",)), "ja": ("Japanese", _("Japanese"), "日本語 (にほんご)", "UTF-8", ("JP",)), "jv": ("Javanese", _("Javanese"), "ꦧꦱꦗꦮ / Basa Jawa", "UTF-8", ()), "ka": ("Georgian", _("Georgian"), "ქართული", "UTF-8", ("GE",)), "kg": ("Kongo", _("Kongo"), "Kikongo", "UTF-8", ()), "ki": ("Kikuyu / Gikuyu", _("Kikuyu / Gikuyu"), "Gĩkũyũ", "UTF-8", ()), "kj": ("Kuanyama / Kwanyama", _("Kuanyama / Kwanyama"), "Kuanyama", "UTF-8", ()), "kk": ("Kazakh", _("Kazakh"), "Қазақ тілі", "UTF-8", ("KZ",)), "kl": ("Kalaallisut / Greenlandic", _("Kalaallisut / Greenlandic"), "Kalaallisut / Kalaallit oqaasii", "UTF-8", ("GL",)), "km": ("Central Khmer", _("Central Khmer"), "ខ្មែរ, ខេមរភាសា, ភាសាខ្មែរ", "UTF-8", ("KH",)), "kn": ("Kannada", _("Kannada"), "ಕನ್ನಡ", "UTF-8", ("IN",)), "ko": ("Korean", _("Korean"), "한국어", "UTF-8", ("KR",)), "kr": ("Kanuri", _("Kanuri"), "Kanuri", "UTF-8", ()), "ks": ("Kashmiri", _("Kashmiri"), "कश्मीरी / كشميري", "UTF-8", ("IN",)), "ku": ("Kurdish", _("Kurdish"), "Kurdî / کوردی", "ISO-8859-15", ("TR",)), "kv": ("Komi", _("Komi"), "Коми кыв", "UTF-8", ()), "kw": ("Cornish", _("Cornish"), "Kernewek", "UTF-8", ("GB",)), "ky": ("Kirghiz / Kyrgyz", _("Kirghiz / Kyrgyz"), "Кыргызча, Кыргыз тили", "UTF-8", ("KG",)), "la": ("Latin", 
_("Latin"), "Latine / Lingua Latina", "UTF-8", ()), "lb": ("Luxembourgish / Letzeburgesch", _("Luxembourgish / Letzeburgesch"), "Lëtzebuergesch", "UTF-8", ("LU",)), "lg": ("Ganda", _("Ganda"), "Luganda", "UTF-8", ("UG",)), "li": ("Limburgan / Limburger / Limburgish", _("Limburgan / Limburger / Limburgish"), "Limburgs", "UTF-8", ("BE", "NL")), "ln": ("Lingala", _("Lingala"), "Lingála", "UTF-8", ("CD",)), "lo": ("Lao", _("Lao"), "ພາສາລາວ", "UTF-8", ("LA",)), "lt": ("Lithuanian", _("Lithuanian"), "Lietuvių Kalba", "ISO-8859-15", ("LT",)), "lu": ("Luba-Katanga", _("Luba-Katanga"), "Kiluba", "UTF-8", ()), "lv": ("Latvian", _("Latvian"), "Latviešu Valoda", "ISO-8859-15", ("LV",)), "mg": ("Malagasy", _("Malagasy"), "Fiteny Malagasy", "UTF-8", ("MG",)), "mh": ("Marshallese", _("Marshallese"), "Kajin M̧ajeļ", "UTF-8", ("MH",)), "mi": ("Maori", _("Maori"), "te reo Māori", "UTF-8", ("NZ",)), "mk": ("Macedonian", _("Macedonian"), "Македонски Јазик", "UTF-8", ("MK",)), "ml": ("Malayalam", _("Malayalam"), "മലയാളം", "UTF-8", ("IN",)), "mn": ("Mongolian", _("Mongolian"), "Монгол хэл", "UTF-8", ("MN",)), "mr": ("Marathi", _("Marathi"), "मराठी", "UTF-8", ("IN",)), "ms": ("Malay", _("Malay"), "Bahasa Melayu, بهاس ملايو", "UTF-8", ("MY",)), "mt": ("Maltese", _("Maltese"), "Malti", "UTF-8", ("MT",)), "my": ("Burmese", _("Burmese"), "ဗမာစာ", "UTF-8", ("MM",)), "na": ("Nauru", _("Nauru"), "Dorerin Naoero", "UTF-8", ()), "nb": ("Norwegian Bokml", _("Norwegian Bokml"), "Norsk Bokmål", "ISO-8859-15", ("NO",)), "nd": ("North Ndebele", _("North Ndebele"), "isiNdebele", "UTF-8", ()), "ne": ("Nepali", _("Nepali"), "नेपाली", "UTF-8", ("NP",)), "ng": ("Ndonga", _("Ndonga"), "Owambo", "UTF-8", ()), "nl": ("Dutch / Flemish", _("Dutch / Flemish"), "Nederlands / Vlaams", "ISO-8859-15", ("NL", "AW", "BE")), "nn": ("Norwegian Nynorsk", _("Norwegian Nynorsk"), "Norsk Nynorsk", "UTF-8", ("NO",)), "no": ("Norwegian", _("Norwegian"), "Norsk", "ISO-8859-15", ("NO",)), "nr": ("South Ndebele", _("South Ndebele"), "isiNdebele", "UTF-8", ("ZA",)), "nv": ("Navajo / Navaho", _("Navajo / Navaho"), "Diné bizaad", "UTF-8", ()), "ny": ("Chichewa / Chewa / Nyanja", _("Chichewa / Chewa / Nyanja"), "ChiCheŵa / Chinyanja", "UTF-8", ()), "oc": ("Occitan", _("Occitan"), "Occitan / Lenga D'òc", "UTF-8", ("FR",)), "oj": ("Ojibwa", _("Ojibwa"), "ᐊᓂᔑᓈᐯᒧᐎᓐ", "UTF-8", ()), "om": ("Oromo", _("Oromo"), "Afaan Oromoo", "UTF-8", ("ET", "KE")), "or": ("Oriya", _("Oriya"), "ଓଡ଼ିଆ", "UTF-8", ("IN",)), "os": ("Ossetian / Ossetic", _("Ossetian / Ossetic"), "Ирон Æвзаг", "UTF-8", ("RU",)), "pa": ("Panjabi / Punjabi", _("Panjabi / Punjabi"), "ਪੰਜਾਬੀ, پنجابی", "UTF-8", ("IN", "PK")), "pi": ("Pali", _("Pali"), "पालि, पाळि", "UTF-8", ()), "pl": ("Polish", _("Polish"), "Język Polski, Polszczyzna", "ISO-8859-15", ("PL",)), "ps": ("Pushto / Pashto", _("Pushto / Pashto"), "پښتو", "UTF-8", ("AF",)), "pt": ("Portuguese", _("Portuguese"), "Português", "ISO-8859-15", ("PT", "BR")), "qu": ("Quechua", _("Quechua"), "Runa Simi, Kichwa", "UTF-8", ()), "rm": ("Romansh", _("Romansh"), "Rumantsch Grischun", "UTF-8", ()), "rn": ("Rundi", _("Rundi"), "Ikirundi", "UTF-8", ()), "ro": ("Romanian", _("Romanian"), "Română", "ISO-8859-15", ("RO",)), "ru": ("Russian", _("Russian"), "Русский", "ISO-8859-15", ("RU", "UA")), "rw": ("Kinyarwanda", _("Kinyarwanda"), "Ikinyarwanda", "UTF-8", ("RW",)), "sa": ("Sanskrit", _("Sanskrit"), "संस्कृतम्", "UTF-8", ("IN",)), "sb": ("Sorbian", _("Sorbian"), "Sorbian", "UTF-8", ()), # Not in Wikipedia. 
"sc": ("Sardinian", _("Sardinian"), "Sardu", "UTF-8", ("IT",)), "sd": ("Sindhi", _("Sindhi"), "सिन्धी, سنڌي، سندھی", "UTF-8", ("IN",)), "se": ("Northern Sami", _("Northern Sami"), "Davvisámegiella", "UTF-8", ("NO",)), "sg": ("Sango", _("Sango"), "Yângâ tî sängö", "UTF-8", ()), "si": ("Sinhala / Sinhalese", _("Sinhala / Sinhalese"), "සිංහල", "UTF-8", ("LK",)), "sk": ("Slovak", _("Slovak"), "Slovenčina / Slovenský Jazyk", "ISO-8859-15", ("SK",)), "sl": ("Slovenian", _("Slovenian"), "Slovenski Jezik / Slovenščina", "ISO-8859-15", ("SI",)), "sm": ("Samoan", _("Samoan"), "Gagana Fa'a Samoa", "UTF-8", ("WS",)), "sn": ("Shona", _("Shona"), "chiShona", "UTF-8", ()), "so": ("Somali", _("Somali"), "Soomaaliga, af Soomaali", "UTF-8", ("DJ", "ET", "KE", "SO")), "sq": ("Albanian", _("Albanian"), "Shqip", "UTF-8", ("AL", "KV", "MK")), "sr": ("Serbian", _("Serbian"), "Српски Језик", "ISO-8859-15", ("RS", "ME")), "ss": ("Swati", _("Swati"), "SiSwati", "UTF-8", ("ZA",)), "st": ("Sotho, Southern", _("Sotho, Southern"), "Sesotho", "UTF-8", ("ZA",)), "su": ("Sundanese", _("Sundanese"), "Basa Sunda", "UTF-8", ("SD",)), "sv": ("Swedish", _("Swedish"), "Svenska", "ISO-8859-15", ("SE", "FI")), "sw": ("Swahili", _("Swahili"), "Kiswahili", "UTF-8", ("KE", "TZ")), "ta": ("Tamil", _("Tamil"), "தமிழ்", "UTF-8", ("IN", "LK")), "te": ("Telugu", _("Telugu"), "తెలుగు", "UTF-8", ("IN",)), "tg": ("Tajik", _("Tajik"), "тоҷикӣ, toçikī, تاجیکی", "UTF-8", ("TJ",)), "th": ("Thai", _("Thai"), "ไทย", "ISO-8859-15", ("TH",)), "ti": ("Tigrinya", _("Tigrinya"), "ትግርኛ", "UTF-8", ("ER", "ET")), "tk": ("Turkmen", _("Turkmen"), "Türkmen, Түркмен", "UTF-8", ("TM",)), "tl": ("Tagalog", _("Tagalog"), "Wikang Tagalog", "UTF-8", ("PH",)), "tn": ("Tswana", _("Tswana"), "Setswana", "UTF-8", ("ZA",)), "to": ("Tonga", _("Tonga"), "Faka Tonga", "UTF-8", ("TO",)), "tr": ("Turkish", _("Turkish"), "Türkçe", "ISO-8859-15", ("TR", "CY")), "ts": ("Tsonga", _("Tsonga"), "Xitsonga", "UTF-8", ("ZA",)), "tt": ("Tatar", _("Tatar"), "Татар теле, Tatar tele", "UTF-8", ("RU",)), "tw": ("Twi", _("Twi"), "Twi", "UTF-8", ()), "ty": ("Tahitian", _("Tahitian"), "Reo Tahiti", "UTF-8", ()), "ug": ("Uighur / Uyghur", _("Uighur / Uyghur"), "ئۇيغۇرچە‎ / Uyghurche", "UTF-8", ("CN",)), "uk": ("Ukrainian", _("Ukrainian"), "Українська", "ISO-8859-15", ("UA",)), "ur": ("Urdu", _("Urdu"), "اردو", "UTF-8", ("IN", "PK")), "uz": ("Uzbek", _("Uzbek"), "Oʻzbek, Ўзбек, أۇزبېك", "UTF-8", ("UZ",)), "ve": ("Venda", _("Venda"), "Tshivenḓa", "UTF-8", ("ZA",)), "vi": ("Vietnamese", _("Vietnamese"), "Tiếng Việt", "UTF-8", ("VN",)), "vo": ("Volapük", _("Volapük"), "Volapük", "UTF-8", ()), "wa": ("Walloon", _("Walloon"), "Walon", "UTF-8", ("BE",)), "wo": ("Wolof", _("Wolof"), "Wollof", "UTF-8", ("SN",)), "xh": ("Xhosa", _("Xhosa"), "isiXhosa", "UTF-8", ("ZA",)), "yi": ("Yiddish", _("Yiddish"), "ייִדיש", "UTF-8", ("US",)), "yo": ("Yoruba", _("Yoruba"), "Yorùbá", "UTF-8", ("NG",)), "za": ("Zhuang / Chuang", _("Zhuang / Chuang"), "Saɯ cueŋƅ / Saw cuengh", "UTF-8", ()), "zh": ("Chinese", _("Chinese"), "中文", "UTF-8", ("CN", "HK", "SG", "TW")), "zu": ("Zulu", _("Zulu"), "isiZulu", "UTF-8", ("ZA",)) } COUNTRY_ALPHA3 = 0 COUNTRY_NUMERIC = 1 COUNTRY_NAME = 2 COUNTRY_TRANSLATED = 3 COUNTRY_NATIVE = 4 COUNTRY_MAX = 4 COUNTRY_DATA = { # https://www.iso.org/obp/ui/#search/code/ # https://www.worldatlas.com/aatlas/ctycodes.htm # https://en.wikipedia.org/wiki/List_of_ISO_3166_country_codes # https://en.wikipedia.org/wiki/ISO_3166-2 # https://en.wikipedia.org/wiki/ISO_3166-3 "AD": ("AND", "020", "Andorra", 
_("Andorra"), "d'Andorra"), "AE": ("ARE", "784", "United Arab Emirates", _("United Arab Emirates"), "الإمارات العربية المتحدة‎ al-ʾImārāt al-ʿArabīyyah al-Muttaḥidah"), "AF": ("AFG", "004", "Afghanistan", _("Afghanistan"), "افغانستان"), "AG": ("ATG", "028", "Antigua and Barbuda", _("Antigua and Barbuda"), "Antigua and Barbuda"), "AI": ("AIA", "660", "Anguilla", _("Anguilla"), "Anguilla"), "AL": ("ALB", "008", "Albania", _("Albania"), "Shqipëri"), "AM": ("ARM", "051", "Armenia", _("Armenia"), "Հայաստան"), "AO": ("AGO", "024", "Angola", _("Angola"), "Angola"), "AQ": ("ATA", "010", "Antarctica", _("Antarctica"), "Antarctica"), "AR": ("ARG", "032", "Argentina", _("Argentina"), "Argentina"), "AS": ("ASM", "016", "American Samoa", _("American Samoa"), "Amerika Sāmoa"), "AT": ("AUT", "040", "Austria", _("Austria"), "Österreich"), "AU": ("AUS", "036", "Australia", _("Australia"), "Australia"), "AW": ("ABW", "533", "Aruba", _("Aruba"), "Aruba"), "AX": ("ALA", "248", "Aland Islands", _("Aland Islands"), "Åland Islands"), "AZ": ("AZE", "031", "Azerbaijan", _("Azerbaijan"), "Azərbaycan"), "BA": ("BIH", "070", "Bosnia and Herzegovina", _("Bosnia and Herzegovina"), "Bosna i Hercegovina"), "BB": ("BRB", "052", "Barbados", _("Barbados"), "Barbados"), "BD": ("BGD", "050", "Bangladesh", _("Bangladesh"), "বাংলাদেশ"), "BE": ("BEL", "056", "Belgium", _("Belgium"), "België"), "BF": ("BFA", "854", "Burkina Faso", _("Burkina Faso"), "Buʁkina Faso"), "BG": ("BGR", "100", "Bulgaria", _("Bulgaria"), "България"), "BH": ("BHR", "048", "Bahrain", _("Bahrain"), "البحرين‎"), "BI": ("BDI", "108", "Burundi", _("Burundi"), "y'Uburundi"), "BJ": ("BEN", "204", "Benin", _("Benin"), "Bénin"), "BL": ("BLM", "652", "Saint Barthelemy", _("Saint Barthelemy"), "Saint-Barthélemy"), "BM": ("BMU", "060", "Bermuda", _("Bermuda"), "Bermuda"), "BN": ("BRN", "096", "Brunei Darussalam", _("Brunei Darussalam"), "Negara Brunei Darussalam"), "BO": ("BOL", "068", "Bolivia", _("Bolivia"), "Mborivia"), "BQ": ("BES", "535", "Bonaire", _("Bonaire"), "Bonaire"), "BR": ("BRA", "076", "Brazil", _("Brazil"), "Brasil"), "BS": ("BHS", "044", "Bahamas", _("Bahamas"), "Bahamas"), "BT": ("BTN", "064", "Bhutan", _("Bhutan"), "འབྲུག་རྒྱལ་ཁབ་"), "BV": ("BVT", "074", "Bouvet Island", _("Bouvet Island"), "Bouvetøya"), "BW": ("BWA", "072", "Botswana", _("Botswana"), "Botswana"), "BY": ("BLR", "112", "Belarus", _("Belarus"), "Беларусь"), "BZ": ("BLZ", "084", "Belize", _("Belize"), "Belize"), "CA": ("CAN", "124", "Canada", _("Canada"), "Canada"), "CC": ("CCK", "166", "Cocos (Keeling) Islands", _("Cocos (Keeling) Islands"), "Cocos (Keeling) Islands"), "CD": ("COD", "180", "Congo, Democratic Republic of the", _("Democratic Republic of the Congo"), "République démocratique du Congo"), "CF": ("CAF", "140", "Central African Republic", _("Central African Republic"), "Ködörösêse tî Bêafrîka"), "CG": ("COG", "178", "Congo", _("Congo"), "Congo"), "CH": ("CHE", "756", "Switzerland", _("Switzerland"), "Suisse"), "CI": ("CIV", "384", "Cote d'Ivoire / Ivory Coast", _("Cote d'Ivoire / Ivory Coast"), "Côte d'Ivoire"), "CK": ("COK", "184", "Cook Islands", _("Cook Islands"), "Kūki 'Āirani"), "CL": ("CHL", "152", "Chile", _("Chile"), "Chile"), "CM": ("CMR", "120", "Cameroon", _("Cameroon"), "Cameroun"), "CN": ("CHN", "156", "China", _("China"), "中国"), "CO": ("COL", "170", "Colombia", _("Colombia"), "Colombia"), "CR": ("CRI", "188", "Costa Rica", _("Costa Rica"), "Costa Rica"), "CU": ("CUB", "192", "Cuba", _("Cuba"), "Cuba"), "CV": ("CPV", "132", "Cape Verde", _("Cape Verde"), "Cabo 
Verde"), "CW": ("CUW", "531", "Curacao", _("Curacao"), "Kòrsou"), "CX": ("CXR", "162", "Christmas Island", _("Christmas Island"), "聖誕島 / Wilayah Pulau Krismas"), "CY": ("CYP", "196", "Cyprus", _("Cyprus"), "Κύπρος"), "CZ": ("CZE", "203", "Czech Republic", _("Czech Republic"), "Česká Republika"), "DE": ("DEU", "276", "Germany", _("Germany"), "Deutschland"), "DJ": ("DJI", "262", "Djibouti", _("Djibouti"), "جيبوتي‎"), "DK": ("DNK", "208", "Denmark", _("Denmark"), "Danmark"), "DM": ("DMA", "212", "Dominica", _("Dominica"), "Dominique"), "DO": ("DOM", "214", "Dominican Republic", _("Dominican Republic"), "República Dominicana"), "DZ": ("DZA", "012", "Algeria", _("Algeria"), "الجزائر‎"), "EC": ("ECU", "218", "Ecuador", _("Ecuador"), "Ikwayur"), "EE": ("EST", "233", "Estonia", _("Estonia"), "Eesti"), "EG": ("EGY", "818", "Egypt", _("Egypt"), "ِصر‎"), "EH": ("ESH", "732", "Western Sahara", _("Western Sahara"), "الصحراء الغربية"), "ER": ("ERI", "232", "Eritrea", _("Eritrea"), "ኤርትራ"), "ES": ("ESP", "724", "Spain", _("Spain"), "España"), "ES-GA": ("ESP", "724", "Galicia (Spain)", _("Galicia (Spain)"), "Galicia (España)"), "ET": ("ETH", "231", "Ethiopia", _("Ethiopia"), "ኢትዮጵያ"), "FI": ("FIN", "246", "Finland", _("Finland"), "Suomi"), "FJ": ("FJI", "242", "Fiji", _("Fiji"), "Viti"), "FK": ("FLK", "238", "Falkland Islands (Malvinas)", _("Falkland Islands (Malvinas)"), "Islas Malvinas"), "FM": ("FSM", "583", "Micronesia, Federated States of", _("Micronesia, Federated States of"), "Micronesia, Federated States of"), "FO": ("FRO", "234", "Faroe Islands", _("Faroe Islands"), "Føroyar"), "FR": ("FRA", "250", "France", _("France"), "Française"), "GA": ("GAB", "266", "Gabon", _("Gabon"), "Gabonaise"), "GB": ("GBR", "826", "United Kingdom", _("United Kingdom"), "United Kingdom"), "GD": ("GRD", "308", "Grenada", _("Grenada"), "Grenada"), "GE": ("GEO", "268", "Georgia", _("Georgia"), "საქართველო"), "GF": ("GUF", "254", "French Guiana", _("French Guiana"), "Guyane"), "GG": ("GGY", "831", "Guernsey", _("Guernsey"), "Guernési"), "GH": ("GHA", "288", "Ghana", _("Ghana"), "Ghana"), "GI": ("GIB", "292", "Gibraltar", _("Gibraltar"), "جبل طارق"), "GL": ("GRL", "304", "Greenland", _("Greenland"), "Grønland"), "GM": ("GMB", "270", "Gambia", _("Gambia"), "Gambia"), "GN": ("GIN", "324", "Guinea", _("Guinea"), "Guinée"), "GP": ("GLP", "312", "Guadeloupe", _("Guadeloupe"), "Gwadloup"), "GQ": ("GNQ", "226", "Equatorial Guinea", _("Equatorial Guinea"), "Guinea Ecuatorial"), "GR": ("GRC", "300", "Greece", _("Greece"), "Ελληνική Δημοκρατία"), "GS": ("SGS", "239", "South Georgia and the South Sandwich Islands", _("South Georgia and the South Sandwich Islands"), "South Georgia and the South Sandwich Islands"), "GT": ("GTM", "320", "Guatemala", _("Guatemala"), "Guatemala"), "GU": ("GUM", "316", "Guam", _("Guam"), "Guåhån"), "GW": ("GNB", "624", "Guinea-Bissau", _("Guinea-Bissau"), "Guiné-Bissau"), "GY": ("GUY", "328", "Guyana", _("Guyana"), "Guyana"), "HK": ("HKG", "344", "Hong Kong", _("Hong Kong"), "香港"), "HM": ("HMD", "334", "Heard Island and McDonald Islands", _("Heard Island and McDonald Islands"), "Heard Island and McDonald Islands"), "HN": ("HND", "340", "Honduras", _("Honduras"), "Honduras"), "HR": ("HRV", "191", "Croatia", _("Croatia"), "Hrvatska"), "HT": ("hti", "332", "Haiti", _("Haiti"), "Haïti"), "HU": ("HUN", "348", "Hungary", _("Hungary"), "Magyarország"), "ID": ("IDN", "360", "Indonesia", _("Indonesia"), "Indonesia"), "IE": ("IRL", "372", "Ireland", _("Ireland"), "Éire"), "IL": ("ISR", "376", "Israel", _("Israel"), 
"ישראל"), "IM": ("IMN", "833", "Isle of Man", _("Isle of Man"), "Mannin"), "IN": ("IND", "356", "India", _("India"), "Bhārat"), "IO": ("IOT", "086", "British Indian Ocean Territory", _("British Indian Ocean Territory"), "British Indian Ocean Territory"), "IQ": ("IRQ", "368", "Iraq", _("Iraq"), "ٱلْعِرَاق‎"), "IR": ("IRN", "364", "Iran, Islamic Republic of", _("Iran, Islamic Republic of"), "جمهوری اسلامی ایران"), "IS": ("ISL", "352", "Iceland", _("Iceland"), "Ísland"), "IT": ("ITA", "380", "Italy", _("Italy"), "Italia"), "JE": ("JEY", "832", "Jersey", _("Jersey"), "Jèrri"), "JM": ("JAM", "388", "Jamaica", _("Jamaica"), "Jumieka"), "JO": ("JOR", "400", "Jordan", _("Jordan"), "الْأُرْدُنّ‎"), "JP": ("JPN", "392", "Japan", _("Japan"), "日本"), "KE": ("KEN", "404", "Kenya", _("Kenya"), "Kenya"), "KG": ("KGZ", "417", "Kyrgyzstan", _("Kyrgyzstan"), "Kırğızstan"), "KH": ("KHM", "116", "Cambodia", _("Cambodia"), "កម្ពុជា"), "KI": ("KIR", "296", "Kiribati", _("Kiribati"), "Kiribati"), "KM": ("COM", "174", "Comoros", _("Comoros"), "جزر القمر‎"), "KN": ("KNA", "659", "Saint Kitts and Nevis", _("Saint Kitts and Nevis"), "Saint Kitts and Nevis"), "KP": ("PRK", "408", "Korea, Democratic People's Republic of", _("Korea, Democratic People's Republic of"), "조선"), "KR": ("KOR", "410", "Korea, Republic of", _("Korea, Republic of"), "한국"), "KW": ("KWT", "414", "Kuwait", _("Kuwait"), "الكويت‎"), "KY": ("CYM", "136", "Cayman Islands", _("Cayman Islands"), "Cayman Islands"), "KZ": ("KAZ", "398", "Kazakhstan", _("Kazakhstan"), "Қазақстан"), "LA": ("LAO", "418", "Lao People's Democratic Republic", _("Lao People's Democratic Republic"), "ລາວ"), "LB": ("LBM", "422", "Lebanon", _("Lebanon"), "لبنان‎"), "LC": ("LCA", "662", "Saint Lucia", _("Saint Lucia"), "Sainte-Lucie"), "LI": ("LIE", "438", "Liechtenstein", _("Liechtenstein"), "Liechtenstein"), "LK": ("LKA", "144", "Sri Lanka", _("Sri Lanka"), "ශ්‍රී ලංකා Śrī Laṃkā"), "LR": ("LBR", "430", "Liberia", _("Liberia"), "Liberia"), "LS": ("LSO", "426", "Lesotho", _("Lesotho"), "Lesotho"), "LT": ("LTU", "440", "Lithuania", _("Lithuania"), "Lietuva"), "LU": ("LUX", "442", "Luxembourg", _("Luxembourg"), "Lëtzebuerg"), "LV": ("LVA", "428", "Latvia", _("Latvia"), "Latvija"), "LY": ("LBY", "434", "Libya", _("Libya"), "ليبيا‎"), "MA": ("MAR", "504", "Morocco", _("Morocco"), "المغرب‎"), "MC": ("MCO", "492", "Monaco", _("Monaco"), "Monaco"), "MD": ("MDA", "498", "Moldova, Republic of", _("Moldova, Republic of"), "Republica Moldova"), "ME": ("MNE", "499", "Montenegro", _("Montenegro"), "Црна Гора"), "MF": ("MAF", "663", "Saint Martin (French part)", _("Saint Martin (French part)"), "Saint-Martin"), "MG": ("MDG", "450", "Madagascar", _("Madagascar"), "Madagasikara"), "MH": ("MHL", "584", "Marshall Islands", _("Marshall Islands"), "Aolepān Aorōkin M̧ajeļ"), "MK": ("MKD", "807", "North Macedonia, Republic of", _("North Macedonia, Republic of"), "Република Северна Македонија"), "ML": ("MLI", "466", "Mali", _("Mali"), "Mali"), "MM": ("MMR", "104", "Myanmar", _("Myanmar"), "မြန်မာ"), "MN": ("MNG", "496", "Mongolia", _("Mongolia"), "Монгол Улс"), "MO": ("MAC", "446", "Macao", _("Macao"), "澳門"), "MP": ("MNP", "580", "Northern Mariana Islands", _("Northern Mariana Islands"), "Northern Mariana Islands"), "MQ": ("MTQ", "474", "Martinique", _("Martinique"), "Matnik / Matinik"), "MR": ("MRT", "478", "Mauritania", _("Mauritania"), "موريتانيا‎"), "MS": ("MSR", "500", "Montserrat", _("Montserrat"), "Montserrat"), "MT": ("MLT", "470", "Malta", _("Malta"), "Malta"), "MU": ("MUS", "480", "Mauritius", 
_("Mauritius"), "Maurice"), "MV": ("MDV", "462", "Maldives", _("Maldives"), "ދިވެހިރާއްޖެ"), "MW": ("MWI", "454", "Malawi", _("Malawi"), "Malaŵi"), "MX": ("MEX", "484", "Mexico", _("Mexico"), "Mēxihco"), "MY": ("MYS", "458", "Malaysia", _("Malaysia"), "Məlejsiə"), "MZ": ("MOZ", "508", "Mozambique", _("Mozambique"), "Moçambique"), "NA": ("NAM", "516", "Namibia", _("Namibia"), "Namibia"), "NC": ("NCL", "540", "New Caledonia", _("New Caledonia"), "Nouvelle-Calédonie"), "NE": ("NER", "562", "Niger", _("Niger"), "Niger"), "NF": ("NFK", "574", "Norfolk Island", _("Norfolk Island"), "Norf'k Ailen"), "NG": ("NGA", "566", "Nigeria", _("Nigeria"), "Nijeriya"), "NI": ("NIC", "558", "Nicaragua", _("Nicaragua"), "Nicaragua"), "NL": ("NLD", "528", "Netherlands", _("Netherlands"), "Nederland"), "NO": ("NOR", "578", "Norway", _("Norway"), "Norge / Noreg"), "NP": ("NPL", "524", "Nepal", _("Nepal"), "नेपाल"), "NR": ("NRU", "520", "Nauru", _("Nauru"), "Naoero"), "NU": ("NIU", "570", "Niue", _("Niue"), "Niuē"), "NZ": ("NZL", "554", "New Zealand", _("New Zealand"), "New Zealand"), "OM": ("OMN", "512", "Oman", _("Oman"), "عمان‎"), "PA": ("PAN", "591", "Panama", _("Panama"), "Panamá"), "PE": ("PER", "604", "Peru", _("Peru"), "Perú"), "PF": ("PYF", "258", "French Polynesia", _("French Polynesia"), "Polynésie française"), "PG": ("PNG", "598", "Papua New Guinea", _("Papua New Guinea"), "Papua Niugini"), "PH": ("PHL", "608", "Philippines", _("Philippines"), "Pilipinas"), "PK": ("PAK", "586", "Pakistan", _("Pakistan"), "اِسلامی جمہوریہ پاكِستان"), "PL": ("POL", "616", "Poland", _("Poland"), "Polska"), "PM": ("SPM", "666", "Saint Pierre and Miquelon", _("Saint Pierre and Miquelon"), "Saint-Pierre-et-Miquelon"), "PN": ("PCN", "612", "Pitcairn", _("Pitcairn"), "Pitkern Ailen"), "PR": ("PRI", "630", "Puerto Rico", _("Puerto Rico"), "Puerto Rico"), "PS": ("PSE", "275", "Palestine, State of", _("Palestine, State of"), "فلسطين‎"), "PT": ("PRT", "620", "Portugal", _("Portugal"), "Portuguesa"), "PW": ("PLW", "585", "Palau", _("Palau"), "Belau"), "PY": ("PRY", "600", "Paraguay", _("Paraguay"), "Paraguái"), "QA": ("QAT", "634", "Qatar", _("Qatar"), "قطر‎"), "RE": ("REU", "638", "Réunion", _("Réunion"), "La Réunion"), "RO": ("ROU", "642", "Romania", _("Romania"), "România"), "RS": ("SRB", "688", "Serbia", _("Serbia"), "Србија"), "RU": ("RUS", "643", "Russian Federation", _("Russian Federation"), "Росси́йская Федера́ция"), "RW": ("RWA", "646", "Rwanda", _("Rwanda"), "Rwanda"), "SA": ("SAU", "682", "Saudi Arabia", _("Saudi Arabia"), "المملكة العربية السعودية"), "SB": ("SLB", "090", "Solomon Islands", _("Solomon Islands"), "Solomon Aelan"), "SC": ("SYC", "690", "Seychelles", _("Seychelles"), "Seychelles"), "SD": ("SDN", "729", "Sudan", _("Sudan"), "السودان‎ as-Sūdān"), "SE": ("SWE", "752", "Sweden", _("Sweden"), "Sverige"), "SG": ("SGP", "702", "Singapore", _("Singapore"), "Singapore"), "SH": ("SHN", "654", "Saint Helena, Ascension and Tristan da Cunha", _("Saint Helena, Ascension and Tristan da Cunha"), "Saint Helena, Ascension and Tristan da Cunha"), "SI": ("SVN", "705", "Slovenia", _("Slovenia"), "Slovenija"), "SJ": ("SJM", "744", "Svalbard and Jan Mayen", _("Svalbard and Jan Mayen"), "Svalbard og Jan Mayen"), "SK": ("SVK", "703", "Slovakia", _("Slovakia"), "Slovensko"), "SL": ("SLE", "694", "Sierra Leone", _("Sierra Leone"), "Sierra Leone"), "SM": ("SMR", "674", "San Marino", _("San Marino"), "San Marino"), "SN": ("SEN", "686", "Senegal", _("Senegal"), "Sénégal"), "SO": ("SOM", "706", "Somalia", _("Somalia"), "Soomaaliya"), 
"SR": ("SUR", "740", "Suriname", _("Suriname"), "Suriname"), "SS": ("SSD", "728", "South Sudan", _("South Sudan"), "South Sudan"), "ST": ("STP", "678", "Sao Tome and Principe", _("Sao Tome and Principe"), "São Tomé e Principe"), "SV": ("SLV", "222", "El Salvador", _("El Salvador"), "el salβaˈðoɾ"), "SX": ("SXM", "534", "Sint Maarten", _("Sint Maarten"), "Sint Maarten"), "SY": ("SYR", "760", "Syrian Arab Republic", _("Syrian Arab Republic"), "سوريا‎"), "SZ": ("SWZ", "748", "Swaziland / Eswatini", _("Swaziland / Eswatini"), "eSwatini"), "TC": ("TCA", "796", "Turks and Caicos Islands", _("Turks and Caicos Islands"), "Turks and Caicos Islands"), "TD": ("TCD", "148", "Chad", _("Chad"), "تشاد‎"), "TF": ("ATF", "260", "French Southern Territories", _("French Southern Territories"), "Terres australes et antarctiques françaises"), "TG": ("TGO", "768", "Togo", _("Togo"), "Togolaise"), "TH": ("THA", "764", "Thailand", _("Thailand"), "ราชอาณาจักรไทย"), "TJ": ("TJK", "762", "Tajikistan", _("Tajikistan"), "Тоҷикистон"), "TK": ("TKL", "772", "Tokelau", _("Tokelau"), "Tokelau"), "TL": ("TLS", "626", "Timor-Leste / East Timor", _("Timor-Leste / East Timor"), "Timór Lorosa'e"), "TM": ("TKM", "795", "Turkmenistan", _("Turkmenistan"), "Türkmenistan"), "TN": ("TUN", "788", "Tunisia", _("Tunisia"), "الجمهورية التونسية"), "TO": ("TON", "776", "Tonga", _("Tonga"), "Tonga"), "TR": ("TUR", "792", "Turkey", _("Turkey"), "Türkiye"), "TT": ("TTO", "780", "Trinidad and Tobago", _("Trinidad and Tobago"), "Trinidad and Tobago"), "TV": ("TUV", "798", "Tuvalu", _("Tuvalu"), "Tuvalu"), "TW": ("TWN", "158", "Taiwan", _("Taiwan"), "中華民國"), "TZ": ("TZA", "834", "Tanzania, United Republic of", _("Tanzania, United Republic of"), "جمهورية تنزانيا المتحدة‎"), "UA": ("UKR", "804", "Ukraine", _("Ukraine"), "Україна"), "UG": ("UGA", "800", "Uganda", _("Uganda"), "Jamhuri ya Uganda"), "UM": ("UMI", "581", "United States Minor Outlying Islands", _("United States Minor Outlying Islands"), "United States Minor Outlying Islands"), "US": ("USA", "840", "United States of America", _("United States of America"), "United States of America"), "UY": ("URY", "858", "Uruguay", _("Uruguay"), "Uruguay"), "UZ": ("UZB", "860", "Uzbekistan", _("Uzbekistan"), "Oʻzbekiston"), "VA": ("VAT", "336", "Holy See (Vatican City State)", _("Holy See (Vatican City State)"), "Santa Sede (Stato della Città del Vaticano)"), "VC": ("VCT", "670", "Saint Vincent and the Grenadines", _("Saint Vincent and the Grenadines"), "Saint Vincent and the Grenadines"), "VE": ("VEN", "862", "Venezuela, Bolivarian Republic of", _("Venezuela, Bolivarian Republic of"), "República Bolivariana de Venezuela"), "VG": ("VGB", "092", "Virgin Islands (British)", _("Virgin Islands (British)"), "Virgin Islands (British)"), "VI": ("VIR", "850", "Virgin Islands (US)", _("Virgin Islands (US)"), "Virgin Islands (US)"), "VN": ("VNM", "704", "Viet Nam", _("Viet Nam"), "Việt Nam"), "VU": ("VUT", "548", "Vanuatu", _("Vanuatu"), "Vanuatu"), "WF": ("WLF", "876", "Wallis and Futuna", _("Wallis and Futuna"), "Wallis-et-Futuna"), "WS": ("WSM", "882", "Samoa", _("Samoa"), "Sāmoa"), "YE": ("YEM", "887", "Yemen", _("Yemen"), "ٱلْيَمَن‎"), "YT": ("MYT", "175", "Mayotte", _("Mayotte"), "Mayotte"), "ZA": ("ZAF", "710", "South Africa", _("South Africa"), "South Africa"), "ZM": ("ZMB", "894", "Zambia", _("Zambia"), "Zambia"), "ZW": ("ZWE", "716", "Zimbabwe", _("Zimbabwe"), "Zimbabwe") } CAT_ENVIRONMENT = 0 CAT_PYTHON = 1 CATEGORIES = [ ("LC_ALL", LC_ALL), ("LC_ADDRESS", None), ("LC_COLLATE", LC_COLLATE), 
("LC_CTYPE", LC_CTYPE), ("LC_DATE", None), ("LC_IDENTIFICATION", None), ("LC_MEASUREMENT", None), ("LC_MESSAGES", LC_MESSAGES), ("LC_MONETARY", LC_MONETARY), ("LC_NAME", None), ("LC_NUMERIC", LC_NUMERIC), ("LC_PAPER", None), ("LC_TELEPHONE", None), ("LC_TIME", LC_TIME) ] class International: def __init__(self): self.availablePackages = [] self.installedPackages = [] self.installedDirectories = [] self.packageLocales = {} self.localeList = ["en_US"] self.languageList = ["en"] self.activeLocale = "en_US" self.catalog = None self.callbacks = [] # environ["LANG"] = "C.UTF-8" # Force the environment to US English so all shell commands run from Enigma2 can be parsed in English (as coded). # environ["LANGUAGE"] = "C.UTF-8" self.buildISO3166() # This should not be required when all Enigma2 code comes here for country and language data. self.initInternational() def buildISO3166(self): # This code builds the CountryCodes.py ISO3166 country list. data = [] for country in COUNTRY_DATA.keys(): data.append(( COUNTRY_DATA[country][COUNTRY_TRANSLATED], country, # This is the ISO3166 ALPHA2 Code. COUNTRY_DATA[country][COUNTRY_ALPHA3], COUNTRY_DATA[country][COUNTRY_NUMERIC], COUNTRY_DATA[country][COUNTRY_NAME] )) data.sort(key=lambda x: x[4]) setISO3166(data) def initInternational(self): self.availablePackages = self.getAvailablePackages(update=True) self.installedPackages = self.getInstalledPackages(update=True) self.installedDirectories = self.getInstalledDirectories(update=True) if len(self.installedDirectories) != len(self.installedPackages): print("[International] Warning: Count of installed locale/language packages and locale/language directory entries do not match!") self.packageLocales = {} for package in self.installedPackages: locales = self.packageToLocales(package) packageLocales = [] for locale in locales: if locale not in packageLocales: packageLocales.append(locale) if locale not in self.localeList: self.localeList.append(locale) self.packageLocales[package] = packageLocales language = self.splitPackage(package)[0] if language not in self.languageList: self.languageList.append(language) count = len(packageLocales) print("[International] Package '%s' supports %d locale%s '%s'." % (package, count, "" if count == 1 else "s", "', '".join(packageLocales))) self.localeList.sort() self.languageList.sort() def activateLanguage(self, language, runCallbacks=True): locale = "%s_%s" % (language, LANGUAGE_DATA[language][LANG_COUNTRYCODES][0]) if language in LANGUAGE_DATA else "en_US" print("[International] Language '%s' is being activated as locale '%s'." % (language, locale)) return self.activateLocale(locale, runCallbacks=runCallbacks) def activateLocale(self, locale, runCallbacks=True): if locale not in self.localeList: print("[International] Selected locale '%s' is not installed or does not exist!" % locale) elif locale == self.activeLocale: print("[International] Language '%s', locale '%s' is already active." % (self.getLanguage(locale), locale)) else: print("[International] Activating language '%s', locale '%s'." % (self.getLanguage(locale), locale)) global languagePath try: self.catalog = translation("enigma2", languagePath, languages=[locale], fallback=True) except UnicodeDecodeError: print("[International] Error: The language translation data in '%s' for '%s' ('%s') has failed to initialise!" 
% (languagePath, self.getLanguage(locale), locale)) self.catalog = translation("enigma2", "/", fallback=True) self.catalog.install(names=("ngettext", "pgettext")) for category in CATEGORIES: environ[category[CAT_ENVIRONMENT]] = "%s.UTF-8" % locale localeError = None if category[CAT_PYTHON] is not None: try: # Try and set the Python locale to the current locale. setlocale(category[CAT_PYTHON], locale=(locale, "UTF-8")) except LocaleError as err: try: # If unavailable, try for the Python locale to the language base locale. locales = self.packageToLocales(self.getLanguage(locale)) setlocale(category[CAT_PYTHON], locale=(locales[0], "UTF-8")) replacement = locales[0] except LocaleError as err: # If unavailable fall back to the US English locale. setlocale(category[CAT_PYTHON], locale=("POSIX", "")) replacement = "POSIX" if localeError is None: localeError = replacement print("[International] Warning: Locale '%s' is not available in Python %s, using locale '%s' instead." % (locale, category[CAT_ENVIRONMENT], replacement)) environ["LC_ALL"] = "" # This is cleared by popular request. environ["LC_TIME"] = "%s.UTF-8" % locale # Python 2.7 sometimes reverts the LC_TIME environment value, so make sure it has the correct value! environ["LANG"] = "%s.UTF-8" % locale environ["LANGUAGE"] = "%s.UTF-8" % locale environ["GST_SUBTITLE_ENCODING"] = self.getGStreamerSubtitleEncoding() self.activeLocale = locale if runCallbacks: for method in self.callbacks: method() def addCallback(self, callback): if callable(callback): self.callbacks.append(callback) else: print("[International] Error: The callback '%s' is invalid!" % callback) def getActiveCatalog(self): return self.catalog def getAvailablePackages(self, update=False): if update: command = (PACKAGER, "find", PACKAGE_TEMPLATE % "*") availablePackages = [] try: # print("[International] Processing command '%s' with arguments '%s'." % (command[0], "', '".join(command[1:]))) process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True) packageText, errorText = process.communicate() if errorText: print("[International] getLanguagePackages Error: %s" % errorText) else: for language in packageText.split("\n"): if language and "meta" not in language: lang = language[15:].split(" ")[0] if lang not in availablePackages: availablePackages.append(lang) availablePackages = sorted(availablePackages) except (IOError, OSError) as err: print("[International] getLanguagePackages Error %d: %s ('%s')" % (err.errno, err.strerror, command[0])) availablePackages = [] print("[International] There are %d available locale/language packages in the repository '%s'." % (len(availablePackages), "', '".join(availablePackages))) else: availablePackages = self.availablePackages return availablePackages def getInstalledPackages(self, update=False): if update: command = (PACKAGER, "status", PACKAGE_TEMPLATE % "*") installedPackages = [] try: # print("[International] Processing command '%s' with arguments '%s'." 
% (command[0], "', '".join(command[1:]))) process = Popen(command, stdout=PIPE, stderr=PIPE, universal_newlines=True) packageText, errorText = process.communicate() if errorText: print("[International] getInstalledPackages Error: %s" % errorText) else: for package in packageText.split("\n\n"): if package.startswith("Package: %s" % (PACKAGE_TEMPLATE % "")) and "meta" not in package: list = [] for data in package.split("\n"): if data.startswith("Package: "): installedPackages.append(data[24:]) break installedPackages = sorted(installedPackages) except (IOError, OSError) as err: print("[International] getInstalledPackages Error %d: %s ('%s')" % (err.errno, err.strerror, command[0])) print("[International] There are %d installed locale/language packages '%s'." % (len(installedPackages), "', '".join(installedPackages))) else: installedPackages = self.installedPackages return installedPackages def getInstalledDirectories(self, update=False): # Adapt language directory entries to match the package format. if update: global languagePath installedDirectories = sorted(listdir(languagePath)) if isdir(languagePath) else [] print("[International] There are %d installed locale/language directories '%s'." % (len(installedDirectories), "', '".join(installedDirectories))) else: installedDirectories = self.installedDirectories return installedDirectories def getPurgablePackages(self, locale=None): if locale is None: locale = self.getLocale() locales = PERMANENT_LOCALES[:] if locale not in locales: locales.append(locale) locales.sort() packages = sorted(self.getInstalledPackages()) for locale in locales: for package in packages: if locale in self.packageLocales[package]: packages.remove(package) return packages def getPermanentLocales(self, locale=None): if locale is None: locale = self.getLocale() locales = PERMANENT_LOCALES[:] if locale not in locales: locales.append(locale) permanent = [] for locale in locales: permanent.append("%s (%s)" % (self.getLanguageName(locale), self.getCountryName(locale))) return permanent def packageToLocales(self, package): locale = package.replace("-", "_") data = self.splitLocale(locale) locales = [] if data[1]: locales.append("%s_%s" % (data[0], data[1].upper())) else: for country in LANGUAGE_DATA.get(data[0], tuple([None] * LANG_MAX))[LANG_COUNTRYCODES]: locales.append("%s_%s" % (data[0], country)) return locales def localeToPackage(self, locale=None): if locale is None: locale = self.getLocale() for package in self.getAvailablePackages(): if locale in self.packageLocales[package]: return package return None def languageToPackage(self, language=None): if language is None: language = self.getLanguage() for package in self.getAvailablePackages(): for locale in self.packageLocales[package]: if language == self.getLanguage(locale): return package return None def splitPackage(self, package): data = package.split("-", 1) if len(data) < 2: data.append(None) else: data[1] = data[1].upper() return data def getLocale(self): return "en_US" if self.activeLocale is None else self.activeLocale def splitLocale(self, locale): data = locale.split("_", 1) if len(data) < 2: data.append(None) return data def getCountry(self, item=None): if item is None: item = self.getLocale() return self.splitLocale(item)[1] if len(item) > 3 else item # and item in COUNTRY_DATA or None def getCountryAlpha3(self, item=None): return COUNTRY_DATA.get(self.getCountry(item), tuple([None] * COUNTRY_MAX))[COUNTRY_ALPHA3] def getCountryNumeric(self, item=None): return COUNTRY_DATA.get(self.getCountry(item), 
tuple([None] * COUNTRY_MAX))[COUNTRY_NUMERIC] def getCountryName(self, item=None): return COUNTRY_DATA.get(self.getCountry(item), tuple([None] * COUNTRY_MAX))[COUNTRY_NAME] def getCountryTranslated(self, item=None): return COUNTRY_DATA.get(self.getCountry(item), tuple([None] * COUNTRY_MAX))[COUNTRY_TRANSLATED] def getCountryNative(self, item=None): return COUNTRY_DATA.get(self.getCountry(item), tuple([None] * COUNTRY_MAX))[COUNTRY_NATIVE] def getLanguage(self, item=None): if item is None: item = self.getLocale() return self.splitLocale(item)[0] if len(item) > 3 else item # and item in LANGUAGE_DATA or None def getLanguageName(self, item=None): return LANGUAGE_DATA.get(self.getLanguage(item), tuple([None] * LANG_MAX))[LANG_NAME] def getLanguageTranslated(self, item=None): return LANGUAGE_DATA.get(self.getLanguage(item), tuple([None] * LANG_MAX))[LANG_TRANSLATED] def getLanguageNative(self, item=None): return LANGUAGE_DATA.get(self.getLanguage(item), tuple([None] * LANG_MAX))[LANG_NATIVE] def getLanguageEncoding(self, item=None): return LANGUAGE_DATA.get(self.getLanguage(item), tuple([None] * LANG_MAX))[LANG_ENCODING] def getLanguageCountryCode(self, item=None): countries = LANGUAGE_DATA.get(self.getLanguage(item), tuple([None] * LANG_MAX))[LANG_COUNTRYCODES] return countries[0] if countries else None def getLocaleList(self): return self.localeList def getLanguageList(self): return self.languageList def getPackage(self, locale=None): if locale is None: locale = self.getLocale() language = self.getLanguage(locale) pack = locale.replace("_", "-").lower() if pack in self.availablePackages: package = pack elif language in self.availablePackages: package = language else: package = None return package def getGStreamerSubtitleEncoding(self, item=None): language = self.getLanguage(item) return LANGUAGE_DATA[language][LANG_ENCODING] if language in LANGUAGE_DATA else "ISO-8859-15" def deleteLanguagePackages(self, packageList): return self.runPackageManager(cmdList=[PACKAGER, "remove", "--autoremove", "--force-depends"], packageList=packageList, action=_("deleted")) def installLanguagePackages(self, packageList): return self.runPackageManager(cmdList=[PACKAGER, "install", "--volatile-cache"], packageList=packageList, action=_("installed")) def runPackageManager(self, cmdList=None, packageList=None, action=""): retVal = 0 statusMsg = "" if cmdList and packageList: cmdList = tuple(cmdList + [PACKAGE_TEMPLATE % x for x in packageList]) print("[International] Running package manager command line '%s'." % " ".join(cmdList)) try: process = Popen(cmdList, stdout=PIPE, stderr=PIPE, universal_newlines=True) packageText, errorText = process.communicate() retVal = process.returncode if retVal: print("[International] Warning: Package manager exit status is %d!" % process.returncode) locales = 0 languages = 0 for package in packageList: if len(package) > 3: locales += 1 if len(package) < 4: languages += 1 msg = [] if locales: msg.append(ngettext(_("Locale"), _("Locales"), locales)) if languages: msg.append(ngettext(_("Language"), _("Languages"), languages)) msg = "/".join(msg) languages = [self.splitPackage(x)[0] for x in packageList] languages = ["%s (%s)" % (LANGUAGE_DATA[x][LANG_NAME], LANGUAGE_DATA[x][LANG_NATIVE]) for x in languages] if errorText: print("[International] Warning: Package manager error:\n%s" % errorText) statusMsg = _("Error: %s %s not %s! Please try again later.") % (msg, ", ".join(languages), action) else: statusMsg = "%s %s %s." 
% (msg, ", ".join(languages), action) except (IOError, OSError) as err: print("[International] Error %d: %s for command '%s'!" % (err.errno, err.strerror, " ".join(cmdList))) retVal = -1 statusMsg = _("Error: Unable to process the command! Please try again later.") self.initInternational() return retVal, statusMsg international = International()
openatv/enigma2
lib/python/Components/International.py
Python
gpl-2.0
51,168
[ "BWA" ]
ece7fc3014c452649ef90a5dc5d00874737b746c500dc356556ca644b90e0967
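International.activateLocale() in the file above works through a chain of setlocale() attempts: the exact locale first, then an alternative from the same language, then a safe default. Below is a minimal standalone sketch of that fallback pattern, assuming a standard CPython locale module; the helper name and candidate locales are illustrative and not part of the Enigma2 code (which falls back to ("POSIX", "") rather than "C"):

# Minimal sketch of the setlocale() fallback chain used by activateLocale().
from locale import setlocale, Error as LocaleError, LC_TIME

def set_locale_with_fallback(category, candidates):
    # Try each candidate locale in order; fall back to the always-available "C".
    for name in candidates:
        try:
            setlocale(category, (name, "UTF-8"))
            return name
        except LocaleError:
            continue
    setlocale(category, "C")
    return "C"

# Example: prefer de_CH, then de_DE, then "C".
print("Using locale: %s" % set_locale_with_fallback(LC_TIME, ["de_CH", "de_DE"]))

The original additionally remembers the first replacement it had to make so it can warn once per category; the sketch simply returns whichever name succeeded.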
# -*- coding: utf-8 -*- # # GSL documentation build configuration file, created by # sphinx-quickstart on Mon Feb 27 15:17:27 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) import sphinx_rtd_theme # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.imgmath'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'GSL' copyright = u'1996-2018 The GSL Team' author = u'The GSL Team' title = u'GNU Scientific Library' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = u'2.5' # The full version, including alpha/beta/rc tags. release = u'2.5' primary_domain = 'c' numfig = True # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'include.rst', 'specfunc-*.rst'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # #html_theme = 'alabaster' html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'display_version': True, 'prev_next_buttons_location': 'both' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'GSLdoc' # -- Options for LaTeX output --------------------------------------------- my_latex_preamble = '\\DeclareMathOperator\\arccosh{arccosh} \ \\DeclareMathOperator\\arcsinh{arcsinh} \ \\DeclareMathOperator\\arctanh{arctanh} \ \\DeclareMathOperator\\arcsec{arcsec} \ \\DeclareMathOperator\\arccsc{arccsc} \ \\DeclareMathOperator\\arccot{arccot} \ \\DeclareMathOperator\\csch{csch} \ \\DeclareMathOperator\\sech{sech} \ \\DeclareMathOperator\\arcsech{arcsech} \ \\DeclareMathOperator\\arccsch{arccsch} \ \\DeclareMathOperator\\arccoth{arccoth} \ \\DeclareMathOperator\\erf{erf} \ \\DeclareMathOperator\\erfc{erfc} \ \\DeclareMathOperator\\sgn{sgn} \ \\DeclareMathOperator\\sinc{sinc} \ \\DeclareMathOperator\\Var{Var} \ \\DeclareMathOperator\\diag{diag}' my_latex_authors = 'Mark Galassi \\\\ \ Jim Davies \\\\ \ James Theiler \\\\ \ Brian Gough \\\\ \ Gerard Jungman \\\\ \ Patrick Alken \\\\ \ Michael Booth \\\\ \ Fabrice Rossi \\\\ \ Rhys Ulerich' latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': my_latex_preamble, # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'gsl-ref.tex', title, my_latex_authors, 'manual'), ] imgmath_latex_preamble = my_latex_preamble # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'gsl', title, [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'gsl-ref', title, author, 'GSL', 'One line description of project.', 'Miscellaneous'), ]
mancoast/gsl
doc/conf.py
Python
gpl-3.0
6,359
[ "Brian" ]
ce570249130ad59eae476a7ed8d1355e5e9f8938bd023c6caf37b98c542fd7f6
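The conf.py above defines my_latex_preamble once and feeds it to both latex_elements['preamble'] and imgmath_latex_preamble, so the PDF build and the imgmath-rendered HTML equations share the same custom operators. A stripped-down sketch of that pattern for a hypothetical project follows; the \Cov operator is an example, not one the GSL manual defines:

# Stripped-down conf.py fragment: keep custom math operators in one string so
# sphinx.ext.imgmath (HTML) and the LaTeX/PDF build stay in sync.
extensions = ['sphinx.ext.imgmath']

shared_preamble = r'\DeclareMathOperator\Var{Var} \DeclareMathOperator\Cov{Cov}'

latex_elements = {'preamble': shared_preamble}
imgmath_latex_preamble = shared_preamble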
#!/usr/bin/env python """ Given a binary search tree, change it to sorted double linked list by only one pass. """ class TreeNode(object): def __init__(self, x): self.val = x self.left = None self.right = None class Solution(object): """ Inorder traverse. """ def convert(self, tree): """ :type tree: TreeNode :rtype: TreeNode Idea: by inorder traverse, we could traverse the tree by sorted order, then think it as inorder traverse. when visit left sub tree (if exist left tree), we will pass in a node named pre, then return from left tree, make pre.next = current_node, current_node.pre = pre then visit right sub tree (if exist right sub tree), if no right sub tree, return current_node as the last pre node, else return the node returned from right sub tree. Time: O(n) Space: O(1) """ head = TreeNode(0) self.inorder(tree, head) return head.right def inorder(self, node, pre): if node.left: pre = self.inorder(node.left, pre) pre.right = node node.left = pre if node.right: return self.inorder(node.right, node) else: return node class Traverse(object): @staticmethod def inorder(node): if node != None: Traverse.inorder(node.left) print node.val Traverse.inorder(node.right) if __name__ == '__main__': t = TreeNode(4) print 'test #1' t.left = TreeNode(2) t.right = TreeNode(6) t.left.left = TreeNode(1) t.left.right = TreeNode(3) t.right.left = TreeNode(5) t.right.right = TreeNode(7) convertor = Solution() res = convertor.convert(t) while res != None: print res.val, res = res.right print '' print 'test #2' t = TreeNode(8) t.left = TreeNode(4) t.right = TreeNode(11) t.left.left = TreeNode(3) t.left.left.left = TreeNode(2) t.left.left.left.left = TreeNode(1) t.left.right = TreeNode(6) t.left.right.left = TreeNode(5) t.left.right.right = TreeNode(7) t.right.left = TreeNode(10) t.right.left.left = TreeNode(9) t.right.right = TreeNode(13) t.right.right.left = TreeNode(12) res = convertor.convert(t) while res != None: print res.val, res = res.right print '' print 'test #3' t = TreeNode(5) t.left = TreeNode(4) t.left.left = TreeNode(3) t.left.left.left = TreeNode(2) t.left.left.left.left = TreeNode(1) t.right = TreeNode(6) res = convertor.convert(t) while res != None: print res.val, res = res.right
weixsong/algorithm
interview/convert_binary_search_tree_2_sorted_linked_list.py
Python
mit
2,780
[ "VisIt" ]
882272005d2eea4f08364d13951dd158ef5879bc2cbf02609b6c174ba68a2654
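One property of convert() above is worth noting: the first node of the resulting list still has its .left pointer referencing the internal dummy node, so only the forward (.right) chain is a clean sorted list. A small hypothetical checker for that forward chain, not part of the original file:

# Hypothetical helper: confirm that following .right from the returned head
# yields values in non-decreasing order. The backward (.left) chain is not
# checked because the first node's .left still points at the dummy node.
def is_sorted_forward(head):
    node = head
    while node is not None and node.right is not None:
        if node.val > node.right.val:
            return False
        node = node.right
    return True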
#!/usr/bin/python # Copyright (C) 2014 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os.path import select import sys import time import collections import socket import gflags as flags # http://code.google.com/p/python-gflags/ import pkgutil import threading import Queue import traceback import math import bisect from bisect import bisect_left """ scipy, numpy and matplotlib are python packages that can be installed from: http://www.scipy.org/ """ import scipy import matplotlib.pyplot as plt # let this script know about the power monitor implementations sys.path = [os.path.basename(__file__)] + sys.path available_monitors = [ name for _, name, _ in pkgutil.iter_modules( [os.path.join(os.path.dirname(__file__), "power_monitors")]) if not name.startswith("_")] APK = os.path.join(os.path.dirname(__file__), "..", "CtsVerifier.apk") FLAGS = flags.FLAGS # DELAY_SCREEN_OFF is the number of seconds to wait for baseline state DELAY_SCREEN_OFF = 20.0 # whether to log data collected to a file for each sensor run: LOG_DATA_TO_FILE = True logging.getLogger().setLevel(logging.ERROR) def do_import(name): """import a module by name dynamically""" mod = __import__(name) components = name.split(".") for comp in components[1:]: mod = getattr(mod, comp) return mod class PowerTestException(Exception): """ Definition of specialized Exception class for CTS power tests """ def __init__(self, message): self._error_message = message def __str__(self): return self._error_message class PowerTest: """Class to run a suite of power tests. This has methods for obtaining measurements from the power monitor (through the driver) and then processing it to determine baseline and AP suspend state and measure ampere draw of various sensors. Ctrl+C causes a keyboard interrupt exception which terminates the test.""" # Thresholds for max allowed power usage per sensor tested # TODO: Accel, Mag and Gyro have no maximum power specified in the CDD; # the following numbers are bogus and will be replaced soon by what # the device reports (from Sensor.getPower()) MAX_ACCEL_AMPS = 0.08 # Amps MAX_MAG_AMPS = 0.08 # Amps MAX_GYRO_AMPS = 0.08 # Amps MAX_SIGMO_AMPS = 0.08 # Amps # TODO: The following numbers for step counter, etc must be replaced by # the numbers specified in CDD for low-power sensors. The expected current # draw must be computed from the specified power and the voltage used to # power the device (specified from a config file). MAX_STEP_COUNTER_AMPS = 0.08 # Amps MAX_STEP_DETECTOR_AMPS = 0.08 # Amps # The variable EXPECTED_AMPS_VARIATION_HALF_RANGE denotes the expected # variation of the ampere measurements # around the mean value at baseline state. i.e. 
we expect most of the # ampere measurements at baseline state to vary around the mean by # between +/- of the number below EXPECTED_AMPS_VARIATION_HALF_RANGE = 0.0005 # The variable THRESHOLD_BASELINE_SAMPLES_FRACTION denotes the minimum fraction of samples that must # be in the range of variation defined by EXPECTED_AMPS_VARIATION_HALF_RANGE # around the mean baseline for us to decide that the phone has settled into # its baseline state THRESHOLD_BASELINE_SAMPLES_FRACTION = 0.86 # The variable MAX_PERCENTILE_AP_SCREEN_OFF_AMPS denotes the maximum ampere # draw that the device can consume when it has gone to suspend state with # one or more sensors registered and batching samples (screen and AP are # off in this case) MAX_PERCENTILE_AP_SCREEN_OFF_AMPS = 0.030 # Amps # The variable PERCENTILE_MAX_AP_SCREEN_OFF denotes the fraction of ampere # measurements that must be below the specified maximum amperes # MAX_PERCENTILE_AP_SCREEN_OFF_AMPS for us to decide that the phone has # reached suspend state. PERCENTILE_MAX_AP_SCREEN_OFF = 0.95 DOMAIN_NAME = "/android/cts/powertest" # SAMPLE_COUNT_NOMINAL denotes the typical number of measurements of amperes # to collect from the power monitor SAMPLE_COUNT_NOMINAL = 1000 # RATE_NOMINAL denotes the nominal frequency at which ampere measurements # are taken from the monsoon power monitor RATE_NOMINAL = 100 ENABLE_PLOTTING = False REQUEST_EXTERNAL_STORAGE = "EXTERNAL STORAGE?" REQUEST_EXIT = "EXIT" REQUEST_RAISE = "RAISE %s %s" REQUEST_USER_RESPONSE = "USER RESPONSE %s" REQUEST_SET_TEST_RESULT = "SET TEST RESULT %s %s %s" REQUEST_SENSOR_SWITCH = "SENSOR %s %s" REQUEST_SENSOR_AVAILABILITY = "SENSOR? %s" REQUEST_SCREEN_OFF = "SCREEN OFF" REQUEST_SHOW_MESSAGE = "MESSAGE %s" NEGATIVE_AMPERE_ERROR_MESSAGE = ( "Negative ampere draw measured, possibly due to power " "supply from USB cable. Check the setup of device and power " "monitor to make sure that the device is not connected " "to machine via USB directly. The device should be " "connected to the USB slot in the power monitor. It is okay " "to change the wiring when the test is in progress.") def __init__(self, max_baseline_amps): """ Args: max_baseline_amps: The maximum value of baseline amperes that we expect the device to consume at baseline state. This can be different between models of phones. """ power_monitors = do_import("power_monitors.%s" % FLAGS.power_monitor) testid = time.strftime("%d_%m_%Y__%H__%M_%S") self._power_monitor = power_monitors.Power_Monitor(log_file_id = testid) self._tcp_connect_port = 0 # any available port print ("Establishing connection to device...") self.setUsbEnabled(True) status = self._power_monitor.GetStatus() self._native_hz = status["sampleRate"] * 1000 # the following describes power test being run (i.e on what sensor # and what type of test. This is used for logging. 
self._current_test = "None" self._external_storage = self.executeOnDevice(PowerTest.REQUEST_EXTERNAL_STORAGE) self._max_baseline_amps = max_baseline_amps def __del__(self): self.finalize() def finalize(self): """To be called upon termination of host connection to device""" if self._tcp_connect_port > 0: # tell device side to exit connection loop, and remove the forwarding # connection self.executeOnDevice(PowerTest.REQUEST_EXIT, reportErrors = False) self.executeLocal("adb forward --remove tcp:%d" % self._tcp_connect_port) self._tcp_connect_port = 0 if self._power_monitor: self._power_monitor.Close() self._power_monitor = None def _send(self, msg, report_errors = True): """Connect to the device, send the given command, and then disconnect""" if self._tcp_connect_port == 0: # on first attempt to send a command, connect to device via any open port number, # forwarding that port to a local socket on the device via adb logging.debug("Seeking port for communication...") # discover an open port dummysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) dummysocket.bind(("localhost", 0)) (_, self._tcp_connect_port) = dummysocket.getsockname() dummysocket.close() assert(self._tcp_connect_port > 0) status = self.executeLocal("adb forward tcp:%d localabstract:%s" % (self._tcp_connect_port, PowerTest.DOMAIN_NAME)) # If the status !=0, then the host machine is unable to # forward requests to client over adb. Ending the test and logging error message # to the console on the host. self.endTestIfLostConnection( status != 0, "Unable to forward requests to client over adb") logging.info("Forwarding requests over local port %d", self._tcp_connect_port) link = socket.socket(socket.AF_INET, socket.SOCK_STREAM) try: logging.debug("Connecting to device...") link.connect(("localhost", self._tcp_connect_port)) logging.debug("Connected.") except socket.error as serr: print "Socket connection error: ", serr print "Finalizing and exiting the test" self.endTestIfLostConnection( report_errors, "Unable to communicate with device: connection refused") except: print "Non socket-related exception at this block in _send(); re-raising now." raise logging.debug("Sending '%s'", msg) link.sendall(msg) logging.debug("Getting response...") response = link.recv(4096) logging.debug("Got response '%s'", response) link.close() return response def queryDevice(self, query): """Post a yes/no query to the device, return True upon successful query, False otherwise""" logging.info("Querying device with '%s'", query) return self._send(query) == "OK" # TODO: abstract device communication (and string commands) into its own class def executeOnDevice(self, cmd, reportErrors = True): """Execute a (string) command on the remote device""" return self._send(cmd, reportErrors) def executeLocal(self, cmd, check_status = True): """execute a shell command locally (on the host)""" from subprocess import call status = call(cmd.split(" ")) if status != 0 and check_status: logging.error("Failed to execute \"%s\"", cmd) else: logging.debug("Executed \"%s\"", cmd) return status def reportErrorRaiseExceptionIf(self, condition, msg): """Report an error condition to the device if condition is True. Will raise an exception on the device if condition is True. 
Args: condition: If true, this reports error msg: Message related to exception Raises: A PowerTestException encapsulating the message provided in msg """ if condition: try: logging.error("Exiting on error: %s" % msg) self.executeOnDevice(PowerTest.REQUEST_RAISE % (self._current_test, msg), reportErrors = True) except: logging.error("Unable to communicate with device to report " "error: %s" % msg) self.finalize() sys.exit(msg) raise PowerTestException(msg) def endTestIfLostConnection(self, lost_connection, error_message): """ This function ends the test if lost_connection was true, which indicates that the connection to the device was lost. Args: lost_connection: boolean variable, if True it indicates that connection to device was lost and the test must be terminated. error_message: String to print to the host console before exiting the test (if lost_connection is True) Returns: None. """ if lost_connection: logging.error(error_message) self.finalize() sys.exit(error_message) def setUsbEnabled(self, enabled, verbose = True): if enabled: val = 1 else: val = 0 self._power_monitor.SetUsbPassthrough(val) tries = 0 # Sometimes command won't go through first time, particularly if immediately after a data # collection, so allow for retries # TODO: Move this retry mechanism to the power monitor driver. status = self._power_monitor.GetStatus() while status is None and tries < 5: tries += 1 time.sleep(2.0) logging.error("Retrying get status call...") self._power_monitor.StopDataCollection() self._power_monitor.SetUsbPassthrough(val) status = self._power_monitor.GetStatus() if enabled: if verbose: print("...USB enabled, waiting for device") self.executeLocal("adb wait-for-device") if verbose: print("...device online") else: if verbose: logging.info("...USB disabled") # re-establish port forwarding if enabled and self._tcp_connect_port > 0: status = self.executeLocal("adb forward tcp:%d localabstract:%s" % (self._tcp_connect_port, PowerTest.DOMAIN_NAME)) self.reportErrorRaiseExceptionIf(status != 0, msg = "Unable to forward requests to client over adb") def computeBaselineState(self, measurements): """ Args: measurements: List of floats containing ampere draw measurements taken from the monsoon power monitor. Must be atleast 100 measurements long Returns: A tuple (isBaseline, mean_current) where isBaseline is a boolean that is True only if the baseline state for the phone is detected. mean_current is an estimate of the average baseline current for the device, which is valid only if baseline state is detected (if not, it is set to -1). """ # Looks at the measurements to see if it is in baseline state if len(measurements) < 100: print( "Need at least 100 measurements to determine if baseline state has" " been reached") return (False, -1) # Assumption: At baseline state, the power profile is Gaussian distributed # with low-variance around the mean current draw. # Ideally we should find the mode from a histogram bin to find an estimated mean. # Assuming here that the median is very close to this value; later we check that the # variance of the samples is low enough to validate baseline. 
sorted_measurements = sorted(measurements) number_measurements = len(measurements) if not number_measurements % 2: median_measurement = (sorted_measurements[(number_measurements - 1) / 2] + sorted_measurements[(number_measurements + 1) / 2]) / 2 else: median_measurement = sorted_measurements[number_measurements / 2] # Assume that at baseline state, a large fraction of power measurements # are within +/- EXPECTED_AMPS_VARIATION_HALF_RANGE milliAmperes of # the average baseline current. Find all such measurements in the # sorted measurement vector. left_index = ( bisect_left( sorted_measurements, median_measurement - PowerTest.EXPECTED_AMPS_VARIATION_HALF_RANGE)) right_index = ( bisect_left( sorted_measurements, median_measurement + PowerTest.EXPECTED_AMPS_VARIATION_HALF_RANGE)) average_baseline_amps = scipy.mean( sorted_measurements[left_index: (right_index - 1)]) detected_baseline = True # We enforce that a fraction of more than 'THRESHOLD_BASELINE_SAMPLES_FRACTION' # of samples must be within +/- EXPECTED_AMPS_VARIATION_HALF_RANGE # milliAmperes of the mean baseline current, which we have estimated as # the median. if ((right_index - left_index) < PowerTest.THRESHOLD_BASELINE_SAMPLES_FRACTION * len( measurements)): detected_baseline = False # We check for the maximum limit of the expected baseline if median_measurement > self._max_baseline_amps: detected_baseline = False if average_baseline_amps < 0: print PowerTest.NEGATIVE_AMPERE_ERROR_MESSAGE detected_baseline = False print("%s baseline state" % ("Could detect" if detected_baseline else "Could NOT detect")) print( "median amps = %f, avg amps = %f, fraction of good samples = %f" % (median_measurement, average_baseline_amps, float(right_index - left_index) / len(measurements))) if PowerTest.ENABLE_PLOTTING: plt.plot(measurements) plt.show() print("To continue test, please close the plot window manually.") return (detected_baseline, average_baseline_amps) def isApInSuspendState(self, measurements_amps, nominal_max_amps, test_percentile): """ This function detects AP suspend and display off state of phone after a sensor has been registered. Because the power profile can be very different between sensors and even across builds, it is difficult to specify a tight threshold for mean current draw or mandate that the power measurements must have low variance. We use a criteria that allows for a certain fraction of peaks in power spectrum and checks that test_percentile fraction of measurements must be below the specified value nominal_max_amps Args: measurements_amps: amperes draw measurements from power monitor test_percentile: the fraction of measurements we require to be below a specified amps value nominal_max_amps: the specified value of the max current draw Returns: returns a boolean which is True if and only if the AP suspend and display off state is detected """ count_good = len([m for m in measurements_amps if m < nominal_max_amps]) count_negative = len([m for m in measurements_amps if m < 0]) if count_negative > 0: print PowerTest.NEGATIVE_AMPERE_ERROR_MESSAGE return False; return count_good > test_percentile * len(measurements_amps) def getBaselineState(self): """This function first disables all sensors, then collects measurements through the power monitor and continuously evaluates if baseline state is reached. Once baseline state is detected, it returns a tuple with status information. If baseline is not detected in a preset maximum number of trials, it returns as well. 
Returns: Returns a tuple (isBaseline, mean_current) where isBaseline is a boolean that is True only if the baseline state for the phone is detected. mean_current is an estimate of the average baseline current for the device, which is valid only if baseline state is detected (if not, it is set to -1) """ self.setPowerOn("ALL", False) self.setUsbEnabled(False) print("Waiting %d seconds for baseline state" % DELAY_SCREEN_OFF) time.sleep(DELAY_SCREEN_OFF) MEASUREMENT_DURATION_SECONDS_BASELINE_DETECTION = 5 # seconds NUMBER_MEASUREMENTS_BASELINE_DETECTION = ( PowerTest.RATE_NOMINAL * MEASUREMENT_DURATION_SECONDS_BASELINE_DETECTION) NUMBER_MEASUREMENTS_BASELINE_VERIFICATION = ( NUMBER_MEASUREMENTS_BASELINE_DETECTION * 5) MAX_TRIALS = 50 collected_baseline_measurements = False for tries in xrange(MAX_TRIALS): print("Trial number %d of %d..." % (tries, MAX_TRIALS)) measurements = self.collectMeasurements( NUMBER_MEASUREMENTS_BASELINE_DETECTION, PowerTest.RATE_NOMINAL, verbose = False) if self.computeBaselineState(measurements)[0] is True: collected_baseline_measurements = True break if collected_baseline_measurements: print("Verifying baseline state over a longer interval " "in order to double check baseline state") measurements = self.collectMeasurements( NUMBER_MEASUREMENTS_BASELINE_VERIFICATION, PowerTest.RATE_NOMINAL, verbose = False) self.reportErrorRaiseExceptionIf( not measurements, "No background measurements could be taken") retval = self.computeBaselineState(measurements) if retval[0]: print("Verified baseline.") if measurements and LOG_DATA_TO_FILE: with open("/tmp/cts-power-tests-background-data.log", "w") as f: for m in measurements: f.write("%.4f\n" % m) return retval else: return (False, -1) def waitForApSuspendMode(self): """This function repeatedly collects measurements until AP suspend and display off mode is detected. After a maximum number of trials, if this state is not reached, it raises an error. 
Returns: boolean which is True if device was detected to be in suspend state Raises: Power monitor-related exception """ print("waitForApSuspendMode(): Sleeping for %d seconds" % DELAY_SCREEN_OFF) time.sleep(DELAY_SCREEN_OFF) NUMBER_MEASUREMENTS = 200 # Maximum trials for which to collect measurements to get to Ap suspend # state MAX_TRIALS = 50 got_to_suspend_state = False for count in xrange(MAX_TRIALS): print ("waitForApSuspendMode(): Trial %d of %d" % (count, MAX_TRIALS)) measurements = self.collectMeasurements(NUMBER_MEASUREMENTS, PowerTest.RATE_NOMINAL, verbose = False) if self.isApInSuspendState( measurements, PowerTest.MAX_PERCENTILE_AP_SCREEN_OFF_AMPS, PowerTest.PERCENTILE_MAX_AP_SCREEN_OFF): got_to_suspend_state = True break self.reportErrorRaiseExceptionIf( got_to_suspend_state is False, msg = "Unable to determine application processor suspend mode status.") print("Got to AP suspend state") return got_to_suspend_state def collectMeasurements(self, measurementCount, rate, verbose = True): """Args: measurementCount: Number of measurements to collect from the power monitor rate: The integer frequency in Hertz at which to collect measurements from the power monitor Returns: A list containing measurements from the power monitor; that has the requested count of the number of measurements at the specified rate """ assert (measurementCount > 0) decimate_by = self._native_hz / rate or 1 self._power_monitor.StartDataCollection() sub_measurements = [] measurements = [] tries = 0 if verbose: print("") try: while len(measurements) < measurementCount and tries < 5: if tries: self._power_monitor.StopDataCollection() self._power_monitor.StartDataCollection() time.sleep(1.0) tries += 1 additional = self._power_monitor.CollectData() if additional is not None: tries = 0 sub_measurements.extend(additional) while len(sub_measurements) >= decimate_by: sub_avg = sum(sub_measurements[0:decimate_by]) / decimate_by measurements.append(sub_avg) sub_measurements = sub_measurements[decimate_by:] if verbose: # "\33[1A\33[2K" is a special Linux console control # sequence for moving to the previous line, and # erasing it; and reprinting new text on that # erased line. 
sys.stdout.write("\33[1A\33[2K") print ("MEASURED[%d]: %f" % (len(measurements), measurements[-1])) finally: self._power_monitor.StopDataCollection() self.reportErrorRaiseExceptionIf(measurementCount > len(measurements), "Unable to collect all requested measurements") return measurements def requestUserAcknowledgment(self, msg): """Post message to user on screen and wait for acknowledgment""" response = self.executeOnDevice(PowerTest.REQUEST_USER_RESPONSE % msg) self.reportErrorRaiseExceptionIf( response != "OK", "Unable to request user acknowledgment") def setTestResult(self, test_name, test_result, test_message): """ Reports the result of a test to the device Args: test_name: name of the test test_result: Boolean result of the test (True means Pass) test_message: Relevant message """ print ("Test %s : %s" % (test_name, test_result)) response = ( self.executeOnDevice( PowerTest.REQUEST_SET_TEST_RESULT % (test_name, test_result, test_message))) self.reportErrorRaiseExceptionIf( response != "OK", "Unable to send test status to Verifier") def setPowerOn(self, sensor, powered_on): response = self.executeOnDevice(PowerTest.REQUEST_SENSOR_SWITCH % (("ON" if powered_on else "OFF"), sensor)) self.reportErrorRaiseExceptionIf( response == "ERR", "Unable to set sensor %s state" % sensor) logging.info("Set %s %s", sensor, ("ON" if powered_on else "OFF")) return response def runSensorPowerTest( self, sensor, max_amperes_allowed, baseline_amps, user_request = None): """ Runs power test for a specific sensor; i.e. measures the amperes draw of the phone using monsoon, with the specified sensor mregistered and the phone in suspend state; and verifies that the incremental consumed amperes is within expected bounds. Args: sensor: The specified sensor for which to run the power test max_amperes_allowed: Maximum ampere draw of the device with the sensor registered and device in suspend state baseline_amps: The power draw of the device when it is in baseline state (no sensors registered, display off, AP asleep) """ self._current_test = ("%s_Power_Test_While_%s" % ( sensor, ("Under_Motion" if user_request is not None else "Still"))) try: print ("\n\n---------------------------------") if user_request is not None: print ("Running power test on %s under motion." % sensor) else: print ("Running power test on %s while device is still." 
% sensor) print ("---------------------------------") response = self.executeOnDevice( PowerTest.REQUEST_SENSOR_AVAILABILITY % sensor) if response == "UNAVAILABLE": self.setTestResult( self._current_test, test_result = "SKIPPED", test_message = "Sensor %s not available on this platform" % sensor) self.setPowerOn("ALL", False) if response == "UNAVAILABLE": self.setTestResult( self._current_test, test_result = "SKIPPED", test_message = "Sensor %s not available on this device" % sensor) return self.reportErrorRaiseExceptionIf(response != "OK", "Unable to set all sensor off") self.executeOnDevice(PowerTest.REQUEST_SCREEN_OFF) self.setUsbEnabled(False) self.setUsbEnabled(True) self.setPowerOn(sensor, True) if user_request is not None: print("===========================================\n" + "==> Please follow the instructions presented on the device\n" + "===========================================") self.requestUserAcknowledgment(user_request) self.executeOnDevice(PowerTest.REQUEST_SCREEN_OFF) self.setUsbEnabled(False) self.reportErrorRaiseExceptionIf( response != "OK", "Unable to set sensor %s ON" % sensor) self.waitForApSuspendMode() print ("Collecting sensor %s measurements" % sensor) measurements = self.collectMeasurements(PowerTest.SAMPLE_COUNT_NOMINAL, PowerTest.RATE_NOMINAL) if measurements and LOG_DATA_TO_FILE: with open("/tmp/cts-power-tests-%s-%s-sensor-data.log" % (sensor, ("Under_Motion" if user_request is not None else "Still")), "w") as f: for m in measurements: f.write("%.4f\n" % m) self.setUsbEnabled(True, verbose = False) print("Saving raw data files to device...") self.executeLocal("adb shell mkdir -p %s" % self._external_storage, False) self.executeLocal("adb push %s %s/." % (f.name, self._external_storage)) self.setUsbEnabled(False, verbose = False) self.reportErrorRaiseExceptionIf( not measurements, "No measurements could be taken for %s" % sensor) avg = sum(measurements) / len(measurements) squared = [(m - avg) * (m - avg) for m in measurements] stddev = math.sqrt(sum(squared) / len(squared)) current_diff = avg - baseline_amps self.setUsbEnabled(True) max_power = max(measurements) - avg if current_diff <= max_amperes_allowed: # TODO: fail the test of background > current message = ( "Draw is within limits. Sensor delta:%f mAmp Baseline:%f " "mAmp Sensor: %f mAmp Stddev : %f mAmp Peak: %f mAmp") % ( current_diff * 1000.0, baseline_amps * 1000.0, avg * 1000.0, stddev * 1000.0, max_power * 1000.0) else: message = ( "Draw is too high. 
Current:%f Background:%f Measured: %f " "Stddev: %f Peak: %f") % ( current_diff * 1000.0, baseline_amps * 1000.0, avg * 1000.0, stddev * 1000.0, max_power * 1000.0) self.setTestResult( self._current_test, ("PASS" if (current_diff <= max_amperes_allowed) else "FAIL"), message) print("Result: " + message) except: traceback.print_exc() self.setTestResult(self._current_test, test_result = "FAIL", test_message = "Exception occurred during run of test.") raise @staticmethod def runTests(max_baseline_amps): testrunner = None try: GENERIC_MOTION_REQUEST = ("\n===> Please press Next and when the " "screen is off, keep the device under motion with only tiny, " "slow movements until the screen turns on again.\nPlease " "refrain from interacting with the screen or pressing any side " "buttons while measurements are taken.") USER_STEPS_REQUEST = ("\n===> Please press Next and when the " "screen is off, then move the device to simulate step motion " "until the screen turns on again.\nPlease refrain from " "interacting with the screen or pressing any side buttons " "while measurements are taken.") testrunner = PowerTest(max_baseline_amps) testrunner.executeOnDevice( PowerTest.REQUEST_SHOW_MESSAGE % "Connected. Running tests...") is_baseline_success, baseline_amps = testrunner.getBaselineState() if is_baseline_success: testrunner.setUsbEnabled(True) # TODO: Enable testing a single sensor testrunner.runSensorPowerTest( "SIGNIFICANT_MOTION", PowerTest.MAX_SIGMO_AMPS, baseline_amps, user_request = GENERIC_MOTION_REQUEST) testrunner.runSensorPowerTest( "STEP_DETECTOR", PowerTest.MAX_STEP_DETECTOR_AMPS, baseline_amps, user_request = USER_STEPS_REQUEST) testrunner.runSensorPowerTest( "STEP_COUNTER", PowerTest.MAX_STEP_COUNTER_AMPS, baseline_amps, user_request = USER_STEPS_REQUEST) testrunner.runSensorPowerTest( "ACCELEROMETER", PowerTest.MAX_ACCEL_AMPS, baseline_amps, user_request = GENERIC_MOTION_REQUEST) testrunner.runSensorPowerTest( "MAGNETIC_FIELD", PowerTest.MAX_MAG_AMPS, baseline_amps, user_request = GENERIC_MOTION_REQUEST) testrunner.runSensorPowerTest( "GYROSCOPE", PowerTest.MAX_GYRO_AMPS, baseline_amps, user_request = GENERIC_MOTION_REQUEST) testrunner.runSensorPowerTest( "ACCELEROMETER", PowerTest.MAX_ACCEL_AMPS, baseline_amps, user_request = None) testrunner.runSensorPowerTest( "MAGNETIC_FIELD", PowerTest.MAX_MAG_AMPS, baseline_amps, user_request = None) testrunner.runSensorPowerTest( "GYROSCOPE", PowerTest.MAX_GYRO_AMPS, baseline_amps, user_request = None) testrunner.runSensorPowerTest( "SIGNIFICANT_MOTION", PowerTest.MAX_SIGMO_AMPS, baseline_amps, user_request = None) testrunner.runSensorPowerTest( "STEP_DETECTOR", PowerTest.MAX_STEP_DETECTOR_AMPS, baseline_amps, user_request = None) testrunner.runSensorPowerTest( "STEP_COUNTER", PowerTest.MAX_STEP_COUNTER_AMPS, baseline_amps, user_request = None) else: print("Could not get to baseline state. This is either because " "in several trials, the monitor could not measure a set " "of power measurements that had the specified low " "variance or the mean measurements were below the " "expected value. None of the sensor power measurement " " tests were performed due to not being able to detect " "baseline state. Please re-run the power tests.") except KeyboardInterrupt: print "Keyboard interrupt from user." 
raise except: import traceback traceback.print_exc() finally: logging.info("TESTS COMPLETE") if testrunner: try: testrunner.finalize() except socket.error: sys.exit( "===================================================\n" "Unable to connect to device under test. Make sure \n" "the device is connected via the usb pass-through, \n" "the CtsVerifier app is running the SensorPowerTest on \n" "the device, and USB pass-through is enabled.\n" "===================================================") def main(argv): """ Simple command-line interface for a power test application.""" useful_flags = ["voltage", "status", "usbpassthrough", "samples", "current", "log", "power_monitor"] if not [f for f in useful_flags if FLAGS.get(f, None) is not None]: print __doc__.strip() print FLAGS.MainModuleHelp() return if FLAGS.avg and FLAGS.avg < 0: logging.error("--avg must be greater than 0") return if FLAGS.voltage is not None: if FLAGS.voltage > 5.5: print("!!WARNING: Voltage higher than typical values!!!") try: response = raw_input( "Voltage of %.3f requested. Confirm this is correct (Y/N)" % FLAGS.voltage) if response.upper() != "Y": sys.exit("Aborting") except: sys.exit("Aborting.") if not FLAGS.power_monitor: sys.exit( "You must specify a '--power_monitor' option to specify which power " "monitor type " + "you are using.\nOne of:\n \n ".join(available_monitors)) power_monitors = do_import("power_monitors.%s" % FLAGS.power_monitor) try: mon = power_monitors.Power_Monitor(device = FLAGS.device) except: import traceback traceback.print_exc() sys.exit("No power monitors found") if FLAGS.voltage is not None: if FLAGS.ramp is not None: mon.RampVoltage(mon.start_voltage, FLAGS.voltage) else: mon.SetVoltage(FLAGS.voltage) if FLAGS.current is not None: mon.SetMaxCurrent(FLAGS.current) if FLAGS.status: items = sorted(mon.GetStatus().items()) print "\n".join(["%s: %s" % item for item in items]) if FLAGS.usbpassthrough: if FLAGS.usbpassthrough == "off": mon.SetUsbPassthrough(0) elif FLAGS.usbpassthrough == "on": mon.SetUsbPassthrough(1) elif FLAGS.usbpassthrough == "auto": mon.SetUsbPassthrough(2) else: mon.Close() sys.exit("bad pass-through flag: %s" % FLAGS.usbpassthrough) if FLAGS.samples: # Make sure state is normal mon.StopDataCollection() status = mon.GetStatus() native_hz = status["sampleRate"] * 1000 # Collect and average samples as specified mon.StartDataCollection() # In case FLAGS.hz doesn't divide native_hz exactly, use this invariant: # 'offset' = (consumed samples) * FLAGS.hz - (emitted samples) * native_hz # This is the error accumulator in a variation of Bresenham's algorithm. emitted = offset = 0 collected = [] history_deque = collections.deque() # past n samples for rolling average # TODO: Complicated lines of code below. Refactoring needed try: last_flush = time.time() while emitted < FLAGS.samples or FLAGS.samples == -1: # The number of raw samples to consume before emitting the next output need = (native_hz - offset + FLAGS.hz - 1) / FLAGS.hz if need > len(collected): # still need more input samples samples = mon.CollectData() if not samples: break collected.extend(samples) else: # Have enough data, generate output samples. # Adjust for consuming 'need' input samples. 
offset += need * FLAGS.hz while offset >= native_hz: # maybe multiple, if FLAGS.hz > native_hz this_sample = sum(collected[:need]) / need if FLAGS.timestamp: print int(time.time()), if FLAGS.avg: history_deque.appendleft(this_sample) if len(history_deque) > FLAGS.avg: history_deque.pop() print "%f %f" % (this_sample, sum(history_deque) / len(history_deque)) else: print "%f" % this_sample sys.stdout.flush() offset -= native_hz emitted += 1 # adjust for emitting 1 output sample collected = collected[need:] now = time.time() if now - last_flush >= 0.99: # flush every second sys.stdout.flush() last_flush = now except KeyboardInterrupt: print("interrupted") return 1 finally: mon.Close() return 0 if FLAGS.run: if not FLAGS.power_monitor: sys.exit( "When running power tests, you must specify which type of power " "monitor to use" + " with '--power_monitor <type of power monitor>'") try: PowerTest.runTests(FLAGS.max_baseline_amps) except KeyboardInterrupt: print "Keyboard interrupt from user" if __name__ == "__main__": flags.DEFINE_boolean("status", None, "Print power meter status") flags.DEFINE_integer("avg", None, "Also report average over last n data points") flags.DEFINE_float("voltage", None, "Set output voltage (0 for off)") flags.DEFINE_float("current", None, "Set max output current") flags.DEFINE_string("usbpassthrough", None, "USB control (on, off, auto)") flags.DEFINE_integer("samples", None, "Collect and print this many samples") flags.DEFINE_integer("hz", 5000, "Print this many samples/sec") flags.DEFINE_string("device", None, "Path to the device in /dev/... (ex:/dev/ttyACM1)") flags.DEFINE_boolean("timestamp", None, "Also print integer (seconds) timestamp on each line") flags.DEFINE_boolean("ramp", True, "Gradually increase voltage") flags.DEFINE_boolean("log", False, "Log progress to a file or not") flags.DEFINE_boolean("run", False, "Run the test suite for power") flags.DEFINE_string("power_monitor", None, "Type of power monitor to use") flags.DEFINE_float("max_baseline_amps", 0.005, "Set maximum baseline current for device being tested") sys.exit(main(FLAGS(sys.argv)))
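The sample-printing loop in main() above downsamples the monitor's native rate to FLAGS.hz with an error accumulator, the Bresenham-style invariant described in its comment. A self-contained sketch of the same decimation over a plain list of floats follows; the function name and rates are illustrative, not part of the script's API:

# Sketch of the error-accumulator decimation used in main(): consume 'need'
# raw samples per emitted sample so that, on average, native_hz inputs map to
# out_hz outputs even when the rates do not divide evenly.
def decimate(samples, native_hz, out_hz):
    offset = 0
    pending = list(samples)
    out = []
    while pending:
        need = (native_hz - offset + out_hz - 1) // out_hz  # ceiling division
        if need > len(pending):
            break  # not enough raw samples left for another output
        offset += need * out_hz
        while offset >= native_hz:  # may emit more than once if out_hz > native_hz
            out.append(sum(pending[:need]) / float(need))
            offset -= native_hz
        pending = pending[need:]
    return out

For example, 5000 Hz data with out_hz=100 averages every 50 raw samples into one output, and non-divisible rates are handled by carrying the leftover offset into the next output.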
s20121035/rk3288_android5.1_repo
cts/apps/CtsVerifier/assets/scripts/execute_power_tests.py
Python
gpl-3.0
43,223
[ "Gaussian" ]
091dea942b9798b421b7c4ad7543520be5ced1b38d0c49be1c691fd764e13b79
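computeBaselineState() in the file above decides the device has settled when a large enough fraction of measurements sits within plus or minus EXPECTED_AMPS_VARIATION_HALF_RANGE of the median. A condensed sketch of just that test, with thresholds mirroring the class constants; the helper name is illustrative and the even-count median averaging of the original is omitted:

# Condensed sketch of the baseline test: sort the samples, take the median,
# and require that at least min_fraction of samples fall within
# +/- half_range amperes of it.
from bisect import bisect_left

def looks_like_baseline(samples, half_range=0.0005, min_fraction=0.86):
    if len(samples) < 100:
        return False
    s = sorted(samples)
    median = s[len(s) // 2]
    lo = bisect_left(s, median - half_range)
    hi = bisect_left(s, median + half_range)
    return (hi - lo) >= min_fraction * len(s)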
# -*- coding: utf-8 -*- # vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4 ############################################################################### # OpenLP - Open Source Lyrics Projection # # --------------------------------------------------------------------------- # # Copyright (c) 2008-2013 Raoul Snyman # # Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan # # Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, # # Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. # # Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, # # Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, # # Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, # # Frode Woldsund, Martin Zibricky, Patrick Zimmermann # # --------------------------------------------------------------------------- # # This program is free software; you can redistribute it and/or modify it # # under the terms of the GNU General Public License as published by the Free # # Software Foundation; version 2 of the License. # # # # This program is distributed in the hope that it will be useful, but WITHOUT # # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # # more details. # # # # You should have received a copy of the GNU General Public License along # # with this program; if not, write to the Free Software Foundation, Inc., 59 # # Temple Place, Suite 330, Boston, MA 02111-1307 USA # ############################################################################### from PyQt4 import QtCore, QtGui from openlp.core.lib import UiStrings, build_icon from openlp.core.lib.ui import create_button_box from openlp.plugins.songs.lib.ui import SongStrings class Ui_SongMaintenanceDialog(object): def setupUi(self, songMaintenanceDialog): songMaintenanceDialog.setObjectName(u'songMaintenanceDialog') songMaintenanceDialog.setWindowModality(QtCore.Qt.ApplicationModal) songMaintenanceDialog.resize(10, 350) self.dialogLayout = QtGui.QGridLayout(songMaintenanceDialog) self.dialogLayout.setObjectName(u'dialog_layout') self.typeListWidget = QtGui.QListWidget(songMaintenanceDialog) self.typeListWidget.setIconSize(QtCore.QSize(32, 32)) self.typeListWidget.setUniformItemSizes(True) self.typeListWidget.setObjectName(u'typeListWidget') self.listItemAuthors = QtGui.QListWidgetItem(self.typeListWidget) self.listItemAuthors.setIcon(build_icon(u':/songs/author_maintenance.png')) self.listItemTopics = QtGui.QListWidgetItem(self.typeListWidget) self.listItemTopics.setIcon(build_icon(u':/songs/topic_maintenance.png')) self.listItemBooks = QtGui.QListWidgetItem(self.typeListWidget) self.listItemBooks.setIcon(build_icon(u':/songs/book_maintenance.png')) self.dialogLayout.addWidget(self.typeListWidget, 0, 0) self.stackedLayout = QtGui.QStackedLayout() self.stackedLayout.setObjectName(u'stackedLayout') # authors page self.authorsPage = QtGui.QWidget(songMaintenanceDialog) self.authorsPage.setObjectName(u'authorsPage') self.authorsLayout = QtGui.QVBoxLayout(self.authorsPage) self.authorsLayout.setObjectName(u'authorsLayout') self.authorsListWidget = QtGui.QListWidget(self.authorsPage) self.authorsListWidget.setObjectName(u'authorsListWidget') self.authorsLayout.addWidget(self.authorsListWidget) self.authorsButtonsLayout = QtGui.QHBoxLayout() self.authorsButtonsLayout.setObjectName(u'authorsButtonsLayout') self.authorsButtonsLayout.addStretch() 
self.authorsAddButton = QtGui.QPushButton(self.authorsPage) self.authorsAddButton.setIcon(build_icon(u':/songs/author_add.png')) self.authorsAddButton.setObjectName(u'authorsAddButton') self.authorsButtonsLayout.addWidget(self.authorsAddButton) self.authorsEditButton = QtGui.QPushButton(self.authorsPage) self.authorsEditButton.setIcon(build_icon(u':/songs/author_edit.png')) self.authorsEditButton.setObjectName(u'authorsEditButton') self.authorsButtonsLayout.addWidget(self.authorsEditButton) self.authorsDeleteButton = QtGui.QPushButton(self.authorsPage) self.authorsDeleteButton.setIcon(build_icon(u':/songs/author_delete.png')) self.authorsDeleteButton.setObjectName(u'authorsDeleteButton') self.authorsButtonsLayout.addWidget(self.authorsDeleteButton) self.authorsLayout.addLayout(self.authorsButtonsLayout) self.stackedLayout.addWidget(self.authorsPage) # topics page self.topicsPage = QtGui.QWidget(songMaintenanceDialog) self.topicsPage.setObjectName(u'topicsPage') self.topicsLayout = QtGui.QVBoxLayout(self.topicsPage) self.topicsLayout.setObjectName(u'topicsLayout') self.topicsListWidget = QtGui.QListWidget(self.topicsPage) self.topicsListWidget.setObjectName(u'topicsListWidget') self.topicsLayout.addWidget(self.topicsListWidget) self.topicsButtonsLayout = QtGui.QHBoxLayout() self.topicsButtonsLayout.setObjectName(u'topicsButtonLayout') self.topicsButtonsLayout.addStretch() self.topicsAddButton = QtGui.QPushButton(self.topicsPage) self.topicsAddButton.setIcon(build_icon(u':/songs/topic_add.png')) self.topicsAddButton.setObjectName(u'topicsAddButton') self.topicsButtonsLayout.addWidget(self.topicsAddButton) self.topicsEditButton = QtGui.QPushButton(self.topicsPage) self.topicsEditButton.setIcon(build_icon(u':/songs/topic_edit.png')) self.topicsEditButton.setObjectName(u'topicsEditButton') self.topicsButtonsLayout.addWidget(self.topicsEditButton) self.topicsDeleteButton = QtGui.QPushButton(self.topicsPage) self.topicsDeleteButton.setIcon(build_icon(u':/songs/topic_delete.png')) self.topicsDeleteButton.setObjectName(u'topicsDeleteButton') self.topicsButtonsLayout.addWidget(self.topicsDeleteButton) self.topicsLayout.addLayout(self.topicsButtonsLayout) self.stackedLayout.addWidget(self.topicsPage) # song books page self.booksPage = QtGui.QWidget(songMaintenanceDialog) self.booksPage.setObjectName(u'booksPage') self.booksLayout = QtGui.QVBoxLayout(self.booksPage) self.booksLayout.setObjectName(u'booksLayout') self.booksListWidget = QtGui.QListWidget(self.booksPage) self.booksListWidget.setObjectName(u'booksListWidget') self.booksLayout.addWidget(self.booksListWidget) self.booksButtonsLayout = QtGui.QHBoxLayout() self.booksButtonsLayout.setObjectName(u'booksButtonLayout') self.booksButtonsLayout.addStretch() self.booksAddButton = QtGui.QPushButton(self.booksPage) self.booksAddButton.setIcon(build_icon(u':/songs/book_add.png')) self.booksAddButton.setObjectName(u'booksAddButton') self.booksButtonsLayout.addWidget(self.booksAddButton) self.booksEditButton = QtGui.QPushButton(self.booksPage) self.booksEditButton.setIcon(build_icon(u':/songs/book_edit.png')) self.booksEditButton.setObjectName(u'booksEditButton') self.booksButtonsLayout.addWidget(self.booksEditButton) self.booksDeleteButton = QtGui.QPushButton(self.booksPage) self.booksDeleteButton.setIcon(build_icon(u':/songs/book_delete.png')) self.booksDeleteButton.setObjectName(u'booksDeleteButton') self.booksButtonsLayout.addWidget(self.booksDeleteButton) self.booksLayout.addLayout(self.booksButtonsLayout) 
self.stackedLayout.addWidget(self.booksPage) # self.dialogLayout.addLayout(self.stackedLayout, 0, 1) self.button_box = create_button_box(songMaintenanceDialog, u'button_box', [u'close']) self.dialogLayout.addWidget(self.button_box, 1, 0, 1, 2) self.retranslateUi(songMaintenanceDialog) self.stackedLayout.setCurrentIndex(0) QtCore.QObject.connect(self.typeListWidget, QtCore.SIGNAL(u'currentRowChanged(int)'), self.stackedLayout.setCurrentIndex) def retranslateUi(self, songMaintenanceDialog): songMaintenanceDialog.setWindowTitle(SongStrings.SongMaintenance) self.listItemAuthors.setText(SongStrings.Authors) self.listItemTopics.setText(SongStrings.Topics) self.listItemBooks.setText(SongStrings.SongBooks) self.authorsAddButton.setText(UiStrings().Add) self.authorsEditButton.setText(UiStrings().Edit) self.authorsDeleteButton.setText(UiStrings().Delete) self.topicsAddButton.setText(UiStrings().Add) self.topicsEditButton.setText(UiStrings().Edit) self.topicsDeleteButton.setText(UiStrings().Delete) self.booksAddButton.setText(UiStrings().Add) self.booksEditButton.setText(UiStrings().Edit) self.booksDeleteButton.setText(UiStrings().Delete) typeListWidth = max(self.fontMetrics().width(SongStrings.Authors), self.fontMetrics().width(SongStrings.Topics), self.fontMetrics().width(SongStrings.SongBooks)) self.typeListWidget.setFixedWidth(typeListWidth + self.typeListWidget.iconSize().width() + 32)
marmyshev/transitions
openlp/plugins/songs/forms/songmaintenancedialog.py
Python
gpl-2.0
9,854
[ "Brian" ]
d308fc4efc8a18b6b8e407571beec8074858b35fcb167114e62b3439c75dc11e
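The Ui_SongMaintenanceDialog class above follows the usual Qt Designer-style pattern: a QDialog subclass mixes the Ui_ class in and calls setupUi(self) on itself, which is also why retranslateUi can call self.fontMetrics(). A minimal sketch of that usage is below; the SongMaintenanceForm class name and the __main__ wiring are illustrative assumptions, not taken from the file above, and only the setupUi()/retranslateUi() contract comes from the code shown.

# Hypothetical usage sketch for the Ui_ class above (PyQt4).
# The form class name and application bootstrap are assumptions.
import sys
from PyQt4 import QtGui

class SongMaintenanceForm(QtGui.QDialog, Ui_SongMaintenanceDialog):
    """Dialog that mixes in the UI definition shown above."""
    def __init__(self, parent=None):
        super(SongMaintenanceForm, self).__init__(parent)
        # Builds the type list, stacked Authors/Topics/Books pages and button box,
        # and wires currentRowChanged to the stacked layout.
        self.setupUi(self)

if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    dialog = SongMaintenanceForm()
    dialog.exec_()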
# coding: utf-8 # Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module implements a core class LammpsData for generating/parsing LAMMPS data file, and other bridging classes to build LammpsData from molecules. This module also implements a subclass CombinedData for merging LammpsData object. Only point particle styles are supported for now (atom_style in angle, atomic, bond, charge, full and molecular only). See the pages below for more info. http://lammps.sandia.gov/doc/atom_style.html http://lammps.sandia.gov/doc/read_data.html """ import itertools import re import warnings from collections import OrderedDict from io import StringIO from pathlib import Path import numpy as np import pandas as pd from monty.dev import deprecated from monty.json import MSONable from monty.serialization import loadfn from pymatgen import yaml from pymatgen.core.periodic_table import Element from pymatgen.core.lattice import Lattice from pymatgen.core.structure import Molecule, Structure from pymatgen.core.operations import SymmOp from pymatgen.util.io_utils import clean_lines __author__ = "Kiran Mathew, Zhi Deng, Tingzheng Hou" __copyright__ = "Copyright 2018, The Materials Virtual Lab" __version__ = "1.0" __maintainer__ = "Zhi Deng" __email__ = "z4deng@eng.ucsd.edu" __date__ = "Aug 1, 2018" MODULE_DIR = Path(__file__).resolve().parent SECTION_KEYWORDS = { "atom": [ "Atoms", "Velocities", "Masses", "Ellipsoids", "Lines", "Triangles", "Bodies", ], "topology": ["Bonds", "Angles", "Dihedrals", "Impropers"], "ff": [ "Pair Coeffs", "PairIJ Coeffs", "Bond Coeffs", "Angle Coeffs", "Dihedral Coeffs", "Improper Coeffs", ], "class2": [ "BondBond Coeffs", "BondAngle Coeffs", "MiddleBondTorsion Coeffs", "EndBondTorsion Coeffs", "AngleTorsion Coeffs", "AngleAngleTorsion Coeffs", "BondBond13 Coeffs", "AngleAngle Coeffs", ], } CLASS2_KEYWORDS = { "Angle Coeffs": ["BondBond Coeffs", "BondAngle Coeffs"], "Dihedral Coeffs": [ "MiddleBondTorsion Coeffs", "EndBondTorsion Coeffs", "AngleTorsion Coeffs", "AngleAngleTorsion Coeffs", "BondBond13 Coeffs", ], "Improper Coeffs": ["AngleAngle Coeffs"], } SECTION_HEADERS = { "Masses": ["mass"], "Velocities": ["vx", "vy", "vz"], "Bonds": ["type", "atom1", "atom2"], "Angles": ["type", "atom1", "atom2", "atom3"], "Dihedrals": ["type", "atom1", "atom2", "atom3", "atom4"], "Impropers": ["type", "atom1", "atom2", "atom3", "atom4"], } ATOMS_HEADERS = { "angle": ["molecule-ID", "type", "x", "y", "z"], "atomic": ["type", "x", "y", "z"], "bond": ["molecule-ID", "type", "x", "y", "z"], "charge": ["type", "q", "x", "y", "z"], "full": ["molecule-ID", "type", "q", "x", "y", "z"], "molecular": ["molecule-ID", "type", "x", "y", "z"], } class LammpsBox(MSONable): """ Object for representing a simulation box in LAMMPS settings. """ def __init__(self, bounds, tilt=None): """ Args: bounds: A (3, 2) array/list of floats setting the boundaries of simulation box. tilt: A (3,) array/list of floats setting the tilt of simulation box. Default to None, i.e., use an orthogonal box. 
""" bounds_arr = np.array(bounds) assert bounds_arr.shape == ( 3, 2, ), "Expecting a (3, 2) array for bounds," " got {}".format(bounds_arr.shape) self.bounds = bounds_arr.tolist() matrix = np.diag(bounds_arr[:, 1] - bounds_arr[:, 0]) self.tilt = None if tilt is not None: tilt_arr = np.array(tilt) assert tilt_arr.shape == (3,), "Expecting a (3,) array for box_tilt," " got {}".format(tilt_arr.shape) self.tilt = tilt_arr.tolist() matrix[1, 0] = tilt_arr[0] matrix[2, 0] = tilt_arr[1] matrix[2, 1] = tilt_arr[2] self._matrix = matrix def __str__(self): return self.get_string() def __repr__(self): return self.get_string() @property def volume(self): """ Volume of simulation box. """ m = self._matrix return np.dot(np.cross(m[0], m[1]), m[2]) def get_string(self, significant_figures=6): """ Returns the string representation of simulation box in LAMMPS data file format. Args: significant_figures (int): No. of significant figures to output for box settings. Default to 6. Returns: String representation """ ph = "{:.%df}" % significant_figures lines = [] for bound, d in zip(self.bounds, "xyz"): fillers = bound + [d] * 2 bound_format = " ".join([ph] * 2 + [" {}lo {}hi"]) lines.append(bound_format.format(*fillers)) if self.tilt: tilt_format = " ".join([ph] * 3 + [" xy xz yz"]) lines.append(tilt_format.format(*self.tilt)) return "\n".join(lines) def get_box_shift(self, i): """ Calculates the coordinate shift due to PBC. Args: i: A (n, 3) integer array containing the labels for box images of n entries. Returns: Coorindate shift array with the same shape of i """ return np.inner(i, self._matrix.T) def to_lattice(self): """ Converts the simulation box to a more powerful Lattice backend. Note that Lattice is always periodic in 3D space while a simulation box is not necessarily periodic in all dimensions. Returns: Lattice """ return Lattice(self._matrix) def lattice_2_lmpbox(lattice, origin=(0, 0, 0)): """ Converts a lattice object to LammpsBox, and calculates the symmetry operation used. Args: lattice (Lattice): Input lattice. origin: A (3,) array/list of floats setting lower bounds of simulation box. Default to (0, 0, 0). Returns: LammpsBox, SymmOp """ a, b, c = lattice.abc xlo, ylo, zlo = origin xhi = a + xlo m = lattice.matrix xy = np.dot(m[1], m[0] / a) yhi = np.sqrt(b ** 2 - xy ** 2) + ylo xz = np.dot(m[2], m[0] / a) yz = (np.dot(m[1], m[2]) - xy * xz) / (yhi - ylo) zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) + zlo tilt = None if lattice.is_orthogonal else [xy, xz, yz] rot_matrix = np.linalg.solve([[xhi - xlo, 0, 0], [xy, yhi - ylo, 0], [xz, yz, zhi - zlo]], m) bounds = [[xlo, xhi], [ylo, yhi], [zlo, zhi]] symmop = SymmOp.from_rotation_and_translation(rot_matrix, origin) return LammpsBox(bounds, tilt), symmop class LammpsData(MSONable): """ Object for representing the data in a LAMMPS data file. """ def __init__( self, box, masses, atoms, velocities=None, force_field=None, topology=None, atom_style="full", ): """ This is a low level constructor designed to work with parsed data or other bridging objects (ForceField and Topology). Not recommended to use directly. Args: box (LammpsBox): Simulation box. masses (pandas.DataFrame): DataFrame with one column ["mass"] for Masses section. atoms (pandas.DataFrame): DataFrame with multiple columns for Atoms section. Column names vary with atom_style. velocities (pandas.DataFrame): DataFrame with three columns ["vx", "vy", "vz"] for Velocities section. Optional with default to None. If not None, its index should be consistent with atoms. 
force_field (dict): Data for force field sections. Optional with default to None. Only keywords in force field and class 2 force field are valid keys, and each value is a DataFrame. topology (dict): Data for topology sections. Optional with default to None. Only keywords in topology are valid keys, and each value is a DataFrame. atom_style (str): Output atom_style. Default to "full". """ if velocities is not None: assert len(velocities) == len(atoms), "Inconsistency found between atoms and velocities" if force_field: all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"] force_field = {k: v for k, v in force_field.items() if k in all_ff_kws} if topology: topology = {k: v for k, v in topology.items() if k in SECTION_KEYWORDS["topology"]} self.box = box self.masses = masses self.atoms = atoms self.velocities = velocities self.force_field = force_field self.topology = topology self.atom_style = atom_style def __str__(self): return self.get_string() def __repr__(self): return self.get_string() @property def structure(self): """ Exports a periodic structure object representing the simulation box. Return: Structure """ masses = self.masses atoms = self.atoms.copy() if "nx" in atoms.columns: atoms.drop(["nx", "ny", "nz"], axis=1, inplace=True) atoms["molecule-ID"] = 1 ld_copy = self.__class__(self.box, masses, atoms) topologies = ld_copy.disassemble()[-1] molecule = topologies[0].sites coords = molecule.cart_coords - np.array(self.box.bounds)[:, 0] species = molecule.species latt = self.box.to_lattice() site_properties = {} if "q" in atoms: site_properties["charge"] = atoms["q"].values if self.velocities is not None: site_properties["velocities"] = self.velocities.values return Structure( latt, species, coords, coords_are_cartesian=True, site_properties=site_properties, ) def get_string(self, distance=6, velocity=8, charge=4): """ Returns the string representation of LammpsData, essentially the string to be written to a file. Support hybrid style coeffs read and write. Args: distance (int): No. of significant figures to output for box settings (bounds and tilt) and atomic coordinates. Default to 6. velocity (int): No. of significant figures to output for velocities. Default to 8. charge (int): No. of significant figures to output for charges. Default to 3. 
Returns: String representation """ file_template = """Generated by pymatgen.io.lammps.data.LammpsData {stats} {box} {body} """ box = self.box.get_string(distance) body_dict = OrderedDict() body_dict["Masses"] = self.masses types = OrderedDict() types["atom"] = len(self.masses) if self.force_field: all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"] ff_kws = [k for k in all_ff_kws if k in self.force_field] for kw in ff_kws: body_dict[kw] = self.force_field[kw] if kw in SECTION_KEYWORDS["ff"][2:]: types[kw.lower()[:-7]] = len(self.force_field[kw]) body_dict["Atoms"] = self.atoms counts = OrderedDict() counts["atoms"] = len(self.atoms) if self.velocities is not None: body_dict["Velocities"] = self.velocities if self.topology: for kw in SECTION_KEYWORDS["topology"]: if kw in self.topology: body_dict[kw] = self.topology[kw] counts[kw.lower()] = len(self.topology[kw]) all_stats = list(counts.values()) + list(types.values()) stats_template = "{:>%d} {}" % len(str(max(all_stats))) count_lines = [stats_template.format(v, k) for k, v in counts.items()] type_lines = [stats_template.format(v, k + " types") for k, v in types.items()] stats = "\n".join(count_lines + [""] + type_lines) def map_coords(q): return ("{:.%df}" % distance).format(q) def map_velos(q): return ("{:.%df}" % velocity).format(q) def map_charges(q): return ("{:.%df}" % charge).format(q) float_format = "{:.9f}".format float_format_2 = "{:.1f}".format int_format = "{:.0f}".format default_formatters = { "x": map_coords, "y": map_coords, "z": map_coords, "vx": map_velos, "vy": map_velos, "vz": map_velos, "q": map_charges, } coeffsdatatype = loadfn(str(MODULE_DIR / "CoeffsDataType.yaml")) coeffs = {} for style, types in coeffsdatatype.items(): coeffs[style] = {} for type, formatter in types.items(): coeffs[style][type] = {} for coeff, datatype in formatter.items(): if datatype == "int_format": coeffs[style][type][coeff] = int_format elif datatype == "float_format_2": coeffs[style][type][coeff] = float_format_2 else: coeffs[style][type][coeff] = float_format section_template = "{kw}\n\n{df}\n" parts = [] for k, v in body_dict.items(): index = k != "PairIJ Coeffs" if k in [ "Bond Coeffs", "Angle Coeffs", "Dihedral Coeffs", "Improper Coeffs", ]: listofdf = np.array_split(v, len(v.index)) df_string = "" for i, df in enumerate(listofdf): if isinstance(df.iloc[0]["coeff1"], str): try: formatters = { **default_formatters, **coeffs[k][df.iloc[0]["coeff1"]], } except KeyError: formatters = default_formatters line_string = df.to_string( header=False, formatters=formatters, index_names=False, index=index, na_rep="", ) else: line_string = v.to_string( header=False, formatters=default_formatters, index_names=False, index=index, na_rep="", ).splitlines()[i] df_string += line_string.replace("nan", "").rstrip() + "\n" else: df_string = v.to_string( header=False, formatters=default_formatters, index_names=False, index=index, na_rep="", ) parts.append(section_template.format(kw=k, df=df_string)) body = "\n".join(parts) return file_template.format(stats=stats, box=box, body=body) def write_file(self, filename, distance=6, velocity=8, charge=4): """ Writes LammpsData to file. Args: filename (str): Filename. distance (int): No. of significant figures to output for box settings (bounds and tilt) and atomic coordinates. Default to 6. velocity (int): No. of significant figures to output for velocities. Default to 8. charge (int): No. of significant figures to output for charges. Default to 3. 
""" with open(filename, "w") as f: f.write(self.get_string(distance=distance, velocity=velocity, charge=charge)) def disassemble(self, atom_labels=None, guess_element=True, ff_label="ff_map"): """ Breaks down LammpsData to building blocks (LammpsBox, ForceField and a series of Topology). RESTRICTIONS APPLIED: 1. No complex force field defined not just on atom types, where the same type or equivalent types of topology may have more than one set of coefficients. 2. No intermolecular topologies (with atoms from different molecule-ID) since a Topology object includes data for ONE molecule or structure only. Args: atom_labels ([str]): List of strings (must be different from one another) for labelling each atom type found in Masses section. Default to None, where the labels are automaticaly added based on either element guess or dummy specie assignment. guess_element (bool): Whether to guess the element based on its atomic mass. Default to True, otherwise dummy species "Qa", "Qb", ... will be assigned to various atom types. The guessed or assigned elements will be reflected on atom labels if atom_labels is None, as well as on the species of molecule in each Topology. ff_label (str): Site property key for labeling atoms of different types. Default to "ff_map". Returns: LammpsBox, ForceField, [Topology] """ atoms_df = self.atoms.copy() if "nx" in atoms_df.columns: atoms_df[["x", "y", "z"]] += self.box.get_box_shift(atoms_df[["nx", "ny", "nz"]].values) atoms_df = pd.concat([atoms_df, self.velocities], axis=1) mids = atoms_df.get("molecule-ID") if mids is None: unique_mids = [1] data_by_mols = {1: {"Atoms": atoms_df}} else: unique_mids = np.unique(mids) data_by_mols = {} for k in unique_mids: df = atoms_df[atoms_df["molecule-ID"] == k] data_by_mols[k] = {"Atoms": df} masses = self.masses.copy() masses["label"] = atom_labels unique_masses = np.unique(masses["mass"]) if guess_element: ref_masses = [el.atomic_mass.real for el in Element] diff = np.abs(np.array(ref_masses) - unique_masses[:, None]) atomic_numbers = np.argmin(diff, axis=1) + 1 symbols = [Element.from_Z(an).symbol for an in atomic_numbers] else: symbols = ["Q%s" % a for a in map(chr, range(97, 97 + len(unique_masses)))] for um, s in zip(unique_masses, symbols): masses.loc[masses["mass"] == um, "element"] = s if atom_labels is None: # add unique labels based on elements for el, vc in masses["element"].value_counts().iteritems(): masses.loc[masses["element"] == el, "label"] = ["%s%d" % (el, c) for c in range(1, vc + 1)] assert masses["label"].nunique(dropna=False) == len(masses), "Expecting unique atom label for each type" mass_info = [tuple([r["label"], r["mass"]]) for _, r in masses.iterrows()] nonbond_coeffs, topo_coeffs = None, None if self.force_field: if "PairIJ Coeffs" in self.force_field: nbc = self.force_field["PairIJ Coeffs"] nbc = nbc.sort_values(["id1", "id2"]).drop(["id1", "id2"], axis=1) nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)] elif "Pair Coeffs" in self.force_field: nbc = self.force_field["Pair Coeffs"].sort_index() nonbond_coeffs = [list(t) for t in nbc.itertuples(False, None)] topo_coeffs = {k: [] for k in SECTION_KEYWORDS["ff"][2:] if k in self.force_field} for kw in topo_coeffs.keys(): class2_coeffs = { k: list(v.itertuples(False, None)) for k, v in self.force_field.items() if k in CLASS2_KEYWORDS.get(kw, []) } ff_df = self.force_field[kw] for t in ff_df.itertuples(True, None): d = {"coeffs": list(t[1:]), "types": []} if class2_coeffs: d.update({k: list(v[t[0] - 1]) for k, v in 
class2_coeffs.items()}) topo_coeffs[kw].append(d) if self.topology: def label_topo(t): return tuple(masses.loc[atoms_df.loc[t, "type"], "label"]) for k, v in self.topology.items(): ff_kw = k[:-1] + " Coeffs" for topo in v.itertuples(False, None): topo_idx = topo[0] - 1 indices = list(topo[1:]) mids = atoms_df.loc[indices]["molecule-ID"].unique() assert len(mids) == 1, ( "Do not support intermolecular topology formed " "by atoms with different molecule-IDs" ) label = label_topo(indices) topo_coeffs[ff_kw][topo_idx]["types"].append(label) if data_by_mols[mids[0]].get(k): data_by_mols[mids[0]][k].append(indices) else: data_by_mols[mids[0]][k] = [indices] if topo_coeffs: for v in topo_coeffs.values(): for d in v: d["types"] = list(set(d["types"])) ff = ForceField(mass_info=mass_info, nonbond_coeffs=nonbond_coeffs, topo_coeffs=topo_coeffs) topo_list = [] for mid in unique_mids: data = data_by_mols[mid] atoms = data["Atoms"] shift = min(atoms.index) type_ids = atoms["type"] species = masses.loc[type_ids, "element"] labels = masses.loc[type_ids, "label"] coords = atoms[["x", "y", "z"]] m = Molecule(species.values, coords.values, site_properties={ff_label: labels.values}) charges = atoms.get("q") velocities = atoms[["vx", "vy", "vz"]] if "vx" in atoms.columns else None topologies = {} for kw in SECTION_KEYWORDS["topology"]: if data.get(kw): topologies[kw] = (np.array(data[kw]) - shift).tolist() topologies = None if not topologies else topologies topo_list.append( Topology( sites=m, ff_label=ff_label, charges=charges, velocities=velocities, topologies=topologies, ) ) return self.box, ff, topo_list @classmethod def from_file(cls, filename, atom_style="full", sort_id=False): """ Constructor that parses a file. Args: filename (str): Filename to read. atom_style (str): Associated atom_style. Default to "full". sort_id (bool): Whether sort each section by id. Default to True. 
""" with open(filename) as f: lines = f.readlines() kw_pattern = r"|".join(itertools.chain(*SECTION_KEYWORDS.values())) section_marks = [i for i, l in enumerate(lines) if re.search(kw_pattern, l)] parts = np.split(lines, section_marks) float_group = r"([0-9eE.+-]+)" header_pattern = dict() header_pattern["counts"] = r"^\s*(\d+)\s+([a-zA-Z]+)$" header_pattern["types"] = r"^\s*(\d+)\s+([a-zA-Z]+)\s+types$" header_pattern["bounds"] = r"^\s*{}$".format(r"\s+".join([float_group] * 2 + [r"([xyz])lo \3hi"])) header_pattern["tilt"] = r"^\s*{}$".format(r"\s+".join([float_group] * 3 + ["xy xz yz"])) header = {"counts": {}, "types": {}} bounds = {} for l in clean_lines(parts[0][1:]): # skip the 1st line match = None for k, v in header_pattern.items(): match = re.match(v, l) if match: break if match and k in ["counts", "types"]: header[k][match.group(2)] = int(match.group(1)) elif match and k == "bounds": g = match.groups() bounds[g[2]] = [float(i) for i in g[:2]] elif match and k == "tilt": header["tilt"] = [float(i) for i in match.groups()] header["bounds"] = [bounds.get(i, [-0.5, 0.5]) for i in "xyz"] box = LammpsBox(header["bounds"], header.get("tilt")) def parse_section(sec_lines): title_info = sec_lines[0].split("#", 1) kw = title_info[0].strip() sio = StringIO("".join(sec_lines[2:])) # skip the 2nd line if kw.endswith("Coeffs") and not kw.startswith("PairIJ"): df_list = [ pd.read_csv(StringIO(line), header=None, comment="#", delim_whitespace=True) for line in sec_lines[2:] if line.strip() ] df = pd.concat(df_list, ignore_index=True) names = ["id"] + ["coeff%d" % i for i in range(1, df.shape[1])] else: df = pd.read_csv(sio, header=None, comment="#", delim_whitespace=True) if kw == "PairIJ Coeffs": names = ["id1", "id2"] + ["coeff%d" % i for i in range(1, df.shape[1] - 1)] df.index.name = None elif kw in SECTION_HEADERS: names = ["id"] + SECTION_HEADERS[kw] elif kw == "Atoms": names = ["id"] + ATOMS_HEADERS[atom_style] if df.shape[1] == len(names): pass elif df.shape[1] == len(names) + 3: names += ["nx", "ny", "nz"] else: raise ValueError("Format in Atoms section inconsistent" " with atom_style %s" % atom_style) else: raise NotImplementedError("Parser for %s section" " not implemented" % kw) df.columns = names if sort_id: sort_by = "id" if kw != "PairIJ Coeffs" else ["id1", "id2"] df.sort_values(sort_by, inplace=True) if "id" in df.columns: df.set_index("id", drop=True, inplace=True) df.index.name = None return kw, df err_msg = "Bad LAMMPS data format where " body = {} seen_atoms = False for part in parts[1:]: name, section = parse_section(part) if name == "Atoms": seen_atoms = True if ( name in ["Velocities"] + SECTION_KEYWORDS["topology"] and not seen_atoms ): # Atoms must appear earlier than these raise RuntimeError(err_msg + "%s section appears before" " Atoms section" % name) body.update({name: section}) err_msg += "Nos. 
of {} do not match between header and {} section" assert len(body["Masses"]) == header["types"]["atom"], err_msg.format("atom types", "Masses") atom_sections = ["Atoms", "Velocities"] if "Velocities" in body else ["Atoms"] for s in atom_sections: assert len(body[s]) == header["counts"]["atoms"], err_msg.format("atoms", s) for s in SECTION_KEYWORDS["topology"]: if header["counts"].get(s.lower(), 0) > 0: assert len(body[s]) == header["counts"][s.lower()], err_msg.format(s.lower(), s) items = {k.lower(): body[k] for k in ["Masses", "Atoms"]} items["velocities"] = body.get("Velocities") ff_kws = [k for k in body if k in SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"]] items["force_field"] = {k: body[k] for k in ff_kws} if ff_kws else None topo_kws = [k for k in body if k in SECTION_KEYWORDS["topology"]] items["topology"] = {k: body[k] for k in topo_kws} if topo_kws else None items["atom_style"] = atom_style items["box"] = box return cls(**items) @classmethod def from_ff_and_topologies(cls, box, ff, topologies, atom_style="full"): """ Constructor building LammpsData from a ForceField object and a list of Topology objects. Do not support intermolecular topologies since a Topology object includes data for ONE molecule or structure only. Args: box (LammpsBox): Simulation box. ff (ForceField): ForceField object with data for Masses and force field sections. topologies ([Topology]): List of Topology objects with data for Atoms, Velocities and topology sections. atom_style (str): Output atom_style. Default to "full". """ atom_types = set.union(*[t.species for t in topologies]) assert atom_types.issubset(ff.maps["Atoms"].keys()), "Unknown atom type found in topologies" items = dict(box=box, atom_style=atom_style, masses=ff.masses, force_field=ff.force_field) mol_ids, charges, coords, labels = [], [], [], [] v_collector = [] if topologies[0].velocities else None topo_collector = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []} topo_labels = {"Bonds": [], "Angles": [], "Dihedrals": [], "Impropers": []} for i, topo in enumerate(topologies): if topo.topologies: shift = len(labels) for k, v in topo.topologies.items(): topo_collector[k].append(np.array(v) + shift + 1) topo_labels[k].extend([tuple([topo.type_by_sites[j] for j in t]) for t in v]) if isinstance(v_collector, list): v_collector.append(topo.velocities) mol_ids.extend([i + 1] * len(topo.sites)) labels.extend(topo.type_by_sites) coords.append(topo.sites.cart_coords) q = [0.0] * len(topo.sites) if not topo.charges else topo.charges charges.extend(q) atoms = pd.DataFrame(np.concatenate(coords), columns=["x", "y", "z"]) atoms["molecule-ID"] = mol_ids atoms["q"] = charges atoms["type"] = list(map(ff.maps["Atoms"].get, labels)) atoms.index += 1 atoms = atoms[ATOMS_HEADERS[atom_style]] velocities = None if v_collector: velocities = pd.DataFrame(np.concatenate(v_collector), columns=SECTION_HEADERS["Velocities"]) velocities.index += 1 topology = {k: None for k, v in topo_labels.items() if len(v) > 0} for k in topology: df = pd.DataFrame(np.concatenate(topo_collector[k]), columns=SECTION_HEADERS[k][1:]) df["type"] = list(map(ff.maps[k].get, topo_labels[k])) if any(pd.isnull(df["type"])): # Throw away undefined topologies warnings.warn("Undefined %s detected and removed" % k.lower()) df.dropna(subset=["type"], inplace=True) df.reset_index(drop=True, inplace=True) df.index += 1 topology[k] = df[SECTION_HEADERS[k]] topology = {k: v for k, v in topology.items() if not v.empty} items.update({"atoms": atoms, "velocities": velocities, "topology": 
topology}) return cls(**items) @classmethod def from_structure(cls, structure, ff_elements=None, atom_style="charge", is_sort=False): """ Simple constructor building LammpsData from a structure without force field parameters and topologies. Args: structure (Structure): Input structure. ff_elements ([str]): List of strings of elements that must be present due to force field settings but not necessarily in the structure. Default to None. atom_style (str): Choose between "atomic" (neutral) and "charge" (charged). Default to "charge". is_sort (bool): whether to sort sites """ if is_sort: s = structure.get_sorted_structure() else: s = structure.copy() box, symmop = lattice_2_lmpbox(s.lattice) coords = symmop.operate_multi(s.cart_coords) site_properties = s.site_properties if "velocities" in site_properties: velos = np.array(s.site_properties["velocities"]) rot = SymmOp.from_rotation_and_translation(symmop.rotation_matrix) rot_velos = rot.operate_multi(velos) site_properties.update({"velocities": rot_velos}) boxed_s = Structure( box.to_lattice(), s.species, coords, site_properties=site_properties, coords_are_cartesian=True, ) symbols = list(s.symbol_set) if ff_elements: symbols.extend(ff_elements) elements = sorted(Element(el) for el in set(symbols)) mass_info = [tuple([i.symbol] * 2) for i in elements] ff = ForceField(mass_info) topo = Topology(boxed_s) return cls.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style) @classmethod def from_dict(cls, d): """ Constructor that reads in a dictionary. Args: d (dict): Dictionary to read. """ def decode_df(s): return pd.read_json(s, orient="split") items = dict() items["box"] = LammpsBox.from_dict(d["box"]) items["masses"] = decode_df(d["masses"]) items["atoms"] = decode_df(d["atoms"]) items["atom_style"] = d["atom_style"] velocities = d["velocities"] if velocities: velocities = decode_df(velocities) items["velocities"] = velocities force_field = d["force_field"] if force_field: force_field = {k: decode_df(v) for k, v in force_field.items()} items["force_field"] = force_field topology = d["topology"] if topology: topology = {k: decode_df(v) for k, v in topology.items()} items["topology"] = topology return cls(**items) def as_dict(self): """ Returns the LammpsData as a dict. """ def encode_df(df): return df.to_json(orient="split") d = dict() d["@module"] = self.__class__.__module__ d["@class"] = self.__class__.__name__ d["box"] = self.box.as_dict() d["masses"] = encode_df(self.masses) d["atoms"] = encode_df(self.atoms) d["atom_style"] = self.atom_style d["velocities"] = None if self.velocities is None else encode_df(self.velocities) d["force_field"] = None if not self.force_field else {k: encode_df(v) for k, v in self.force_field.items()} d["topology"] = None if not self.topology else {k: encode_df(v) for k, v in self.topology.items()} return d class Topology(MSONable): """ Class carrying most data in Atoms, Velocities and molecular topology sections for ONE SINGLE Molecule or Structure object, or a plain list of Sites. """ def __init__(self, sites, ff_label=None, charges=None, velocities=None, topologies=None): """ Args: sites ([Site] or SiteCollection): A group of sites in a list or as a Molecule/Structure. ff_label (str): Site property key for labeling atoms of different types. Default to None, i.e., use site.species_string. charges ([q, ...]): Charge of each site in a (n,) array/list, where n is the No. of sites. Default to None, i.e., search site property for charges. 
velocities ([[vx, vy, vz], ...]): Velocity of each site in a (n, 3) array/list, where n is the No. of sites. Default to None, i.e., search site property for velocities. topologies (dict): Bonds, angles, dihedrals and improper dihedrals defined by site indices. Default to None, i.e., no additional topology. All four valid keys listed below are optional. { "Bonds": [[i, j], ...], "Angles": [[i, j, k], ...], "Dihedrals": [[i, j, k, l], ...], "Impropers": [[i, j, k, l], ...] } """ if not isinstance(sites, (Molecule, Structure)): sites = Molecule.from_sites(sites) if ff_label: type_by_sites = sites.site_properties.get(ff_label) else: type_by_sites = [site.specie.symbol for site in sites] # search for site property if not override if charges is None: charges = sites.site_properties.get("charge") if velocities is None: velocities = sites.site_properties.get("velocities") # validate shape if charges is not None: charge_arr = np.array(charges) assert charge_arr.shape == (len(sites),), "Wrong format for charges" charges = charge_arr.tolist() if velocities is not None: velocities_arr = np.array(velocities) assert velocities_arr.shape == ( len(sites), 3, ), "Wrong format for velocities" velocities = velocities_arr.tolist() if topologies: topologies = {k: v for k, v in topologies.items() if k in SECTION_KEYWORDS["topology"]} self.sites = sites self.ff_label = ff_label self.charges = charges self.velocities = velocities self.topologies = topologies self.type_by_sites = type_by_sites self.species = set(type_by_sites) @classmethod def from_bonding(cls, molecule, bond=True, angle=True, dihedral=True, tol=0.1, **kwargs): """ Another constructor that creates an instance from a molecule. Covalent bonds and other bond-based topologies (angles and dihedrals) can be automatically determined. Cannot be used for non bond-based topologies, e.g., improper dihedrals. Args: molecule (Molecule): Input molecule. bond (bool): Whether find bonds. If set to False, angle and dihedral searching will be skipped. Default to True. angle (bool): Whether find angles. Default to True. dihedral (bool): Whether find dihedrals. Default to True. tol (float): Bond distance tolerance. Default to 0.1. Not recommended to alter. **kwargs: Other kwargs supported by Topology. 
""" real_bonds = molecule.get_covalent_bonds(tol=tol) bond_list = [list(map(molecule.index, [b.site1, b.site2])) for b in real_bonds] if not all((bond, bond_list)): # do not search for others if not searching for bonds or no bonds return cls(sites=molecule, **kwargs) angle_list, dihedral_list = [], [] dests, freq = np.unique(bond_list, return_counts=True) hubs = dests[np.where(freq > 1)].tolist() bond_arr = np.array(bond_list) if len(hubs) > 0: hub_spokes = {} for hub in hubs: ix = np.any(np.isin(bond_arr, hub), axis=1) bonds = np.unique(bond_arr[ix]).tolist() bonds.remove(hub) hub_spokes[hub] = bonds # skip angle or dihedral searching if too few bonds or hubs dihedral = False if len(bond_list) < 3 or len(hubs) < 2 else dihedral angle = False if len(bond_list) < 2 or len(hubs) < 1 else angle if angle: for k, v in hub_spokes.items(): angle_list.extend([[i, k, j] for i, j in itertools.combinations(v, 2)]) if dihedral: hub_cons = bond_arr[np.all(np.isin(bond_arr, hubs), axis=1)] for i, j in hub_cons.tolist(): ks = [k for k in hub_spokes[i] if k != j] ls = [l for l in hub_spokes[j] if l != i] dihedral_list.extend([[k, i, j, l] for k, l in itertools.product(ks, ls) if k != l]) topologies = { k: v for k, v in zip(SECTION_KEYWORDS["topology"][:3], [bond_list, angle_list, dihedral_list]) if len(v) > 0 } topologies = None if len(topologies) == 0 else topologies return cls(sites=molecule, topologies=topologies, **kwargs) class ForceField(MSONable): """ Class carrying most data in Masses and force field sections. Attributes: masses (pandas.DataFrame): DataFrame for Masses section. force_field (dict): Force field section keywords (keys) and data (values) as DataFrames. maps (dict): Dict for labeling atoms and topologies. """ @staticmethod def _is_valid(df): return not pd.isnull(df).values.any() def __init__(self, mass_info, nonbond_coeffs=None, topo_coeffs=None): """ Args: mass_into (list): List of atomic mass info. Elements, strings (symbols) and floats are all acceptable for the values, with the first two converted to the atomic mass of an element. It is recommended to use OrderedDict.items() to prevent key duplications. [("C", 12.01), ("H", Element("H")), ("O", "O"), ...] nonbond_coeffs [coeffs]: List of pair or pairij coefficients, of which the sequence must be sorted according to the species in mass_dict. Pair or PairIJ determined by the length of list. Optional with default to None. topo_coeffs (dict): Dict with force field coefficients for molecular topologies. Optional with default to None. All four valid keys listed below are optional. Each value is a list of dicts with non optional keys "coeffs" and "types", and related class2 force field keywords as optional keys. { "Bond Coeffs": [{"coeffs": [coeff], "types": [("C", "C"), ...]}, ...], "Angle Coeffs": [{"coeffs": [coeff], "BondBond Coeffs": [coeff], "types": [("H", "C", "H"), ...]}, ...], "Dihedral Coeffs": [{"coeffs": [coeff], "BondBond13 Coeffs": [coeff], "types": [("H", "C", "C", "H"), ...]}, ...], "Improper Coeffs": [{"coeffs": [coeff], "AngleAngle Coeffs": [coeff], "types": [("H", "C", "C", "H"), ...]}, ...], } Topology of same type or equivalent types (e.g., ("C", "H") and ("H", "C") bonds) are NOT ALLOWED to be defined MORE THAN ONCE with DIFFERENT coefficients. 
""" def map_mass(v): return ( v.atomic_mass.real if isinstance(v, Element) else Element(v).atomic_mass.real if isinstance(v, str) else v ) index, masses, self.mass_info, atoms_map = [], [], [], {} for i, m in enumerate(mass_info): index.append(i + 1) mass = map_mass(m[1]) masses.append(mass) self.mass_info.append((m[0], mass)) atoms_map[m[0]] = i + 1 self.masses = pd.DataFrame({"mass": masses}, index=index) self.maps = {"Atoms": atoms_map} ff_dfs = {} self.nonbond_coeffs = nonbond_coeffs if self.nonbond_coeffs: ff_dfs.update(self._process_nonbond()) self.topo_coeffs = topo_coeffs if self.topo_coeffs: self.topo_coeffs = {k: v for k, v in self.topo_coeffs.items() if k in SECTION_KEYWORDS["ff"][2:]} for k in self.topo_coeffs.keys(): coeffs, mapper = self._process_topo(k) ff_dfs.update(coeffs) self.maps.update(mapper) self.force_field = None if len(ff_dfs) == 0 else ff_dfs def _process_nonbond(self): pair_df = pd.DataFrame(self.nonbond_coeffs) assert self._is_valid(pair_df), "Invalid nonbond coefficients with rows varying in length" npair, ncoeff = pair_df.shape pair_df.columns = ["coeff%d" % i for i in range(1, ncoeff + 1)] nm = len(self.mass_info) ncomb = int(nm * (nm + 1) / 2) if npair == nm: kw = "Pair Coeffs" pair_df.index = range(1, nm + 1) elif npair == ncomb: kw = "PairIJ Coeffs" ids = list(itertools.combinations_with_replacement(range(1, nm + 1), 2)) id_df = pd.DataFrame(ids, columns=["id1", "id2"]) pair_df = pd.concat([id_df, pair_df], axis=1) else: raise ValueError( "Expecting {} Pair Coeffs or " "{} PairIJ Coeffs for {} atom types," " got {}".format(nm, ncomb, nm, npair) ) return {kw: pair_df} def _process_topo(self, kw): def find_eq_types(label, section): if section.startswith("Improper"): label_arr = np.array(label) seqs = [[0, 1, 2, 3], [0, 2, 1, 3], [3, 1, 2, 0], [3, 2, 1, 0]] return [tuple(label_arr[s]) for s in seqs] return [label] + [label[::-1]] main_data, distinct_types = [], [] class2_data = {k: [] for k in self.topo_coeffs[kw][0].keys() if k in CLASS2_KEYWORDS.get(kw, [])} for i, d in enumerate(self.topo_coeffs[kw]): main_data.append(d["coeffs"]) distinct_types.append(d["types"]) for k in class2_data.keys(): class2_data[k].append(d[k]) distinct_types = [set(itertools.chain(*[find_eq_types(t, kw) for t in dt])) for dt in distinct_types] type_counts = sum([len(dt) for dt in distinct_types]) type_union = set.union(*distinct_types) assert len(type_union) == type_counts, "Duplicated items found " "under different coefficients in %s" % kw atoms = set(np.ravel(list(itertools.chain(*distinct_types)))) assert atoms.issubset(self.maps["Atoms"].keys()), "Undefined atom type found in %s" % kw mapper = {} for i, dt in enumerate(distinct_types): for t in dt: mapper[t] = i + 1 def process_data(data): df = pd.DataFrame(data) assert self._is_valid(df), "Invalid coefficients with rows varying in length" n, c = df.shape df.columns = ["coeff%d" % i for i in range(1, c + 1)] df.index = range(1, n + 1) return df all_data = {kw: process_data(main_data)} if class2_data: all_data.update({k: process_data(v) for k, v in class2_data.items()}) return all_data, {kw[:-7] + "s": mapper} def to_file(self, filename): """ Saves object to a file in YAML format. Args: filename (str): Filename. """ d = { "mass_info": self.mass_info, "nonbond_coeffs": self.nonbond_coeffs, "topo_coeffs": self.topo_coeffs, } yml = yaml.YAML(typ="safe") with open(filename, "w") as f: yml.dump(d, f) @classmethod def from_file(cls, filename): """ Constructor that reads in a file in YAML format. Args: filename (str): Filename. 
""" yml = yaml.YAML(typ="safe") with open(filename, "r") as f: d = yml.load(f) return cls.from_dict(d) @classmethod def from_dict(cls, d): """ Constructor that reads in a dictionary. Args: d (dict): Dictionary to read. """ d["mass_info"] = [tuple(m) for m in d["mass_info"]] if d.get("topo_coeffs"): for v in d["topo_coeffs"].values(): for c in v: c["types"] = [tuple(t) for t in c["types"]] return cls(d["mass_info"], d["nonbond_coeffs"], d["topo_coeffs"]) class CombinedData(LammpsData): """ Object for a collective set of data for a series of LAMMPS data file. velocities not yet implementd. """ def __init__( self, list_of_molecules, list_of_names, list_of_numbers, coordinates, atom_style="full", ): """ Args: list_of_molecules: a list of LammpsData of a single cluster. list_of_names: a list of name for each cluster. list_of_numbers: a list of Integer for counts of each molecule coordinates (pandas.DataFrame): DataFrame with with four columns ["atom", "x", "y", "z"] for coordinates of atoms. atom_style (str): Output atom_style. Default to "full". """ max_xyz = coordinates[["x", "y", "z"]].max().max() min_xyz = coordinates[["x", "y", "z"]].min().min() self.box = LammpsBox(np.array(3 * [[min_xyz - 0.5, max_xyz + 0.5]])) self.atom_style = atom_style self.n = sum(list_of_numbers) self.names = list_of_names self.mols = list_of_molecules self.nums = list_of_numbers self.masses = pd.concat([mol.masses.copy() for mol in self.mols], ignore_index=True) self.masses.index += 1 all_ff_kws = SECTION_KEYWORDS["ff"] + SECTION_KEYWORDS["class2"] ff_kws = [k for k in all_ff_kws if k in self.mols[0].force_field] self.force_field = {} for kw in ff_kws: self.force_field[kw] = pd.concat( [mol.force_field[kw].copy() for mol in self.mols if kw in mol.force_field], ignore_index=True, ) self.force_field[kw].index += 1 self.atoms = pd.DataFrame() mol_count = 0 type_count = 0 for i, mol in enumerate(self.mols): atoms_df = mol.atoms.copy() atoms_df["molecule-ID"] += mol_count atoms_df["type"] += type_count for j in range(self.nums[i]): self.atoms = self.atoms.append(atoms_df, ignore_index=True) atoms_df["molecule-ID"] += 1 type_count += len(mol.masses) mol_count += self.nums[i] self.atoms.index += 1 assert len(self.atoms) == len(coordinates), "Wrong number of coordinates." 
self.atoms.update(coordinates) self.velocities = None assert self.mols[0].velocities is None, "Velocities not supported" self.topology = {} atom_count = 0 count = {"Bonds": 0, "Angles": 0, "Dihedrals": 0, "Impropers": 0} for i, mol in enumerate(self.mols): for kw in SECTION_KEYWORDS["topology"]: if kw in mol.topology: if kw not in self.topology: self.topology[kw] = pd.DataFrame() topo_df = mol.topology[kw].copy() topo_df["type"] += count[kw] for col in topo_df.columns[1:]: topo_df[col] += atom_count for j in range(self.nums[i]): self.topology[kw] = self.topology[kw].append(topo_df, ignore_index=True) for col in topo_df.columns[1:]: topo_df[col] += len(mol.atoms) count[kw] += len(mol.force_field[kw[:-1] + " Coeffs"]) atom_count += len(mol.atoms) * self.nums[i] for kw in SECTION_KEYWORDS["topology"]: if kw in self.topology: self.topology[kw].index += 1 @classmethod def parse_xyz(cls, filename): """ load xyz file generated from packmol (for those who find it hard to install openbabel) Returns: pandas.DataFrame """ with open(filename) as f: lines = f.readlines() sio = StringIO("".join(lines[2:])) # skip the 2nd line df = pd.read_csv( sio, header=None, comment="#", delim_whitespace=True, names=["atom", "x", "y", "z"], ) df.index += 1 return df @classmethod def from_files(cls, coordinate_file, list_of_numbers, *filenames): """ Constructor that parse a series of data file. Args: coordinate_file (str): The filename of xyz coordinates. list_of_numbers (list): A list of numbers specifying counts for each clusters parsed from files. filenames (str): A series of filenames in string format. """ names = [] mols = [] styles = [] coordinates = cls.parse_xyz(filename=coordinate_file) for i in range(0, len(filenames)): exec("cluster%d = LammpsData.from_file(filenames[i])" % (i + 1)) names.append("cluster%d" % (i + 1)) mols.append(eval("cluster%d" % (i + 1))) styles.append(eval("cluster%d" % (i + 1)).atom_style) style = set(styles) assert len(style) == 1, "Files have different atom styles." return cls.from_lammpsdata(mols, names, list_of_numbers, coordinates, style.pop()) @classmethod def from_lammpsdata(cls, mols, names, list_of_numbers, coordinates, atom_style=None): """ Constructor that can infer atom_style. The input LammpsData objects are used non-destructively. Args: mols: a list of LammpsData of a single cluster. names: a list of name for each cluster. list_of_numbers: a list of Integer for counts of each molecule coordinates (pandas.DataFrame): DataFrame with with four columns ["atom", "x", "y", "z"] for coordinates of atoms. atom_style (str): Output atom_style. Default to "full". """ styles = [] for mol in mols: styles.append(mol.atom_style) style = set(styles) assert len(style) == 1, "Data have different atom_style." style_return = style.pop() if atom_style: assert atom_style == style_return, "Data have different atom_style as specified." return cls(mols, names, list_of_numbers, coordinates, style_return) def get_string(self, distance=6, velocity=8, charge=4): """ Returns the string representation of CombinedData, essentially the string to be written to a file. Combination info is included. Args: distance (int): No. of significant figures to output for box settings (bounds and tilt) and atomic coordinates. Default to 6. velocity (int): No. of significant figures to output for velocities. Default to 8. charge (int): No. of significant figures to output for charges. Default to 3. 
Returns: String representation """ lines = LammpsData.get_string(self, distance, velocity, charge).splitlines() info = "# " + " + ".join(str(a) + " " + b for a, b in zip(self.nums, self.names)) lines.insert(1, info) return "\n".join(lines) @deprecated( LammpsData.from_structure, "structure_2_lmpdata has been deprecated " "in favor of LammpsData.from_structure", ) def structure_2_lmpdata(structure, ff_elements=None, atom_style="charge", is_sort=False): """ Converts a structure to a LammpsData object with no force field parameters and topologies. Args: structure (Structure): Input structure. ff_elements ([str]): List of strings of elements that must be present due to force field settings but not necessarily in the structure. Default to None. atom_style (str): Choose between "atomic" (neutral) and "charge" (charged). Default to "charge". is_sort (bool): whether to sort the structure sites Returns: LammpsData """ if is_sort: s = structure.get_sorted_structure() else: s = structure.copy() a, b, c = s.lattice.abc m = s.lattice.matrix xhi = a xy = np.dot(m[1], m[0] / xhi) yhi = np.sqrt(b ** 2 - xy ** 2) xz = np.dot(m[2], m[0] / xhi) yz = (np.dot(m[1], m[2]) - xy * xz) / yhi zhi = np.sqrt(c ** 2 - xz ** 2 - yz ** 2) box_bounds = [[0.0, xhi], [0.0, yhi], [0.0, zhi]] box_tilt = [xy, xz, yz] box_tilt = None if not any(box_tilt) else box_tilt box = LammpsBox(box_bounds, box_tilt) new_latt = Lattice([[xhi, 0, 0], [xy, yhi, 0], [xz, yz, zhi]]) s.lattice = new_latt symbols = list(s.symbol_set) if ff_elements: symbols.extend(ff_elements) elements = sorted(Element(el) for el in set(symbols)) mass_info = [tuple([i.symbol] * 2) for i in elements] ff = ForceField(mass_info) topo = Topology(s) return LammpsData.from_ff_and_topologies(box=box, ff=ff, topologies=[topo], atom_style=atom_style)
davidwaroquiers/pymatgen
pymatgen/io/lammps/data.py
Python
mit
58,156
[ "LAMMPS", "pymatgen" ]
6e169d079626eacfa5d33704258f19fe283f01cafe2f956309126d89006ce374
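The LammpsData docstrings above describe several entry points (from_structure, from_file, from_ff_and_topologies) plus a write_file method and a structure property for the round trip back to pymatgen. A short sketch of that round trip follows, assuming pymatgen is installed; the Li lattice parameter and the "li.data" filename are illustrative assumptions, while the call signatures match the code shown above.

# Minimal sketch of the constructors/writer documented above.
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.lammps.data import LammpsData

# Build a LammpsData object directly from a periodic structure
# (atom_style "charge": neutral charges are filled in automatically).
structure = Structure(Lattice.cubic(3.51), ["Li", "Li"],
                      [[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
ld = LammpsData.from_structure(structure, atom_style="charge")

# Significant figures for coordinates/velocities/charges are controlled by
# the distance/velocity/charge keyword arguments of write_file/get_string.
ld.write_file("li.data")

# Parse the data file back and recover a pymatgen Structure from the box.
ld2 = LammpsData.from_file("li.data", atom_style="charge")
print(ld2.structure)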
#!/usr/bin/env python import sys import pysam import argparse import random import subprocess import os import bamsurgeon.replacereads as rr import bamsurgeon.aligners as aligners import bamsurgeon.mutation as mutation import bamsurgeon.makevcf as makevcf from operator import itemgetter from bamsurgeon.common import * from uuid import uuid4 from shutil import move from re import sub from multiprocessing import Pool from collections import defaultdict as dd import logging FORMAT = '%(levelname)s %(asctime)s %(message)s' logging.basicConfig(format=FORMAT) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) #sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) #sys.stderr = os.fdopen(sys.stderr.fileno(), 'w', 0) def mut(base, altbase): """ change base to something different """ bases = ('A','T','C','G') base = base.upper() if base not in bases or (altbase is not None and altbase not in ['A','T','G','C']): raise ValueError("ERROR\t" + now() + "\tbase passed to mut(): " + str(base) + " not one of (A,T,C,G)\n") if altbase is not None: return altbase else: alt = base while alt == base: alt = bases[int(random.uniform(0,4))] return alt def countReadCoverage(bam,chrom,start,end): """ calculate coverage of aligned reads over region """ coverage = [] start = int(start) end = int(end) for i in range(end-start+1): coverage.append(0.0) i = 0 if chrom in bam.references: for pcol in bam.pileup(chrom,start,end): n = 0 if pcol.pos >= start and pcol.pos <= end: for read in pcol.pileups: if read.alignment.mapq >= 0 and not read.alignment.is_duplicate: n += 1 coverage[i] = n i += 1 return coverage def replace(origbamfile, mutbamfile, outbamfile, seed=None): ''' open .bam file and call replacereads ''' origbam = pysam.Samfile(origbamfile, 'rb') mutbam = pysam.Samfile(mutbamfile, 'rb') outbam = pysam.Samfile(outbamfile, 'wb', template=origbam) rr.replaceReads(origbam, mutbam, outbam, keepqual=True, seed=seed) origbam.close() mutbam.close() outbam.close() def makemut(args, hc, avoid, alignopts): mutid_list = [] for site in hc: mutid_list.append(site['chrom'] + '_' + str(site['start']) + '_' + str(site['end']) + '_' + str(site['vaf']) + '_' + str(site['altbase'])) if args.seed is not None: random.seed(int(args.seed) + int(hc[0]['start'])) bamfile = pysam.Samfile(args.bamFileName, 'rb') bammate = pysam.Samfile(args.bamFileName, 'rb') # use for mates to avoid iterator problems reffile = pysam.Fastafile(args.refFasta) tmpbams = [] #snvfrac = float(args.snvfrac) chrom = None vaf = None mutpos_list = [] altbase_list = [] for site in hc: if chrom is None: chrom = site['chrom'] else: assert chrom == site['chrom'], "haplotype clusters cannot span multiple chromosomes!" 
if vaf is None: vaf = site['vaf'] elif vaf != site['vaf']: logger.warning("multiple VAFs for single haplotype, using first encountered VAF: %f" % vaf) mutpos = int(random.uniform(site['start'],site['end']+1)) # position of mutation in genome mutpos_list.append(mutpos) altbase_list.append(site['altbase']) mutbase_list = [] refbase_list = [] mutstr_list = [] for n, mutpos in enumerate(mutpos_list): refbase = reffile.fetch(chrom,mutpos-1,mutpos) altbase = altbase_list[n] refbase_list.append(refbase) if altbase == refbase.upper() and not args.ignoreref: logger.warning("%s specified ALT base matches reference, skipping mutation" % mutid_list[n]) return None try: mutbase = mut(refbase, altbase) mutbase_list.append(mutbase) except ValueError as e: logger.warning(mutid_list[n] + " " + ' '.join(("skipped site:",chrom,str(hc[n]['start']),str(hc[n]['end']),"due to N base:",str(e),"\n"))) return None mutstr_list.append(refbase + "-->" + str(mutbase)) # optional CNV file cnv = None if (args.cnvfile): cnv = pysam.Tabixfile(args.cnvfile, 'r') hapstr = "_".join(('haplo',chrom,str(min(mutpos_list)),str(max(mutpos_list)))) log = open('addsnv_logs_' + os.path.basename(args.outBamFile) + '/' + os.path.basename(args.outBamFile) + "." + hapstr + ".log",'w') tmpoutbamname = args.tmpdir + "/" + hapstr + ".tmpbam." + str(uuid4()) + ".bam" logger.info("%s creating tmp bam: %s" % (hapstr, tmpoutbamname)) outbam_muts = pysam.Samfile(tmpoutbamname, 'wb', template=bamfile) mutfail, hasSNP, maxfrac, outreads, mutreads, mutmates = mutation.mutate(args, log, bamfile, bammate, chrom, min(mutpos_list), max(mutpos_list)+1, mutpos_list, avoid=avoid, mutid_list=mutid_list, is_snv=True, mutbase_list=mutbase_list, reffile=reffile) if mutfail: outbam_muts.close() os.remove(tmpoutbamname) return None # pick reads to change readlist = [] for extqname,read in outreads.items(): if read.seq != mutreads[extqname]: readlist.append(extqname) logger.info("%s len(readlist): %s" % (hapstr, str(len(readlist)))) readlist.sort() random.shuffle(readlist) if len(readlist) < int(args.mindepth): logger.warning("%s too few reads in region (%s) skipping..." % (hapstr, str(len(readlist)))) outbam_muts.close() os.remove(tmpoutbamname) return None if vaf is None: vaf = float(args.mutfrac) # default minor allele freq if not otherwise specified if cnv: # cnv file is present if chrom in cnv.contigs: for cnregion in cnv.fetch(chrom,min(mutpos_list),max(mutpos_list)+1): cn = float(cnregion.strip().split()[3]) # expect chrom,start,end,CN logger.info(hapstr + "\t" + ' '.join(("copy number in snp region:",chrom,str(min(mutpos_list)),str(max(mutpos_list)),"=",str(cn)))) if float(cn) > 0.0: vaf = vaf/float(cn) else: vaf = 0.0 logger.info("%s adjusted VAF: %f" % (hapstr, vaf)) else: logger.info("%s selected VAF: %f" % (hapstr, vaf)) lastread = int(len(readlist)*vaf) # pick at least args.minmutreads if possible if lastread < int(args.minmutreads): if len(readlist) > int(args.minmutreads): lastread = int(args.minmutreads) logger.warning("%s forced %d reads." 
% (hapstr, lastread)) else: logger.warning("%s dropped site with fewer reads than --minmutreads" % hapstr) os.remove(tmpoutbamname) return None readtrack = dd(list) for readname in readlist: orig_name, readpos, pairend = readname.split(',') readtrack[orig_name].append('%s,%s' % (readpos, pairend)) usedreads = 0 newreadlist = [] for orig_name in readtrack: for read_instance in readtrack[orig_name]: newreadlist.append(orig_name + ',' + read_instance) usedreads += 1 if usedreads >= lastread: break readlist = newreadlist logger.info("%s picked: %d" % (hapstr, len(readlist))) wrote = 0 nmut = 0 mut_out = {} # change reads from .bam to mutated sequences for extqname,read in outreads.items(): if read.seq != mutreads[extqname]: if not args.nomut and extqname in readlist: qual = read.qual # changing seq resets qual (see pysam API docs) read.seq = mutreads[extqname] # make mutation read.qual = qual nmut += 1 if (not hasSNP) or args.force: wrote += 1 mut_out[extqname] = read muts_written = {} for extqname in mut_out: if extqname not in muts_written: outbam_muts.write(mut_out[extqname]) muts_written[extqname] = True if mutmates[extqname] is not None: # is mate also in mutated list? mate_read = mutmates[extqname] pairname = 'F' # read is first in pair if mate_read.is_read2: pairname = 'S' # read is second in pair if not mate_read.is_paired: pairname = 'U' # read is unpaired mateqname = ','.join((mate_read.qname,str(mate_read.pos),pairname)) if mateqname in mut_out: # yes: output mutated mate outbam_muts.write(mut_out[mateqname]) muts_written[mateqname] = True else: # no: output original mate outbam_muts.write(mate_read) logger.info("%s wrote: %d, mutated: %d" % (hapstr,wrote,nmut)) if not hasSNP or args.force: outbam_muts.close() aligners.remap_bam(args.aligner, tmpoutbamname, args.refFasta, alignopts, threads=int(args.alignerthreads), mutid=hapstr, paired=(not args.single), picardjar=args.picardjar, insane=args.insane) outbam_muts = pysam.Samfile(tmpoutbamname,'rb') coverwindow = 1 incover = countReadCoverage(bamfile,chrom,min(mutpos_list)-coverwindow,max(mutpos_list)+coverwindow) outcover = countReadCoverage(outbam_muts,chrom,min(mutpos_list)-coverwindow,max(mutpos_list)+coverwindow) avgincover = float(sum(incover))/float(len(incover)) avgoutcover = float(sum(outcover))/float(len(outcover)) logger.info("%s avgincover: %f, avgoutcover: %f" % (hapstr, avgincover, avgoutcover)) spikein_snvfrac = 0.0 if wrote > 0: spikein_snvfrac = float(nmut)/float(wrote) # qc cutoff for final snv depth if (avgoutcover > 0 and avgincover > 0 and avgoutcover/avgincover >= float(args.coverdiff)) or args.force: tmpbams.append(tmpoutbamname) for n,site in enumerate(hc): snvstr = chrom + ":" + str(site['start']) + "-" + str(site['end']) + " (VAF=" + str(vaf) + ")" log.write("\t".join(("snv",snvstr,str(mutpos_list[n]),mutstr_list[n],str(avgincover),str(avgoutcover),str(spikein_snvfrac),str(maxfrac)))+"\n") else: outbam_muts.close() os.remove(tmpoutbamname) if os.path.exists(tmpoutbamname + '.bai'): os.remove(tmpoutbamname + '.bai') logger.warning("%s dropped for outcover/incover < %s" % (hapstr, str(args.coverdiff))) return None outbam_muts.close() bamfile.close() bammate.close() log.close() return tmpbams def main(args): logger.info("starting %s called with args: %s" % (sys.argv[0], ' '.join(sys.argv))) bedfile = open(args.varFileName, 'r') reffile = pysam.Fastafile(args.refFasta) if not os.path.exists(args.bamFileName + '.bai'): logger.error("input bam must be indexed, not .bai file found for %s" % args.bamFileName) 
sys.exit(1) alignopts = {} if args.alignopts is not None: alignopts = dict([o.split(':') for o in args.alignopts.split(',')]) aligners.checkoptions(args.aligner, alignopts, args.picardjar) # load readlist to avoid, if specified avoid = None if args.avoidreads is not None: avoid = dictlist(args.avoidreads) # make a temporary file to hold mutated reads outbam_mutsfile = "addsnv." + str(uuid4()) + ".muts.bam" bamfile = pysam.Samfile(args.bamFileName, 'rb') outbam_muts = pysam.Samfile(outbam_mutsfile, 'wb', template=bamfile) outbam_muts.close() bamfile.close() tmpbams = [] if not os.path.exists(args.tmpdir): os.mkdir(args.tmpdir) logger.info("created tmp directory: %s" % args.tmpdir) if not os.path.exists('addsnv_logs_' + os.path.basename(args.outBamFile)): os.mkdir('addsnv_logs_' + os.path.basename(args.outBamFile)) logger.info("created directory: addsnv_logs_%s" % os.path.basename(args.outBamFile)) assert os.path.exists('addsnv_logs_' + os.path.basename(args.outBamFile)), "could not create output directory!" assert os.path.exists(args.tmpdir), "could not create temporary directory!" pool = Pool(processes=int(args.procs)) results = [] ntried = 0 targets = [] for bedline in bedfile: if ntried < int(args.numsnvs) or int(args.numsnvs) == 0: c = bedline.strip().split() target = { 'chrom' : c[0], 'start' : int(c[1]), 'end' : int(c[2]), 'vaf' : None, 'altbase' : None } # VAF is 4th column, if present if len(c) > 3: target['vaf'] = float(c[3]) # ALT is 5th column, if present if len(c) == 5: altbase = c[4].upper() assert altbase in ['A','T','C','G'], "ERROR:\t" + now() + "\tALT " + altbase + " not A, T, C, or G!\n" target['altbase'] = altbase targets.append(target) ntried += 1 targets = sorted(targets, key=itemgetter('chrom', 'start')) # sort list of dicts by chrom, start haploclusters = [] hc = [] lastchrom = None laststart = None hapsize = int(args.haplosize) for target in targets: if lastchrom is None: lastchrom = target['chrom'] laststart = target['start'] hc.append(target) elif target['chrom'] == lastchrom: if laststart is None: laststart = target['start'] hc.append(target) elif target['start'] - laststart < hapsize: hc.append(target) else: haploclusters.append(hc) hc = [] hc.append(target) elif target['chrom'] != lastchrom: haploclusters.append(hc) hc = [] laststart = None hc.append(target) haploclusters.append(hc) for hc in haploclusters: # make mutation (submit job to thread pool) result = pool.apply_async(makemut, [args, hc, avoid, alignopts]) results.append(result) for result in results: tmpbamlist = result.get() if tmpbamlist is not None: for tmpbam in tmpbamlist: if os.path.exists(tmpbam): tmpbams.append(tmpbam) if len(tmpbams) == 0: logger.error("no succesful mutations") sys.exit() # merge tmp bams if len(tmpbams) == 1: move(tmpbams[0],outbam_mutsfile) elif len(tmpbams) > 1: mergebams(tmpbams,outbam_mutsfile,maxopen=int(args.maxopen)) bedfile.close() # cleanup for bam in tmpbams: if os.path.exists(bam): os.remove(bam) if os.path.exists(bam + '.bai'): os.remove(bam + '.bai') if args.skipmerge: logger.info("skipping merge, plase merge reads from %s manually." 
% outbam_mutsfile) else: if args.tagreads: from bamsurgeon.markreads import markreads tmp_tag_bam = 'tag.%s.bam' % str(uuid4()) markreads(outbam_mutsfile, tmp_tag_bam) move(tmp_tag_bam, outbam_mutsfile) logger.info("tagged reads.") logger.info("done making mutations, merging mutations into %s --> %s" % (args.bamFileName, args.outBamFile)) replace(args.bamFileName, outbam_mutsfile, args.outBamFile, seed=args.seed) #cleanup os.remove(outbam_mutsfile) var_basename = '.'.join(os.path.basename(args.varFileName).split('.')[:-1]) bam_basename = '.'.join(os.path.basename(args.outBamFile).split('.')[:-1]) vcf_fn = bam_basename + '.addsnv.' + var_basename + '.vcf' makevcf.write_vcf_snv('addsnv_logs_' + os.path.basename(args.outBamFile), vcf_fn) logger.info('vcf output written to ' + vcf_fn) def run(): parser = argparse.ArgumentParser(description='adds SNVs to reads, outputs modified reads as .bam along with mates') parser.add_argument('-v', '--varfile', dest='varFileName', required=True, help='Target regions to try and add a SNV, as BED') parser.add_argument('-f', '--bamfile', dest='bamFileName', required=True, help='sam/bam file from which to obtain reads') parser.add_argument('-r', '--reference', dest='refFasta', required=True, help='reference genome, fasta indexed with bwa index _and_ samtools faidx') parser.add_argument('-o', '--outbam', dest='outBamFile', required=True, help='.bam file name for output') parser.add_argument('-s', '--snvfrac', dest='snvfrac', default=1, help='maximum allowable linked SNP MAF (for avoiding haplotypes) (default = 1)') parser.add_argument('-m', '--mutfrac', dest='mutfrac', default=0.5, help='allelic fraction at which to make SNVs (default = 0.5)') parser.add_argument('-n', '--numsnvs', dest='numsnvs', default=0, help="maximum number of mutations to try (default: entire input)") parser.add_argument('-c', '--cnvfile', dest='cnvfile', default=None, help="tabix-indexed list of genome-wide absolute copy number values (e.g. 
2 alleles = no change)") parser.add_argument('-d', '--coverdiff', dest='coverdiff', default=0.9, help="allow difference in input and output coverage (default=0.9)") parser.add_argument('-z', '--haplosize', default=0, help='haplotype size (default = 0)') parser.add_argument('-p', '--procs', dest='procs', default=1, help="split into multiple processes (default=1)") parser.add_argument('--picardjar', default=None, help='path to picard.jar, required for most aligners') parser.add_argument('--mindepth', default=10, help='minimum read depth to make mutation (default = 10)') parser.add_argument('--maxdepth', default=2000, help='maximum read depth to make mutation (default = 2000)') parser.add_argument('--minmutreads', default=3, help='minimum number of mutated reads to output per site') parser.add_argument('--avoidreads', default=None, help='file of read names to avoid (mutations will be skipped if overlap)') parser.add_argument('--nomut', action='store_true', default=False, help="dry run") parser.add_argument('--ignoresnps', action='store_true', default=False, help="make mutations even if there are non-reference alleles sharing the relevant reads") parser.add_argument('--ignoreref', action='store_true', default=False, help="make mutations even if the mutation is back to the reference allele") parser.add_argument('--force', action='store_true', default=False, help="force mutation to happen regardless of nearby SNP or low coverage") parser.add_argument('--insane', action='store_true', default=False, help="ignore sanity check enforcing input read count = output read count in realignment") parser.add_argument('--single', action='store_true', default=False, help="input BAM is simgle-ended (default is paired-end)") parser.add_argument('--maxopen', dest='maxopen', default=1000, help="maximum number of open files during merge (default 1000)") parser.add_argument('--requirepaired', action='store_true', default=False, help='skip mutations if unpaired reads are present') parser.add_argument('--tagreads', action='store_true', default=False, help='add BS tag to altered reads') parser.add_argument('--skipmerge', action='store_true', default=False, help="final output is tmp file to be merged") parser.add_argument('--ignorepileup', action='store_true', default=False, help="do not check pileup depth in mutation regions") parser.add_argument('--aligner', default='backtrack', help='supported aligners: ' + ','.join(aligners.supported_aligners_bam)) parser.add_argument('--alignerthreads', default=1, help='threads used per realignment (default = 1)') parser.add_argument('--alignopts', default=None, help='aligner-specific options as comma delimited list of option1:value1,option2:value2,...') parser.add_argument('--tmpdir', default='addsnv.tmp', help='temporary directory (default=addsnv.tmp)') parser.add_argument('--seed', default=None, help='seed random number generation') args = parser.parse_args() if 'BAMSURGEON_PICARD_JAR' in os.environ: args.picardjar = os.environ['BAMSURGEON_PICARD_JAR'] main(args) if __name__ == '__main__': run()
adamewing/bamsurgeon
bin/addsnv.py
Python
mit
20,604
[ "BWA", "pysam" ]
2adc7cce8eaaa74f52b7b4f4d13c3ec15db7dcb107c51a75199045512d3b5d93
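The addsnv.py record above decides how many reads to mutate from the target VAF, rescales the VAF by local copy number when a CNV file is supplied, and enforces the --mindepth / --minmutreads cutoffs. A minimal, self-contained sketch of that selection step, assuming illustrative names (pick_reads_to_mutate is not part of bamsurgeon's API):

import random

def pick_reads_to_mutate(readnames, vaf, copy_number=None, minmutreads=3, mindepth=10):
    """Return the subset of read names to mutate, mirroring the VAF logic above."""
    if len(readnames) < mindepth:
        return None                      # too few reads in region: skip the site
    if copy_number is not None and copy_number > 0.0:
        vaf = vaf / copy_number          # dilute the VAF over the extra copies
    readnames = sorted(readnames)
    random.shuffle(readnames)
    lastread = int(len(readnames) * vaf)
    if lastread < minmutreads:           # force a minimum number of mutated reads if possible
        if len(readnames) > minmutreads:
            lastread = minmutreads
        else:
            return None                  # cannot honour minmutreads: drop the site
    return readnames[:lastread]

# example: 100 reads, VAF 0.25, copy number 2 -> 12 reads selected
picked = pick_reads_to_mutate(['read%d' % i for i in range(100)], 0.25, copy_number=2.0)
print(len(picked))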
# changelog bisection for mercurial # # Copyright 2007 Matt Mackall # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org> # Inspired by git bisect, extension skeleton taken from mq.py. # # This software may be used and distributed according to the terms # of the GNU General Public License, incorporated herein by reference. from i18n import _ from node import short import util def bisect(changelog, state): clparents = changelog.parentrevs skip = dict.fromkeys([changelog.rev(n) for n in state['skip']]) def buildancestors(bad, good): # only the earliest bad revision matters badrev = min([changelog.rev(n) for n in bad]) goodrevs = [changelog.rev(n) for n in good] # build ancestors array ancestors = [[]] * (changelog.count() + 1) # an extra for [-1] # clear good revs from array for node in goodrevs: ancestors[node] = None for rev in xrange(changelog.count(), -1, -1): if ancestors[rev] is None: for prev in clparents(rev): ancestors[prev] = None if ancestors[badrev] is None: return badrev, None return badrev, ancestors good = 0 badrev, ancestors = buildancestors(state['bad'], state['good']) if not ancestors: # looking for bad to good transition? good = 1 badrev, ancestors = buildancestors(state['good'], state['bad']) bad = changelog.node(badrev) if not ancestors: # now we're confused raise util.Abort(_("Inconsistent state, %s:%s is good and bad") % (badrev, short(bad))) # build children dict children = {} visit = [badrev] candidates = [] while visit: rev = visit.pop(0) if ancestors[rev] == []: candidates.append(rev) for prev in clparents(rev): if prev != -1: if prev in children: children[prev].append(rev) else: children[prev] = [rev] visit.append(prev) candidates.sort() # have we narrowed it down to one entry? tot = len(candidates) if tot == 1: return (bad, 0, good) perfect = tot / 2 # find the best node to test best_rev = None best_len = -1 poison = {} for rev in candidates: if rev in poison: for c in children.get(rev, []): poison[c] = True # poison children continue a = ancestors[rev] or [rev] ancestors[rev] = None x = len(a) # number of ancestors y = tot - x # number of non-ancestors value = min(x, y) # how good is this test? if value > best_len and rev not in skip: best_len = value best_rev = rev if value == perfect: # found a perfect candidate? quit early break if y < perfect: # all downhill from here? for c in children.get(rev, []): poison[c] = True # poison children continue for c in children.get(rev, []): if ancestors[c]: ancestors[c] = dict.fromkeys(ancestors[c] + a).keys() else: ancestors[c] = a + [c] assert best_rev is not None best_node = changelog.node(best_rev) return (best_node, tot, good)
carlgao/lenga
images/lenny64-peon/usr/share/python-support/mercurial-common/mercurial/hbisect.py
Python
mit
3,419
[ "VisIt" ]
e979ec80f06d8ff2384e14d7a98cfbd701a24e8df22144f2dd00bdceeac3f5a1
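hbisect.py scores every candidate revision by min(#ancestors, #non-ancestors) and tests the one closest to an even split, quitting early when a perfect half-split is found. A toy sketch of that scoring rule, independent of Mercurial's changelog API (best_bisect_candidate is an invented name):

def best_bisect_candidate(ancestor_counts, total, skip=()):
    """ancestor_counts: {rev: number of candidate ancestors of rev, including itself}."""
    perfect = total // 2
    best_rev, best_len = None, -1
    for rev, x in ancestor_counts.items():
        y = total - x                # non-ancestors
        value = min(x, y)            # how evenly testing this revision splits the candidates
        if value > best_len and rev not in skip:
            best_rev, best_len = rev, value
            if value == perfect:     # cannot do better than a perfect split
                break
    return best_rev

# linear history of 7 candidate revisions: rev i has i + 1 ancestors
print(best_bisect_candidate({i: i + 1 for i in range(7)}, 7))   # -> 2 (3 ancestors vs 4 non-ancestors)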
""" Class for outlier detection. This class provides a framework for outlier detection. It consists in several methods that can be added to a covariance estimator in order to assess the outlying-ness of the observations of a data set. Such a "outlier detector" object is proposed constructed from a robust covariance estimator (the Minimum Covariance Determinant). """ # Author: Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import numpy as np import scipy as sp from . import MinCovDet from ..base import ClassifierMixin class OutlierDetectionMixin(object): """Set of methods for outliers detection with covariance estimators. Parameters ---------- contamination : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. """ def __init__(self, contamination=0.1): self.contamination = contamination self.threshold = None def decision_function(self, X, raw_values=False): """Compute the decision function of the given observations. Parameters ---------- X : array-like, shape (n_samples, n_features) raw_values : bool Whether or not to consider raw Mahalanobis distances as the decision function. Must be False (default) for compatibility with the others outlier detection tools. Returns ------- decision : array-like, shape (n_samples, ) The values of the decision function for each observations. It is equal to the Mahalanobis distances if `raw_values` is True. By default (``raw_values=True``), it is equal to the cubic root of the shifted Mahalanobis distances. In that case, the threshold for being an outlier is 0, which ensures a compatibility with other outlier detection tools such as the One-Class SVM. """ mahal_dist = self.mahalanobis(X) if raw_values: decision = mahal_dist else: if self.threshold is None: raise Exception("Please fit data before predicting") transformed_mahal_dist = mahal_dist ** 0.33 decision = self.threshold ** 0.33 - transformed_mahal_dist return decision def predict(self, X): """Outlyingness of observations in X according to the fitted model. Parameters ---------- X : array-like, shape = (n_samples, n_features) Returns ------- is_outliers : array, shape = (n_samples, ), dtype = bool For each observations, tells whether or not it should be considered as an outlier according to the fitted model. threshold : float, The values of the less outlying point's decision function. """ if self.threshold is None: raise Exception("Please fit data before predicting") is_inlier = -np.ones(X.shape[0], dtype=int) if self.contamination is not None: values = self.decision_function(X, raw_values=True) is_inlier[values <= self.threshold] = 1 else: raise NotImplementedError("You must provide a contamination rate.") return is_inlier class EllipticEnvelope(ClassifierMixin, OutlierDetectionMixin, MinCovDet): """An object for detecting outliers in a Gaussian distributed dataset. Attributes ---------- `contamination` : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of \ outliers in the data set. location_ : array-like, shape (n_features,) Estimated robust location covariance_ : array-like, shape (n_features, n_features) Estimated robust covariance matrix precision_ : array-like, shape (n_features, n_features) Estimated pseudo inverse matrix. 
(stored only if store_precision is True) support_ : array-like, shape (n_samples,) A mask of the observations that have been used to compute the robust estimates of location and shape. Parameters ---------- store_precision : bool Specify if the estimated precision is stored. assume_centered : Boolean If True, the support of robust location and covariance estimates is computed, and a covariance estimate is recomputed from it, without centering the data. Useful to work with data whose mean is significantly equal to zero but is not exactly zero. If False, the robust location and covariance are directly computed with the FastMCD algorithm without additional treatment. support_fraction : float, 0 < support_fraction < 1 The proportion of points to be included in the support of the raw MCD estimate. Default is ``None``, which implies that the minimum value of support_fraction will be used within the algorithm: `[n_sample + n_features + 1] / 2`. contamination : float, 0. < contamination < 0.5 The amount of contamination of the data set, i.e. the proportion of outliers in the data set. See Also -------- EmpiricalCovariance, MinCovDet Notes ----- Outlier detection from covariance estimation may break or not perform well in high-dimensional settings. In particular, one will always take care to work with ``n_samples > n_features ** 2``. References ---------- .. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the minimum covariance determinant estimator" Technometrics 41(3), 212 (1999) """ def __init__(self, store_precision=True, assume_centered=False, support_fraction=None, contamination=0.1, random_state=None): MinCovDet.__init__(self, store_precision=store_precision, assume_centered=assume_centered, support_fraction=support_fraction, random_state=random_state) OutlierDetectionMixin.__init__(self, contamination=contamination) def fit(self, X, y=None): """ """ MinCovDet.fit(self, X) self.threshold = sp.stats.scoreatpercentile( self.dist_, 100. * (1. - self.contamination)) return self
soulmachine/scikit-learn
sklearn/covariance/outlier_detection.py
Python
bsd-3-clause
6,624
[ "Gaussian" ]
b29cf5f2104caebc04a1dacbf5e033c73dea1a40b15ebb7cc83a82f1d7c3ff4d
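The OutlierDetectionMixin / EllipticEnvelope record doubles as API documentation: fit a robust covariance model, then use predict for inlier/outlier labels and decision_function for a score whose sign marks outlyingness. A minimal usage sketch against the public scikit-learn API; details such as the raw_values flag shown above vary between scikit-learn releases:

import numpy as np
from sklearn.covariance import EllipticEnvelope

rng = np.random.RandomState(0)
inliers = rng.normal(0.0, 1.0, size=(95, 2))       # Gaussian bulk
outliers = rng.uniform(-6.0, 6.0, size=(5, 2))     # scattered contamination
X = np.vstack([inliers, outliers])

detector = EllipticEnvelope(contamination=0.05).fit(X)
labels = detector.predict(X)            # +1 for inliers, -1 for flagged outliers
scores = detector.decision_function(X)  # more negative means more outlying
print((labels == -1).sum(), scores.min())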
from asap3 import * from ase.md.verlet import VelocityVerlet from ase.lattice.cubic import FaceCenteredCubic from ase.io.trajectory import * from numpy import * from asap3.mpi import world import sys, os, time from asap3.testtools import ReportTest #DebugOutput("output.%d") print_version(1) worldsize = world.size ismaster = world.rank == 0 if worldsize == 1: layout = [None] elif worldsize == 2: layout = [(2,1,1), (1,2,1), (1,1,2)] elif worldsize == 3: layout = [(3,1,1), (1,1,3)] elif worldsize == 4: layout = [(1,2,2), (2,1,2), (2,2,1)] elif worldsize == 8: layout = [(2,2,2), (1,2,4)] else: raise ValueError, ("Cannot run on %d CPUs." % (worldsize,)) elements = [29] epsilon = [0.15] sigma = [2.7] if ismaster: initial = FaceCenteredCubic(directions=((1,0,0),(0,1,0),(0,0,1)), size=(40,40,40), symbol="Cu", latticeconstant=1.09*sigma[0]*1.41, pbc=(1,1,0)) momenta = sqrt(2*63.5 * units.kB * 400) * sin(arange(3*len(initial))) momenta.shape = (-1,3) initial.set_momenta(momenta) stdout = sys.stdout print "Number of atoms:", len(initial) else: initial = None stdout = open("/dev/null", "w") for cpulayout in layout: if cpulayout: print >>stdout, "Test with layout "+str(cpulayout) atoms = MakeParallelAtoms(initial, cpulayout) natoms = atoms.get_number_of_atoms() else: print >>stdout, "Serial test" atoms = Atoms(initial) natoms = len(atoms) print "Number of atoms:", natoms temp = atoms.get_kinetic_energy() / (1.5*units.kB*natoms) print >>stdout, "Temp:", temp, "K" ReportTest("Initial temperature", temp, 400.0, 1.0) atoms.set_calculator(LennardJones(elements, epsilon, sigma, -1.0, True)) epot = atoms.get_potential_energy() print >>stdout, "Potential energy:", epot ReportTest("Initial potential energy", epot, -301358.3, 0.5) etot = epot + atoms.get_kinetic_energy() if 0: if cpulayout: traj = ParallelNetCDFTrajectory("parallel.nc", atoms) else: traj = NetCDFTrajectory("serial.nc", atoms) traj.Add("PotentialEnergies") traj.Update() traj.Close() print "Trajectory done" dyn = VelocityVerlet(atoms, 3*units.fs) etot2 = None for i in range(5): dyn.run(15) newetot = atoms.get_potential_energy()+ atoms.get_kinetic_energy() print >>stdout, "Total energy:", newetot temp = atoms.get_kinetic_energy() / (1.5*units.kB*natoms) print >>stdout, "Temp:", temp, "K" if etot2 == None: ReportTest("Total energy (first step)", newetot, etot, 40.0) etot2=newetot else: ReportTest(("Total energy (step %d)" % (i+1,)), newetot, etot2, 20.0) print >>stdout, " *** This test completed ***" ReportTest.Summary()
auag92/n2dm
Asap-3.8.4/Test/parallel/parallelLennardJones.py
Python
mit
2,976
[ "ASE" ]
26c01f6b74e539ccd94b1b8c9d0f3fa0c43b6dde722b6e0fb0b217d1617f0230
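The Asap/ASE test above repeatedly converts kinetic energy to temperature via T = E_kin / (1.5 * kB * N). A dependency-free check of that relation and of the sine-initialised momenta used in the script (kB hard-coded in eV/K so no ase.units import is needed):

import numpy as np

kB = 8.617333262e-5          # Boltzmann constant in eV/K (same value as ase.units.kB)
mass = 63.5                  # Cu mass in amu, as in the test
natoms = 1000
target_T = 400.0

# momenta built as in the test: magnitude sqrt(2*m*kB*T), pseudo-random sine directions
momenta = np.sqrt(2 * mass * kB * target_T) * np.sin(np.arange(3 * natoms)).reshape(-1, 3)
ekin = (momenta**2).sum() / (2 * mass)
print(ekin / (1.5 * kB * natoms))   # close to, but not exactly, 400 K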
import numpy as np import os try: import netCDF4 as netCDF except: import netCDF3 as netCDF import matplotlib.pyplot as plt import time from datetime import datetime from matplotlib.dates import date2num, num2date import pyroms import pyroms_toolbox import _remapping class nctime(object): pass def remap_bdry_uv(src_file, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'): # Arctic ystart=690 # get time nctime.long_name = 'time' nctime.units = 'days since 1900-01-01 00:00:00' # time reference "days since 1900-01-01 00:00:00" ref = datetime(1900, 1, 1, 0, 0, 0) ref = date2num(ref) tag = src_file.rsplit('/')[-1].rsplit('_')[2] print("tag:", tag) year = int(tag[:4]) month = int(tag[4:6]) day = int(tag[6:]) time = datetime(year, month, day, 0, 0, 0) time = date2num(time) time = time - ref time = time + 2.5 # 5-day average # get dimensions Mp, Lp = dst_grd.hgrid.mask_rho.shape # create destination file dst_file = src_file.rsplit('/')[-1] dst_fileu = dst_dir + dst_file[:-4] + '_u_bdry_' + dst_grd.name + '.nc' print '\nCreating destination file', dst_fileu if os.path.exists(dst_fileu) is True: os.remove(dst_fileu) pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime) dst_filev = dst_dir + dst_file[:-4] + '_v_bdry_' + dst_grd.name + '.nc' print 'Creating destination file', dst_filev if os.path.exists(dst_filev) is True: os.remove(dst_filev) pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime) # open destination file ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_64BIT') ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_64BIT') #load var cdf = netCDF.Dataset(src_file) src_varu = cdf.variables['vozocrtx'] src_varv = cdf.variables['vomecrty'] #get missing value spval = src_varu._FillValue # ARCTIC2 grid sub-sample src_varu = src_varu[:] src_varv = src_varv[:] print "shape 1", src_varu.shape, src_varv.shape src_varu = np.squeeze(src_varu) src_varv = np.squeeze(src_varv) print "shape 2", src_varu.shape, src_varv.shape src_varu = src_varu[:,np.r_[ystart:np.size(src_varu,1),-1],:] src_varv = src_varv[:,np.r_[ystart:np.size(src_varv,1),-1],:] print "shape 3", src_varu.shape, src_varv.shape # get weights file wts_file_a = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_t_to_rho.nc' wts_file_u = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_u_to_rho.nc' wts_file_v = 'remap_weights_GLORYS_to_ARCTIC2_bilinear_v_to_rho.nc' # build intermediate zgrid zlevel = -src_grd.z_t[::-1,0,0] nzlevel = len(zlevel) dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel) dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord) # create variable in destination file print 'Creating variable u_north' ncu.createVariable('u_north', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval) ncu.variables['u_north'].long_name = '3D u-momentum north boundary condition' ncu.variables['u_north'].units = 'meter second-1' ncu.variables['u_north'].field = 'u_north, scalar, series' print 'Creating variable u_south' ncu.createVariable('u_south', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval) ncu.variables['u_south'].long_name = '3D u-momentum south boundary condition' ncu.variables['u_south'].units = 'meter second-1' ncu.variables['u_south'].field = 'u_south, scalar, series' print 'Creating variable u_east' ncu.createVariable('u_east', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval) ncu.variables['u_east'].long_name = '3D u-momentum east boundary condition' ncu.variables['u_east'].units = 'meter second-1' ncu.variables['u_east'].field = 'u_east, scalar, series' 
print 'Creating variable u_west' ncu.createVariable('u_west', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval) ncu.variables['u_west'].long_name = '3D u-momentum west boundary condition' ncu.variables['u_west'].units = 'meter second-1' ncu.variables['u_west'].field = 'u_east, scalar, series' # create variable in destination file print 'Creating variable ubar_north' ncu.createVariable('ubar_north', 'f8', ('ocean_time', 'xi_u'), fill_value=spval) ncu.variables['ubar_north'].long_name = '2D u-momentum north boundary condition' ncu.variables['ubar_north'].units = 'meter second-1' ncu.variables['ubar_north'].field = 'ubar_north, scalar, series' print 'Creating variable ubar_south' ncu.createVariable('ubar_south', 'f8', ('ocean_time', 'xi_u'), fill_value=spval) ncu.variables['ubar_south'].long_name = '2D u-momentum south boundary condition' ncu.variables['ubar_south'].units = 'meter second-1' ncu.variables['ubar_south'].field = 'ubar_south, scalar, series' print 'Creating variable ubar_east' ncu.createVariable('ubar_east', 'f8', ('ocean_time', 'eta_u'), fill_value=spval) ncu.variables['ubar_east'].long_name = '2D u-momentum east boundary condition' ncu.variables['ubar_east'].units = 'meter second-1' ncu.variables['ubar_east'].field = 'ubar_east, scalar, series' print 'Creating variable ubar_west' ncu.createVariable('ubar_west', 'f8', ('ocean_time', 'eta_u'), fill_value=spval) ncu.variables['ubar_west'].long_name = '2D u-momentum west boundary condition' ncu.variables['ubar_west'].units = 'meter second-1' ncu.variables['ubar_west'].field = 'ubar_east, scalar, series' print 'Creating variable v_north' ncv.createVariable('v_north', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval) ncv.variables['v_north'].long_name = '3D v-momentum north boundary condition' ncv.variables['v_north'].units = 'meter second-1' ncv.variables['v_north'].field = 'v_north, scalar, series' print 'Creating variable v_south' ncv.createVariable('v_south', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval) ncv.variables['v_south'].long_name = '3D v-momentum south boundary condition' ncv.variables['v_south'].units = 'meter second-1' ncv.variables['v_south'].field = 'v_south, scalar, series' print 'Creating variable v_east' ncv.createVariable('v_east', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval) ncv.variables['v_east'].long_name = '3D v-momentum east boundary condition' ncv.variables['v_east'].units = 'meter second-1' ncv.variables['v_east'].field = 'v_east, scalar, series' print 'Creating variable v_west' ncv.createVariable('v_west', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval) ncv.variables['v_west'].long_name = '3D v-momentum west boundary condition' ncv.variables['v_west'].units = 'meter second-1' ncv.variables['v_west'].field = 'v_east, scalar, series' print 'Creating variable vbar_north' ncv.createVariable('vbar_north', 'f8', ('ocean_time', 'xi_v'), fill_value=spval) ncv.variables['vbar_north'].long_name = '2D v-momentum north boundary condition' ncv.variables['vbar_north'].units = 'meter second-1' ncv.variables['vbar_north'].field = 'vbar_north, scalar, series' print 'Creating variable vbar_south' ncv.createVariable('vbar_south', 'f8', ('ocean_time', 'xi_v'), fill_value=spval) ncv.variables['vbar_south'].long_name = '2D v-momentum south boundary condition' ncv.variables['vbar_south'].units = 'meter second-1' ncv.variables['vbar_south'].field = 'vbar_south, scalar, series' print 'Creating variable vbar_east' ncv.createVariable('vbar_east', 'f8', ('ocean_time', 'eta_v'), 
fill_value=spval) ncv.variables['vbar_east'].long_name = '2D v-momentum east boundary condition' ncv.variables['vbar_east'].units = 'meter second-1' ncv.variables['vbar_east'].field = 'vbar_east, scalar, series' print 'Creating variable vbar_west' ncv.createVariable('vbar_west', 'f8', ('ocean_time', 'eta_v'), fill_value=spval) ncv.variables['vbar_west'].long_name = '2D v-momentum west boundary condition' ncv.variables['vbar_west'].units = 'meter second-1' ncv.variables['vbar_west'].field = 'vbar_east, scalar, series' # remaping print 'remapping and rotating u and v from', src_grd.name, \ 'to', dst_grd.name print 'time =', time # flood the grid print 'flood the grid' src_uz = pyroms_toolbox.CGrid_GLORYS.flood(src_varu, src_grd, Cpos='u', \ spval=spval, dmax=dmax, cdepth=cdepth, kk=kk) src_vz = pyroms_toolbox.CGrid_GLORYS.flood(src_varv, src_grd, Cpos='v', \ spval=spval, dmax=dmax, cdepth=cdepth, kk=kk) # horizontal interpolation using scrip weights print 'horizontal interpolation using scrip weights' dst_uz = pyroms.remapping.remap(src_uz, wts_file_u, \ spval=spval) dst_vz = pyroms.remapping.remap(src_vz, wts_file_v, \ spval=spval) # vertical interpolation from standard z level to sigma print 'vertical interpolation from standard z level to sigma' dst_u_north = pyroms.remapping.z2roms(dst_uz[::-1, Mp-2:Mp, 0:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,Lp), jrange=(Mp-2,Mp)) dst_u_south = pyroms.remapping.z2roms(dst_uz[::-1, 0:2, 0:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,Lp), jrange=(0,2)) dst_u_east = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, Lp-2:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(Lp-2,Lp), jrange=(0,Mp)) dst_u_west = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, 0:2], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,2), jrange=(0,Mp)) dst_v_north = pyroms.remapping.z2roms(dst_vz[::-1, Mp-2:Mp, 0:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,Lp), jrange=(Mp-2,Mp)) dst_v_south = pyroms.remapping.z2roms(dst_vz[::-1, 0:2, 0:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,Lp), jrange=(0,2)) dst_v_east = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, Lp-2:Lp], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(Lp-2,Lp), jrange=(0,Mp)) dst_v_west = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, 0:2], \ dst_grdz, dst_grd, Cpos='rho', spval=spval, \ flood=False, irange=(0,2), jrange=(0,Mp)) # rotate u,v fields src_angle = src_grd.angle src_angle = pyroms.remapping.remap(src_angle, wts_file_a) dst_angle = dst_grd.hgrid.angle_rho angle = dst_angle - src_angle angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1)) U_north = dst_u_north + dst_v_north*1j eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp]) U_north = U_north * eitheta_north dst_u_north = np.real(U_north) dst_v_north = np.imag(U_north) U_south = dst_u_south + dst_v_south*1j eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp]) U_south = U_south * eitheta_south dst_u_south = np.real(U_south) dst_v_south = np.imag(U_south) U_east = dst_u_east + dst_v_east*1j eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp]) U_east = U_east * eitheta_east dst_u_east = np.real(U_east) dst_v_east = np.imag(U_east) U_west = dst_u_west + dst_v_west*1j eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2]) U_west = U_west * eitheta_west dst_u_west = np.real(U_west) dst_v_west = np.imag(U_west) # move back to u,v points dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + dst_u_north[:,-1,1:]) 
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + dst_v_north[:,1:,:]) dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + dst_u_south[:,0,1:]) dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + dst_v_south[:,1:,:]) dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + dst_u_east[:,:,1:]) dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + dst_v_east[:,1:,-1]) dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + dst_u_west[:,:,1:]) dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + dst_v_west[:,1:,0]) # spval idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0) idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0) idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0) idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0) idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0) idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0) idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0) idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0) for n in range(dst_grd.vgrid.N): dst_u_north[n, idxu_north[0]] = spval dst_v_north[n, idxv_north[0]] = spval dst_u_south[n, idxu_south[0]] = spval dst_v_south[n, idxv_south[0]] = spval dst_u_east[n, idxu_east[0]] = spval dst_v_east[n, idxv_east[0]] = spval dst_u_west[n, idxu_west[0]] = spval dst_v_west[n, idxv_west[0]] = spval # compute depth average velocity ubar and vbar # get z at the right position z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] + dst_grd.vgrid.z_w[0,:,-1,1:]) z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] + dst_grd.vgrid.z_w[0,:,-2,:]) z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] + dst_grd.vgrid.z_w[0,:,0,1:]) z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] + dst_grd.vgrid.z_w[0,:,1,:]) z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] + dst_grd.vgrid.z_w[0,:,:,-2]) z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] + dst_grd.vgrid.z_w[0,:,1:,-1]) z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] + dst_grd.vgrid.z_w[0,:,:,1]) z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] + dst_grd.vgrid.z_w[0,:,1:,0]) dst_ubar_north = np.zeros(dst_u_north.shape[1]) dst_ubar_south = np.zeros(dst_u_south.shape[1]) dst_ubar_east = np.zeros(dst_u_east.shape[1]) dst_ubar_west = np.zeros(dst_u_west.shape[1]) dst_vbar_north = np.zeros(dst_v_north.shape[1]) dst_vbar_south = np.zeros(dst_v_south.shape[1]) dst_vbar_east = np.zeros(dst_v_east.shape[1]) dst_vbar_west = np.zeros(dst_v_west.shape[1]) for i in range(dst_u_north.shape[1]): dst_ubar_north[i] = (dst_u_north[:,i] * np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i] dst_ubar_south[i] = (dst_u_south[:,i] * np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i] for i in range(dst_v_north.shape[1]): dst_vbar_north[i] = (dst_v_north[:,i] * np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i] dst_vbar_south[i] = (dst_v_south[:,i] * np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i] for j in range(dst_u_east.shape[1]): dst_ubar_east[j] = (dst_u_east[:,j] * np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j] dst_ubar_west[j] = (dst_u_west[:,j] * np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j] for j in range(dst_v_east.shape[1]): dst_vbar_east[j] = (dst_v_east[:,j] * np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j] dst_vbar_west[j] = (dst_v_west[:,j] * np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j] #mask dst_ubar_north = np.ma.masked_where(dst_grd.hgrid.mask_u[-1,:] == 0, dst_ubar_north) dst_ubar_south = np.ma.masked_where(dst_grd.hgrid.mask_u[0,:] == 0, dst_ubar_south) dst_ubar_east = np.ma.masked_where(dst_grd.hgrid.mask_u[:,-1] == 0, dst_ubar_east) dst_ubar_west = np.ma.masked_where(dst_grd.hgrid.mask_u[:,0] == 0, dst_ubar_west) 
dst_vbar_north = np.ma.masked_where(dst_grd.hgrid.mask_v[-1,:] == 0, dst_vbar_north) dst_vbar_south = np.ma.masked_where(dst_grd.hgrid.mask_v[0,:] == 0, dst_vbar_south) dst_vbar_east = np.ma.masked_where(dst_grd.hgrid.mask_v[:,-1] == 0, dst_vbar_east) dst_vbar_west = np.ma.masked_where(dst_grd.hgrid.mask_v[:,0] == 0, dst_vbar_west) # write data in destination file print 'write data in destination file' ncu.variables['ocean_time'][0] = time ncu.variables['u_north'][0] = dst_u_north ncu.variables['u_south'][0] = dst_u_south ncu.variables['u_east'][0] = dst_u_east ncu.variables['u_west'][0] = dst_u_west ncu.variables['ubar_north'][0] = dst_ubar_north ncu.variables['ubar_south'][0] = dst_ubar_south ncu.variables['ubar_east'][0] = dst_ubar_east ncu.variables['ubar_west'][0] = dst_ubar_west ncv.variables['ocean_time'][0] = time ncv.variables['v_north'][0] = dst_v_north ncv.variables['v_south'][0] = dst_v_south ncv.variables['v_east'][0] = dst_v_east ncv.variables['v_west'][0] = dst_v_west ncv.variables['vbar_north'][0] = dst_vbar_north ncv.variables['vbar_south'][0] = dst_vbar_south ncv.variables['vbar_east'][0] = dst_vbar_east ncv.variables['vbar_west'][0] = dst_vbar_west # print dst_u.shape # print dst_ubar.shape # print dst_v.shape # print dst_vbar.shape # close file ncu.close() ncv.close() cdf.close()
kshedstrom/pyroms
examples/Arctic_GLORYS/remap_bdry_uv.py
Python
bsd-3-clause
17,288
[ "NetCDF" ]
bfd1610d04d8658dc805478679bdc6ce70b2c2b0d7d2a94c437fe80a6504da04
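remap_bdry_uv.py rotates the interpolated velocities into the destination grid by packing (u, v) into a complex number and multiplying by exp(-i*(dst_angle - src_angle)). A small NumPy sketch of just that rotation step (rotate_uv is an illustrative helper, not a pyroms function):

import numpy as np

def rotate_uv(u, v, src_angle, dst_angle):
    """Express velocity components in the destination grid's rotated frame."""
    angle = dst_angle - src_angle
    U = (u + 1j * v) * np.exp(-1j * angle)   # complex rotation, as in the script above
    return U.real, U.imag

# an eastward flow expressed on a grid whose x-axis is rotated 90 degrees CCW: (u', v') = (0, -1)
u, v = rotate_uv(np.array([1.0]), np.array([0.0]), 0.0, np.pi / 2)
print(np.round(u, 6), np.round(v, 6))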
def agts(queue):
    setups = queue.add('setups.py')
    run = queue.add('run.py', ncpus=8, walltime=25, deps=[setups])
    dks = queue.add('dks.py', ncpus=8, walltime=25, deps=[setups])
    box = queue.add('h2o_xas_box1.py', ncpus=8, walltime=25, deps=[setups])
    queue.add('submit.agts.py', deps=[run, dks, box],
              creates=['xas_h2o_spectrum.png', 'h2o_xas_box.png'],
              show=['xas_h2o_spectrum.png', 'h2o_xas_box.png'])

if __name__ == '__main__':
    from gpaw.test import equal
    execfile('plot.py')
    e_dks = float(open('dks.py.output').readline().split()[2])
    equal(e_dks, 532.774, 0.001)
    execfile('h2o_xas_box2.py')
qsnake/gpaw
doc/tutorials/xas/submit.agts.py
Python
gpl-3.0
660
[ "GPAW" ]
d357aec5a8d5373b56aa8d01eb1d1976447351e545a84eed0549f9bd4c29eebf
from common import Modules, load_yara_rules, PHPParseModule, ModuleMetadata from re import compile as recompile, MULTILINE from urllib import urlencode class pbot(PHPParseModule): def __init__(self): md = ModuleMetadata( module_name="pbot", bot_name="pBot", description="PHP IRC bot which can be used to drop other malware, spread and launch denial of service " "attacks", authors=["Brian Wallace (@botnet_hunter)"], version="1.0.0", date="March 14, 2014", references=[] ) PHPParseModule.__init__(self, md) self.yara_rules = None pass def _generate_yara_rules(self): if self.yara_rules is None: self.yara_rules = load_yara_rules("pbot.yara") return self.yara_rules def get_config_values(self, config): try: p = recompile(r'[\'"](?P<key>[^\'"]+)[\'"][\s]*=>[\s]*[\'"](?P<value>[^\'"]+)[\'"]', MULTILINE) results = p.findall(config) ret = {} for pair in results: ret[unicode(pair[0], errors='ignore')] = unicode(pair[1], errors='ignore') return ret except: return {} def get_bot_information(self, file_data): ret = {} try: p = recompile(r'var[\s]+\$config[\s]*=[\s]*array[\s]*\([\s]*(\"[^\"]*\"[\s]*=>.*,?[\s]*)*(//)?\);', MULTILINE) result = p.search(file_data) if result is None: return {} ret = self.get_config_values(result.group(0)) uris = [] server = ret['server'] if 'server' in ret else None server_pass = ret['pass'] if "pass" in ret else None port = int(ret['port']) if 'port' in ret else 6667 chan = ret['chan'] if 'chan' in ret else None chan2 = ret['chan2'] if 'chan2' in ret else None key = ret['key'] if 'key' in ret else server_pass uris.append("pbot://{0}:{1}/?{2}".format(server, port, urlencode({"server_pass": server_pass, "chan": chan, "channel_pass": key}))) if chan2 is not None: uris.append("pbot://{0}:{1}/?{2}".format(server, port, urlencode({"server_pass": server_pass, "chan": chan2, "channel_pass": key}))) ret['c2s'] = [] for uri in uris: ret['c2s'].append({"c2_uri": uri}) except KeyboardInterrupt: raise except: pass return ret Modules.list.append(pbot())
bwall/bamfdetect
BAMF_Detect/modules/pbot.py
Python
mit
2,741
[ "Brian" ]
393c1bacd4c0d98b6fa8dbbb054461750866a644fc04de526b6ec9e8b5f92f40
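pbot.py recovers the IRC C2 settings by running a 'key' => 'value' regex over the PHP $config array. The same extraction on a made-up config string (the server, channel and key below are fabricated for illustration):

import re

php = '''var $config = array("server" => "irc.example.net",
                             "port"   => "6667",
                             "chan"   => "#test",
                             "key"    => "s3cret");'''

pair_re = re.compile(r'[\'"](?P<key>[^\'"]+)[\'"]\s*=>\s*[\'"](?P<value>[^\'"]+)[\'"]', re.MULTILINE)
config = dict(pair_re.findall(php))
print(config['server'], config['port'], config['chan'])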
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. import json import os import unittest from monty.json import MontyDecoder from pymatgen.alchemy.filters import ( ContainsSpecieFilter, RemoveDuplicatesFilter, RemoveExistingFilter, SpecieProximityFilter, ) from pymatgen.alchemy.transmuters import StandardTransmuter from pymatgen.analysis.structure_matcher import StructureMatcher from pymatgen.core.lattice import Lattice from pymatgen.core.periodic_table import Species from pymatgen.core.structure import Structure from pymatgen.util.testing import PymatgenTest class ContainsSpecieFilterTest(PymatgenTest): def test_filtering(self): coords = [[0, 0, 0], [0.75, 0.75, 0.75], [0.5, 0.5, 0.5], [0.25, 0.25, 0.25]] lattice = Lattice([[3.0, 0.0, 0.0], [1.0, 3.0, 0.00], [0.00, -2.0, 3.0]]) s = Structure( lattice, [ {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}, {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}, {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}, {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}, ], coords, ) species1 = [Species("Si", 5), Species("Mg", 2)] f1 = ContainsSpecieFilter(species1, strict_compare=True, AND=False) self.assertFalse(f1.test(s), "Incorrect filter") f2 = ContainsSpecieFilter(species1, strict_compare=False, AND=False) self.assertTrue(f2.test(s), "Incorrect filter") species2 = [Species("Si", 4), Species("Mg", 2)] f3 = ContainsSpecieFilter(species2, strict_compare=True, AND=False) self.assertTrue(f3.test(s), "Incorrect filter") f4 = ContainsSpecieFilter(species2, strict_compare=False, AND=False) self.assertTrue(f4.test(s), "Incorrect filter") species3 = [Species("Si", 5), Species("O", -2)] f5 = ContainsSpecieFilter(species3, strict_compare=True, AND=True) self.assertFalse(f5.test(s), "Incorrect filter") f6 = ContainsSpecieFilter(species3, strict_compare=False, AND=True) self.assertTrue(f6.test(s), "Incorrect filter") species4 = [Species("Si", 4), Species("Mg", 2)] f7 = ContainsSpecieFilter(species4, strict_compare=True, AND=True) self.assertFalse(f7.test(s), "Incorrect filter") f8 = ContainsSpecieFilter(species4, strict_compare=False, AND=True) self.assertFalse(f8.test(s), "Incorrect filter") def test_to_from_dict(self): species1 = ["Si5+", "Mg2+"] f1 = ContainsSpecieFilter(species1, strict_compare=True, AND=False) d = f1.as_dict() self.assertIsInstance(ContainsSpecieFilter.from_dict(d), ContainsSpecieFilter) class SpecieProximityFilterTest(PymatgenTest): def test_filter(self): s = self.get_structure("Li10GeP2S12") sf = SpecieProximityFilter({"Li": 1}) self.assertTrue(sf.test(s)) sf = SpecieProximityFilter({"Li": 2}) self.assertFalse(sf.test(s)) sf = SpecieProximityFilter({"P": 1}) self.assertTrue(sf.test(s)) sf = SpecieProximityFilter({"P": 5}) self.assertFalse(sf.test(s)) def test_to_from_dict(self): sf = SpecieProximityFilter({"Li": 1}) d = sf.as_dict() self.assertIsInstance(SpecieProximityFilter.from_dict(d), SpecieProximityFilter) class RemoveDuplicatesFilterTest(unittest.TestCase): def setUp(self): with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json")) as fp: entries = json.load(fp, cls=MontyDecoder) self._struct_list = [e.structure for e in entries] self._sm = StructureMatcher() def test_filter(self): transmuter = StandardTransmuter.from_structures(self._struct_list) fil = RemoveDuplicatesFilter() transmuter.apply_filter(fil) self.assertEqual(len(transmuter.transformed_structures), 11) def test_to_from_dict(self): fil = RemoveDuplicatesFilter() d = fil.as_dict() self.assertIsInstance(RemoveDuplicatesFilter().from_dict(d), 
RemoveDuplicatesFilter) class RemoveExistingFilterTest(unittest.TestCase): def setUp(self): with open(os.path.join(PymatgenTest.TEST_FILES_DIR, "TiO2_entries.json")) as fp: entries = json.load(fp, cls=MontyDecoder) self._struct_list = [e.structure for e in entries] self._sm = StructureMatcher() self._exisiting_structures = self._struct_list[:-1] def test_filter(self): fil = RemoveExistingFilter(self._exisiting_structures) transmuter = StandardTransmuter.from_structures(self._struct_list) transmuter.apply_filter(fil) self.assertEqual(len(transmuter.transformed_structures), 1) self.assertTrue( self._sm.fit( self._struct_list[-1], transmuter.transformed_structures[-1].final_structure, ) ) if __name__ == "__main__": # import sys;sys.argv = ['', 'Test.testName'] unittest.main()
vorwerkc/pymatgen
pymatgen/alchemy/tests/test_filters.py
Python
mit
5,078
[ "pymatgen" ]
41ddd91533cf06db02f0da6bd3523744efd35e463c6fd2b1219632918bf1179e
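These tests double as usage notes: a filter is constructed from target species (Species objects or strings such as 'Si4+') and applied to a Structure with .test(). A condensed sketch mirroring the fixtures above; it assumes a pymatgen installation exposing the same Lattice/Structure/ContainsSpecieFilter signatures the tests use:

from pymatgen.alchemy.filters import ContainsSpecieFilter
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure

lattice = Lattice([[3.0, 0.0, 0.0], [1.0, 3.0, 0.0], [0.0, -2.0, 3.0]])
site = {"Si4+": 0.5, "O2-": 0.25, "P5+": 0.25}
s = Structure(lattice, [site, site], [[0, 0, 0], [0.5, 0.5, 0.5]])

# OR semantics: passes because Si4+ is present, even though Mg2+ is not
print(ContainsSpecieFilter(["Si4+", "Mg2+"], strict_compare=False, AND=False).test(s))
# AND semantics: fails because Mg2+ is absent
print(ContainsSpecieFilter(["Si4+", "Mg2+"], strict_compare=False, AND=True).test(s))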
### Refer to Chulkov and Echenique, PRB 67, 245402 (2003) for comparison of results ### import numpy as np import sys import time from math import sqrt from ase import Atoms, Atom from ase.visualize import view from ase.units import Bohr from ase.lattice.surface import * from ase.parallel import paropen from gpaw import GPAW from gpaw.mpi import serial_comm, rank, size from gpaw.utilities import devnull from gpaw.response.df import DF if rank != 0: sys.stdout = devnull GS = 1 EELS = 1 check = 1 nband = 30 if GS: kpts = (64,64,1) atoms = hcp0001('Be',size=(1,1,1)) atoms.cell[2][2] = (21.2) atoms.set_pbc(True) atoms.center(axis=2) view(atoms) calc = GPAW( gpts=(12,12,108), xc='LDA', txt='be.txt', kpts=kpts, basis='dzp', nbands=nband+5, parallel={'domain':1, 'band':1}, convergence={'bands':nband}, eigensolver = 'cg', width=0.1) atoms.set_calculator(calc) atoms.get_potential_energy() if EELS: for i in range(1, 2): w = np.linspace(0, 15, 301) q = np.array([-i/64., i/64., 0.]) # Gamma - K ecut = 40 + i*10 df = DF(calc=calc, q=q, w=w, eta=0.05, ecut = ecut, txt='df_' + str(i) + '.out') df.get_surface_response_function(z0=21.2/2, filename='be_EELS') df.get_EELS_spectrum() df.check_sum_rule() df.write('df_' + str(i) + '.pckl') if check: d = np.loadtxt('be_EELS') wpeak1 = 2.50 # eV wpeak2 = 9.95 Nw1 = 50 Nw2 = 199 if (d[Nw1, 1] > d[Nw1-1, 1] and d[Nw1, 1] > d[Nw1+1, 1] and d[Nw2, 1] > d[Nw2-1, 1] and d[Nw2, 1] > d[Nw2+1, 1]): pass else: raise ValueError('Plasmon peak not correct ! ') if (np.abs(d[Nw1, 1] - 10.1346526489) > 1e-5 or np.abs(d[Nw2, 1] - 2.17958316492 ) > 1e-5): print d[Nw1, 1], d[Nw, 1] raise ValueError('Please check spectrum strength ! ')
qsnake/gpaw
gpaw/test/big/response/be_1ml_surf_response.py
Python
gpl-3.0
2,081
[ "ASE", "GPAW" ]
105cbca4cb3dbbcf30cb5db0fc95e7d96cba4eb6ce7e437d7d122c8f233d4732
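The final block of this script asserts that the expected loss peaks (2.50 eV and 9.95 eV) sit on local maxima of the computed EELS spectrum. The same local-maximum check in isolation, on a synthetic two-column spectrum rather than GPAW output:

import numpy as np

w = np.linspace(0, 15, 301)
spectrum = np.exp(-((w - 2.5) / 0.4) ** 2) + 0.3 * np.exp(-((w - 9.95) / 0.6) ** 2)
d = np.column_stack([w, spectrum])

def is_local_max(data, i):
    return data[i, 1] > data[i - 1, 1] and data[i, 1] > data[i + 1, 1]

print(is_local_max(d, 50), is_local_max(d, 199))   # both expected peak positions are local maxima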
# Copyright (C) 2014 by Per Unneberg # pylint: disable=R0904 import os import unittest import logging from collections import OrderedDict, Counter from nose.tools import raises from snakemakelib.report.picard import PicardMetrics, PicardHistMetrics, AlignMetrics, InsertMetrics, HsMetrics, DuplicationMetrics, _read_picard_metrics, combine_metrics, DataFrame, _make_unique import pytest logger = logging.getLogger(__name__) @pytest.fixture(autouse=True) def setUp(): """Set up test fixtures for metrics test""" global PM, PH, AMa, IMa, DMa, HMa, AMf, IMf, DMf, HMf, align_metrics, dup_metrics, insert_metrics, hs_metrics, alnmet, insmet, dupmet, hsmet, inshist, duphist, Adf, Idf, Idfh, Ddf, Ddfh, Hdf metricsroot = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir, 'data', 'metrics', 'J.Doe_00_01') metricsfiles = [] for root, dirs, files in os.walk(metricsroot): metricsfiles += [os.path.join(root, x) for x in files if x.endswith('metrics')] align_metrics = sorted([x for x in metricsfiles if x.endswith('align_metrics')]) hs_metrics = sorted([x for x in metricsfiles if x.endswith('hs_metrics')]) insert_metrics = sorted([x for x in metricsfiles if x.endswith('insert_metrics')]) dup_metrics = sorted([x for x in metricsfiles if x.endswith('dup_metrics')]) (alnmet, _) = _read_picard_metrics(align_metrics[0]) (insmet, inshist) = _read_picard_metrics(insert_metrics[0]) (hsmet, _) = _read_picard_metrics(hs_metrics[0]) (dupmet, duphist) = _read_picard_metrics(dup_metrics[0]) PM = PicardMetrics(identifier="PM", filename=align_metrics[0]) PH = PicardHistMetrics(identifier="PH", filename=insert_metrics[0]) AMf = AlignMetrics(identifier="AM", filename=align_metrics[0]) IMf = InsertMetrics(identifier="IM", filename=insert_metrics[0]) HMf = HsMetrics(identifier="HM", filename=hs_metrics[0]) DMf = DuplicationMetrics(identifier="DM", filename=dup_metrics[0]) AMa = AlignMetrics(*alnmet, identifier="AM") IMa = InsertMetrics(*insmet, hist=inshist, identifier="IM") HMa = HsMetrics(*hsmet, identifier="HM") DMa = DuplicationMetrics(*dupmet, hist=duphist, identifier="DM") Adf = DataFrame(*alnmet) Idf = DataFrame(*insmet) Idfh = DataFrame(*inshist) Ddf = DataFrame(*dupmet) Ddfh = DataFrame(*duphist) Hdf = DataFrame(*hsmet) class TestDataFrame(unittest.TestCase): """Test DataFrame object""" def test_dataframe_init(self): """Test initialization of DataFrame""" df = DataFrame(*alnmet) self.assertListEqual(df.colnames[0:3], ['CATEGORY', 'TOTAL_READS', 'PF_READS']) self.assertTupleEqual((3,25), df.dim) df = DataFrame(*inshist) self.assertTupleEqual((257,2), df.dim) self.assertListEqual(df.colnames, ['insert_size', 'All_Reads.fr_count']) def test_x(self): """Test getting x in two ways""" self.assertListEqual(['FIRST_OF_PAIR', 'SECOND_OF_PAIR', 'PAIR'], Adf.x('CATEGORY')) self.assertListEqual(['FIRST_OF_PAIR', 'SECOND_OF_PAIR', 'PAIR'], Adf[['CATEGORY']].x()) def test_y(self): """Test getting y in two ways""" self.assertListEqual(['FIRST_OF_PAIR', 'SECOND_OF_PAIR', 'PAIR'], Adf.y('CATEGORY')) self.assertListEqual(['FIRST_OF_PAIR', 'SECOND_OF_PAIR', 'PAIR'], Adf[['CATEGORY']].y()) def test_slice_x(self): """Test getting x as slice in two ways""" self.assertListEqual(Adf.x('CATEGORY', [1]), ['SECOND_OF_PAIR']) self.assertListEqual([86, 87, 88, 89, 90, 91, 92, 93, 94, 95], [int(x) for x in Idfh.x(indices=list(range(10,20)))]) self.assertListEqual([86, 87, 88, 89, 90, 91, 92, 93, 94, 95], [int(x) for x in Idfh.y()[10:20]]) def test_slice_y(self): """Test getting y as slice in two ways""" 
self.assertListEqual(Adf.y('CATEGORY', [1]), ['SECOND_OF_PAIR']) self.assertListEqual([int(x) for x in Idfh.y(indices=list(range(3,7)))], [79, 80, 81, 82]) self.assertListEqual([int(x) for x in Idfh.y()[3:7]], [79, 80, 81, 82]) def test_format(self): """Test formatting output of data frame""" fmt = OrderedDict([('LIBRARY', ('s', str)), ('UNPAIRED_READS_EXAMINED', ('3.2h', int)), ('READ_PAIRS_EXAMINED', ('3.2h', int)), ('UNMAPPED_READS', ('3.2h', int)), ('UNPAIRED_READ_DUPLICATES', ('3.2h', int)), ('READ_PAIR_DUPLICATES', ('3.2h', int)), ('READ_PAIR_OPTICAL_DUPLICATES', ('3.2f', float)), ('PERCENT_DUPLICATION', ('3.2%', float)), ('ESTIMATED_LIBRARY_SIZE', ('3.2h', int))]) df = DataFrame(*dupmet, **fmt) self.assertListEqual(sorted(list(df._format.items())), sorted(list(fmt.items()))) class TestReadPicardMetrics(unittest.TestCase): """Test reading picard metrics""" def test_picard_read_metrics(self): """Test function _read_picard_metrics""" (metrics, hist) = _read_picard_metrics(align_metrics[0]) self.assertIsNone(hist) self.assertEqual(len(metrics), 4) (metrics, hist) = _read_picard_metrics(insert_metrics[0]) self.assertListEqual(sorted(hist[0]), sorted(['insert_size', 'All_Reads.fr_count'])) self.assertEqual(metrics[1][0], 156) class TestPicardMetrics(unittest.TestCase): """Test PicardMetrics classes""" @raises(ValueError) def test_missing_args(self): """Test instantiating PicardMetrics with missing arguments""" PicardMetrics() def test_init(self): """Test instantiating PicardMetrics in two ways""" p1 = PicardMetrics(filename=align_metrics[0], identifier="PM") p2 = PicardMetrics(*alnmet, identifier="PM") self.assertListEqual(p1.metrics.data, p2.metrics.data) def test_iter(self): """Test PicardMetrics iteration""" i = 0 for m in PM.metrics: self.assertListEqual(list(PM.metrics.data[i]), list(m)) i += 1 def test_get_single_column(self): """Test getting single column""" pms = PM.metrics[['SAMPLE']] self.assertEqual(pms.colnames, ['SAMPLE']) self.assertListEqual([OrderedDict([('SAMPLE', '')]), OrderedDict([('SAMPLE', '')]), OrderedDict([('SAMPLE', '')])], pms.data) def test_set_sample(self): """Test updating SAMPLE key in PicardMetrics object""" PM.metrics['SAMPLE'] = 'sample' pms = PM.metrics[['SAMPLE']] self.assertListEqual([OrderedDict([('SAMPLE', 'sample')]), OrderedDict([('SAMPLE', 'sample')]), OrderedDict([('SAMPLE', 'sample')])], pms.data) class TestPicardHistMetrics(unittest.TestCase): @raises(ValueError) def test_missing_args(self): """Test instantiating PicardHistMetrics with missing arguments""" PicardHistMetrics() @raises(ValueError) def test_missing_hist(self): """Test instantiating PicardHistMetrics with missing hist argument""" args = [('MEDIAN_INSERT_SIZE', '156'), ('MEDIAN_ABSOLUTE_DEVIATION', '39')] p = PicardHistMetrics(*args) def test_init(self): """Test instantiating PicardHistMetrics object""" p1 = PicardHistMetrics(filename=insert_metrics[0], hist="test", identifier="PM") p2 = PicardHistMetrics(*insmet, hist="test", identifier="PM") self.assertListEqual(p1.metrics.data, p2.metrics.data) class TestAlignMetrics(unittest.TestCase): """Test AlignMetrics classes""" def test_equality(self): """Test that two instances have identical metrics""" self.assertListEqual(AMa.metrics.data, AMf.metrics.data) def test_summary(self): """Test AlignMetrics summary""" self.assertEqual('FIRST_OF_PAIR 2.00k 2.00k 100.00%', AMa.summary().split("\n")[1][0:33]) def test_subset_summary(self): """Test AlignMetrics subset summary""" columns = ['CATEGORY', 'TOTAL_READS', 'PF_READS_ALIGNED'] am = 
AMa[columns] self.assertEqual('SECOND_OF_PAIR 2.00k 1.95k', am.summary().split("\n")[2]) def test_category(self): """Test AlignMetrics category retrieval""" AMc = AMa.category() self.assertTupleEqual(AMc.metrics.dim, (1, 25)) self.assertEqual(AMc.summary().split("\n")[1].split("\t")[0], "PAIR") AMc = AMa.category('FIRST_OF_PAIR') self.assertTupleEqual(AMc.metrics.dim, (1, 25)) self.assertEqual(AMc.summary().split("\n")[1].split("\t")[0], "FIRST_OF_PAIR") def test_plot_tuple(self): """Test retrieval of plot tuple""" pass class TestInsertMetrics(unittest.TestCase): """Test InsertMetrics classes""" def test_equality(self): """Test that two instances have identical metrics""" self.assertListEqual(IMa.metrics.data, IMf.metrics.data) def test_subset_type(self): """Test subsetting an InsertMetrics object""" columns = ['MEDIAN_INSERT_SIZE', 'MEDIAN_ABSOLUTE_DEVIATION', 'MIN_INSERT_SIZE'] im = IMa[columns] self.assertListEqual(im.metrics.colnames, columns) self.assertIsInstance(im, InsertMetrics) def test_summary(self): """Test InsertMetrics summary""" self.assertListEqual(IMa.summary().split("\n")[1].split("\t"), ['156', '39', '70', '485', '167.819', '61.549', '1.73k', 'FR', '15', '29', '43', '61', '79', '93', '111', '133', '195', '443', '', '', '']) def test_subset_summary(self): """Test InsertMetrics subset summary""" columns = ['MEDIAN_INSERT_SIZE', 'MEDIAN_ABSOLUTE_DEVIATION', 'MIN_INSERT_SIZE'] im = IMa[columns] self.assertListEqual(im.summary().split("\n")[1].split("\t"), ['156', '39', '70']) def test_plot_tuple(self): """Test retrieval of plot tuple""" pass class TestDuplicationMetrics(unittest.TestCase): """Test DuplicationMetrics classes""" def test_equality(self): """Test that two instances have identical metrics""" self.assertListEqual(DMa.metrics.data, DMf.metrics.data) def test_subset_type(self): """Test subsetting an DuplicationMetrics object""" columns = ['LIBRARY', 'UNPAIRED_READS_EXAMINED', 'READ_PAIRS_EXAMINED'] dm = DMa[columns] self.assertListEqual(dm.metrics.colnames, columns) self.assertIsInstance(dm, DuplicationMetrics) def test_summary(self): """Test DuplicationMetrics summary""" self.assertEqual(DMa.summary().split("\n")[1].split("\t"), ['lib', '54.00', '1.95k', '60.00', '43.00', '215.00', '0.00', '11.99%', '8.14k']) def test_subset_summary(self): """Test DuplicationMetrics subset summary""" columns = ['LIBRARY', 'UNPAIRED_READS_EXAMINED', 'READ_PAIRS_EXAMINED', 'PERCENT_DUPLICATION'] dm = DMa[columns] self.assertListEqual(['lib', '54.00', '1.95k', '11.99%'], dm.summary().split("\n")[1].split("\t")) def test_plot_tuple(self): """Test retrieval of plot tuple""" pass class TestHsMetrics(unittest.TestCase): """Test HsMetrics classes""" def test_equality(self): """Test that two instances have identical metrics""" self.assertListEqual(HMa.metrics.data, HMf.metrics.data) def test_subset_type(self): """Test subsetting an HsMetrics object""" columns = ['BAIT_SET', 'GENOME_SIZE', 'BAIT_TERRITORY'] hm = HMa[columns] self.assertListEqual(hm.metrics.colnames, columns) self.assertIsInstance(hm, HsMetrics) def test_summary(self): """Test HsMetrics summary""" self.assertListEqual(HMa.summary().split("\n")[1].split("\t")[0:6], ['chr11_baits', '2.00M', '301.00', '301.00', '1.00', '4.00k']) def test_subset_summary(self): """Test HsMetrics subset summary""" columns = ['GENOME_SIZE', 'BAIT_TERRITORY', 'TARGET_TERRITORY', 'ZERO_CVG_TARGETS_PCT', 'PCT_TARGET_BASES_2X', 'PCT_TARGET_BASES_10X', 'PCT_TARGET_BASES_30X'] hm = HMa[columns] self.assertListEqual(['2.00M', '301.00', '301.00', '0.00%', 
'76.41%', '50.83%', '16.61%'], hm.summary().split("\n")[1].split("\t")) def test_plot_tuple(self): """Test retrieval of plot tuple""" pass class TestCombineMetrics(unittest.TestCase): """Test methods for combining metrics""" def test_combine_metrics(self): """Test combining metrics""" amsa = AMa.category() mlist = list(zip([DMa],[IMa],[amsa],[HMa])) colnames = [c for sublist in [m.metrics.colnames for mtup in mlist for m in mtup] for c in sublist] cnt = Counter(colnames) # Merge colnames pm = combine_metrics(mlist) self.assertEqual((len(cnt)), pm.metrics.dim[1]) # Make colnames unique pm = combine_metrics(mlist, uniquify=True) self.assertEqual((len(colnames)), pm.metrics.dim[1]) def test_combine_multiple_metrics(self): """Test combining multiple metrics""" mlist =(list( zip( [AlignMetrics(filename=x).category() for x in align_metrics], [InsertMetrics(filename=x) for x in insert_metrics], [DuplicationMetrics(filename=x) for x in dup_metrics], [HsMetrics(filename=x) for x in hs_metrics] ) )) pm = combine_metrics(mlist) self.assertTupleEqual(pm.metrics.dim, (2,91)) self.assertListEqual(pm.summary(raw=True).split("\n")[1].split("\t")[:25], [v for k,v in AMa.metrics.data[2].items()]) def test_plot_tuple(self): """Test retrieval of plot tuple""" pass
percyfal/snakemakelib
snakemakelib/tests/test_metrics.py
Python
mit
13,715
[ "ADF" ]
e41c30ac9984f4058ca6c1dce33598126a7b14aaa06625bdb29170b1ef866425
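The expected summary strings in these tests ('2.00k', '1.95k', '2.00M', '11.99%') come from a human-readable number format driven by specs like ('3.2h', int). A stand-alone sketch of such a formatter; human_format is illustrative and not the snakemakelib implementation:

def human_format(value, precision=2):
    """Render 1950 as '1.95k' and 2000000 as '2.00M', the style seen in the summaries above."""
    for suffix, factor in (('G', 1e9), ('M', 1e6), ('k', 1e3)):
        if abs(value) >= factor:
            return '%.*f%s' % (precision, value / factor, suffix)
    return '%.*f' % (precision, float(value))

print(human_format(1950), human_format(2000000), human_format(301))   # 1.95k 2.00M 301.00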
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/llnl/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class RA4base(RPackage): """Automated Affymetrix Array Analysis.""" homepage = "https://www.bioconductor.org/packages/a4Base/" url = "https://www.bioconductor.org/packages/release/bioc/src/contrib/a4Base_1.24.0.tar.gz" list_url = homepage version('1.24.0', '98f53cb437f1b8bb7ba8c2628c0f44c6') depends_on('r-biobase', type=('build', 'run')) depends_on('r-annotationdbi', type=('build', 'run')) depends_on('r-annaffy', type=('build', 'run')) depends_on('r-mpm', type=('build', 'run')) depends_on('r-genefilter', type=('build', 'run')) depends_on('r-limma', type=('build', 'run')) depends_on('r-multtest', type=('build', 'run')) depends_on('r-glmnet', type=('build', 'run')) depends_on('r-a4preproc', type=('build', 'run')) depends_on('r-a4core', type=('build', 'run')) depends_on('r-gplots', type=('build', 'run'))
wscullin/spack
var/spack/repos/builtin/packages/r-a4base/package.py
Python
lgpl-2.1
2,140
[ "Bioconductor" ]
ec68c8336ea0246e007d8433edbb8415201485a929e1ec956b86610d19c194aa
'''
'''
import numpy as np
from numpy import random, average
from L500analysis.utils.averages.probability_distributions import create_pdf
from L500analysis.utils.utils import match_nearest

class WeightedAverages :
    def __init__(self, probability_distribution='uniform random',
                 pdf_sample_size=1000,
                 min_candidate=0, max_candidate=1) :
        '''Probability distribution defaults to a uniform random distribution.
        In development: gaussian

        pdf_sample_size: The number of points evenly sampled from the pdf
        min_candidate: candidate value that corresponds to the first sample point in the pdf
        max_candidate: candidate value that corresponds to the last sample point in the pdf
        '''
        self.pdf_type = probability_distribution
        self.pdf_sample_size = pdf_sample_size
        self.min_candidate = min_candidate
        self.max_candidate = max_candidate

    def _create_pdf(self) :
        return create_pdf(self.pdf_sample_size, self.pdf_type)

    def _create_array_of_candidates(self) :
        separation = float(self.max_candidate-self.min_candidate)/self.pdf_sample_size
        # evenly spaced candidate values between min_candidate and max_candidate
        candidates = np.arange(self.min_candidate, self.max_candidate, separation)
        assert(len(candidates)==self.pdf_sample_size)
        return candidates

    def generate_random_set(self, number_of_items_in_set) :
        pdf = self._create_pdf()
        array_of_candidates = self._create_array_of_candidates()
        self._generated_set = random.choice(
            array_of_candidates, number_of_items_in_set, p=pdf)

    def match_sample_to_generated_set(self, sample=None) :
        return match_nearest(estimates=self._generated_set,
                             exactvals=sample)
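# Illustrative usage sketch (not part of the original module): draw a weighted
# random set of candidate values and match an external sample against it. The
# sample values below are arbitrary assumptions for demonstration.
if __name__ == '__main__' :
    wa = WeightedAverages(probability_distribution='uniform random',
                          pdf_sample_size=1000,
                          min_candidate=0, max_candidate=1)
    wa.generate_random_set(number_of_items_in_set=50)
    matched = wa.match_sample_to_generated_set(sample=np.linspace(0.1, 0.9, 50))
    print(matched)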
cavestruz/L500analysis
utils/averaging/weighted_averages.py
Python
mit
1,863
[ "Gaussian" ]
d8b6aae2404d501029a79f7b966aeba65772d44a432719fedfe6d27149c4daba
#!/usr/bin/env python # # This code was copied from the data generation program of Tencent Alchemy # project (https://github.com/tencent-alchemy). # # # Copyright 2019 Tencent America LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: Qiming Sun <osirpt.sun@gmail.com> # import numpy import scipy.linalg from pyscf import gto from pyscf import lib from pyscf import scf from pyscf import df from pyscf.ao2mo.outcore import balance_partition from pyscf.gto.moleintor import getints, make_cintopt from pyscf.lib import logger from pyscf.grad import rhf as rhf_grad def get_jk(mf_grad, mol=None, dm=None, hermi=0, with_j=True, with_k=True): if mol is None: mol = mf_grad.mol #if dm is None: dm = mf_grad.base.make_rdm1() #TODO: dm has to be the SCF density matrix in this version. dm should be # extended to any 1-particle density matrix dm = mf_grad.base.make_rdm1() with_df = mf_grad.base.with_df auxmol = with_df.auxmol if auxmol is None: auxmol = df.addons.make_auxmol(with_df.mol, with_df.auxbasis) ao_loc = mol.ao_loc nbas = mol.nbas nauxbas = auxmol.nbas get_int3c_s1 = _int3c_wrapper(mol, auxmol, 'int3c2e', 's1') get_int3c_s2 = _int3c_wrapper(mol, auxmol, 'int3c2e', 's2ij') get_int3c_ip1 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip1', 's1') get_int3c_ip2 = _int3c_wrapper(mol, auxmol, 'int3c2e_ip2', 's2ij') nao = mol.nao naux = auxmol.nao dms = numpy.asarray(dm) out_shape = dms.shape[:-2] + (3,) + dms.shape[-2:] dms = dms.reshape(-1,nao,nao) nset = dms.shape[0] auxslices = auxmol.aoslice_by_atom() aux_loc = auxmol.ao_loc max_memory = mf_grad.max_memory - lib.current_memory()[0] blksize = int(min(max(max_memory * .5e6/8 / (nao**2*3), 20), naux, 240)) ao_ranges = balance_partition(aux_loc, blksize) if not with_k: idx = numpy.arange(nao) dm_tril = dms + dms.transpose(0,2,1) dm_tril[:,idx,idx] *= .5 dm_tril = lib.pack_tril(dm_tril) # (i,j|P) rhoj = numpy.empty((nset,naux)) for shl0, shl1, nL in ao_ranges: int3c = get_int3c_s2((0, nbas, 0, nbas, shl0, shl1)) # (i,j|P) p0, p1 = aux_loc[shl0], aux_loc[shl1] rhoj[:,p0:p1] = numpy.einsum('wp,nw->np', int3c, dm_tril) int3c = None # (P|Q) int2c = auxmol.intor('int2c2e', aosym='s1') rhoj = scipy.linalg.solve(int2c, rhoj.T, sym_pos=True).T int2c = None # (d/dX i,j|P) vj = numpy.zeros((nset,3,nao,nao)) for shl0, shl1, nL in ao_ranges: int3c = get_int3c_ip1((0, nbas, 0, nbas, shl0, shl1)) # (i,j|P) p0, p1 = aux_loc[shl0], aux_loc[shl1] vj += numpy.einsum('xijp,np->nxij', int3c, rhoj[:,p0:p1]) int3c = None if mf_grad.auxbasis_response: # (i,j|d/dX P) vjaux = numpy.empty((3,naux)) for shl0, shl1, nL in ao_ranges: int3c = get_int3c_ip2((0, nbas, 0, nbas, shl0, shl1)) # (i,j|P) p0, p1 = aux_loc[shl0], aux_loc[shl1] vjaux[:,p0:p1] = numpy.einsum('xwp,mw,np->xp', int3c, dm_tril, rhoj[:,p0:p1]) int3c = None # (d/dX P|Q) int2c_e1 = auxmol.intor('int2c2e_ip1', aosym='s1') vjaux -= numpy.einsum('xpq,mp,nq->xp', int2c_e1, rhoj, rhoj) vjaux = [-vjaux[:,p0:p1].sum(axis=1) for p0, p1 in auxslices[:,2:]] vj = lib.tag_array(-vj.reshape(out_shape), 
aux=numpy.array(vjaux)) else: vj = -vj.reshape(out_shape) return vj, None mo_coeff = mf_grad.base.mo_coeff mo_occ = mf_grad.base.mo_occ nmo = mo_occ.shape[-1] if isinstance(mf_grad.base, scf.rohf.ROHF): mo_coeff = numpy.vstack((mo_coeff,mo_coeff)) mo_occa = numpy.array(mo_occ> 0, dtype=numpy.double) mo_occb = numpy.array(mo_occ==2, dtype=numpy.double) assert(mo_occa.sum() + mo_occb.sum() == mo_occ.sum()) mo_occ = numpy.vstack((mo_occa, mo_occb)) mo_coeff = numpy.asarray(mo_coeff).reshape(-1,nao,nmo) mo_occ = numpy.asarray(mo_occ).reshape(-1,nmo) rhoj = numpy.zeros((nset,naux)) f_rhok = lib.H5TmpFile() orbo = [] for i in range(nset): c = numpy.einsum('pi,i->pi', mo_coeff[i][:,mo_occ[i]>0], numpy.sqrt(mo_occ[i][mo_occ[i]>0])) nocc = c.shape[1] orbo.append(c) # (P|Q) int2c = scipy.linalg.cho_factor(auxmol.intor('int2c2e', aosym='s1')) max_memory = mf_grad.max_memory - lib.current_memory()[0] blksize = max_memory * .5e6/8 / (naux*nao) mol_ao_ranges = balance_partition(ao_loc, blksize) nsteps = len(mol_ao_ranges) for istep, (shl0, shl1, nd) in enumerate(mol_ao_ranges): int3c = get_int3c_s1((0, nbas, shl0, shl1, 0, nauxbas)) p0, p1 = ao_loc[shl0], ao_loc[shl1] rhoj += numpy.einsum('nlk,klp->np', dms[:,p0:p1], int3c) for i in range(nset): v = lib.einsum('ko,klp->plo', orbo[i], int3c) v = scipy.linalg.cho_solve(int2c, v.reshape(naux,-1)) f_rhok['%s/%s'%(i,istep)] = v.reshape(naux,p1-p0,-1) int3c = v = None rhoj = scipy.linalg.cho_solve(int2c, rhoj.T).T int2c = None def load(set_id, p0, p1): nocc = orbo[set_id].shape[1] buf = numpy.empty((p1-p0,nocc,nao)) col1 = 0 for istep in range(nsteps): dat = f_rhok['%s/%s'%(set_id,istep)][p0:p1] col0, col1 = col1, col1 + dat.shape[1] buf[:p1-p0,:,col0:col1] = dat.transpose(0,2,1) return buf vj = numpy.zeros((nset,3,nao,nao)) vk = numpy.zeros((nset,3,nao,nao)) # (d/dX i,j|P) for shl0, shl1, nL in ao_ranges: int3c = get_int3c_ip1((0, nbas, 0, nbas, shl0, shl1)) # (i,j|P) p0, p1 = aux_loc[shl0], aux_loc[shl1] vj += numpy.einsum('xijp,np->nxij', int3c, rhoj[:,p0:p1]) for i in range(nset): tmp = lib.einsum('xijp,jo->xipo', int3c, orbo[i]) rhok = load(i, p0, p1) vk[i] += lib.einsum('xipo,pok->xik', tmp, rhok) tmp = rhok = None int3c = None max_memory = mf_grad.max_memory - lib.current_memory()[0] blksize = int(min(max(max_memory * .5e6/8 / (nao*nocc), 20), naux)) rhok_oo = [] for i in range(nset): nocc = orbo[i].shape[1] tmp = numpy.empty((naux,nocc,nocc)) for p0, p1 in lib.prange(0, naux, blksize): rhok = load(i, p0, p1) tmp[p0:p1] = lib.einsum('pok,kr->por', rhok, orbo[i]) rhok_oo.append(tmp) rhok = tmp = None if mf_grad.auxbasis_response: vjaux = numpy.zeros((3,naux)) vkaux = numpy.zeros((3,naux)) # (i,j|d/dX P) for shl0, shl1, nL in ao_ranges: int3c = get_int3c_ip2((0, nbas, 0, nbas, shl0, shl1)) # (i,j|P) p0, p1 = aux_loc[shl0], aux_loc[shl1] int3c = int3c.transpose(0,2,1).reshape(3*(p1-p0),-1) int3c = lib.unpack_tril(int3c) int3c = int3c.reshape(3,p1-p0,nao,nao) vjaux[:,p0:p1] = numpy.einsum('xpij,mji,np->xp', int3c, dms, rhoj[:,p0:p1]) for i in range(nset): tmp = rhok_oo[i][p0:p1] tmp = lib.einsum('por,ir->pio', tmp, orbo[i]) tmp = lib.einsum('pio,jo->pij', tmp, orbo[i]) vkaux[:,p0:p1] += lib.einsum('xpij,pij->xp', int3c, tmp) int3c = tmp = None # (d/dX P|Q) int2c_e1 = auxmol.intor('int2c2e_ip1') vjaux -= numpy.einsum('xpq,mp,nq->xp', int2c_e1, rhoj, rhoj) for i in range(nset): tmp = lib.einsum('pij,qij->pq', rhok_oo[i], rhok_oo[i]) vkaux -= numpy.einsum('xpq,pq->xp', int2c_e1, tmp) vjaux = [-vjaux[:,p0:p1].sum(axis=1) for p0, p1 in auxslices[:,2:]] vkaux = 
[-vkaux[:,p0:p1].sum(axis=1) for p0, p1 in auxslices[:,2:]] vj = lib.tag_array(-vj.reshape(out_shape), aux=numpy.array(vjaux)) vk = lib.tag_array(-vk.reshape(out_shape), aux=numpy.array(vkaux)) else: vj = -vj.reshape(out_shape) vk = -vk.reshape(out_shape) return vj, vk def _int3c_wrapper(mol, auxmol, intor, aosym): nbas = mol.nbas pmol = mol + auxmol intor = mol._add_suffix(intor) opt = make_cintopt(mol._atm, mol._bas, mol._env, intor) def get_int3c(shls_slice=None): if shls_slice is None: shls_slice = (0, nbas, 0, nbas, nbas, pmol.nbas) else: shls_slice = shls_slice[:4] + (nbas+shls_slice[4], nbas+shls_slice[5]) return getints(intor, pmol._atm, pmol._bas, pmol._env, shls_slice, aosym=aosym, cintopt=opt) return get_int3c class Gradients(rhf_grad.Gradients): '''Restricted density-fitting Hartree-Fock gradients''' def __init__(self, mf): # Whether to include the response of DF auxiliary basis when computing # nuclear gradients of J/K matrices self.auxbasis_response = True rhf_grad.Gradients.__init__(self, mf) get_jk = get_jk def get_j(self, mol=None, dm=None, hermi=0): return self.get_jk(mol, dm, with_k=False)[0] def get_k(self, mol=None, dm=None, hermi=0): return self.get_jk(mol, dm, with_j=False)[1] def get_veff(self, mol=None, dm=None): vj, vk = self.get_jk(mol, dm) vhf = vj - vk*.5 if self.auxbasis_response: e1_aux = vj.aux - vk.aux*.5 logger.debug1(self, 'sum(auxbasis response) %s', e1_aux.sum(axis=0)) vhf = lib.tag_array(vhf, aux=e1_aux) return vhf def extra_force(self, atom_id, envs): if self.auxbasis_response: return envs['vhf'].aux[atom_id] else: return 0 Grad = Gradients if __name__ == '__main__': mol = gto.Mole() mol.atom = [ ['O' , (0. , 0. , 0.)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)] ] mol.basis = '631g' mol.build() mf = scf.RHF(mol).density_fit(auxbasis='ccpvdz-jkfit').run() g = Gradients(mf).set(auxbasis_response=not False).kernel() print(lib.finger(g) - 0.0055166381900824879) g = Gradients(mf).kernel() print(lib.finger(g) - 0.005516638190173352) print(abs(g-scf.RHF(mol).run().Gradients().kernel()).max()) # -0.0000000000 -0.0000000000 -0.0241140368 # 0.0000000000 0.0043935801 0.0120570184 # 0.0000000000 -0.0043935801 0.0120570184 mfs = mf.as_scanner() e1 = mfs([['O' , (0. , 0. , 0.001)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)] ]) e2 = mfs([['O' , (0. , 0. ,-0.001)], [1 , (0. , -0.757 , 0.587)], [1 , (0. , 0.757 , 0.587)] ]) print((e1-e2)/0.002*lib.param.BOHR)
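# Illustrative sketch (not part of the original module, kept as comments so the
# __main__ block above is unchanged): the auxiliary-basis response is returned
# by attaching an `aux` attribute to vj/vk through lib.tag_array, which yields
# an object that still behaves like a plain ndarray in arithmetic:
#
#   import numpy
#   from pyscf import lib
#   v = lib.tag_array(numpy.zeros((3, 2, 2)), aux=numpy.ones(3))
#   print(v.aux.sum())       # the tagged attribute travels with the array
#   print((v * 2.0).shape)   # ordinary numpy arithmetic still applies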
sunqm/pyscf
pyscf/df/grad/rhf.py
Python
apache-2.0
11,472
[ "PySCF" ]
6b460841167552fb6bff096e8520574df5e193dc370604695e0753f76c45a54a
from gpaw.xc.gllb.contribution import Contribution from gpaw.xc import XC from gpaw.xc.pawcorrection import rnablaY_nLv from gpaw.sphere.lebedev import weight_n import numpy as np from numpy import dot as dot3 # Avoid dotblas bug! from math import pi, sqrt class C_XC(Contribution): def __init__(self, nlfunc, weight, functional = 'LDA'): Contribution.__init__(self, nlfunc, weight) self.functional = functional def get_name(self): return 'XC' def get_desc(self): return "("+self.functional+")" def initialize(self): self.xc = XC(self.functional) self.vt_sg = self.nlfunc.finegd.empty(self.nlfunc.nspins) self.e_g = self.nlfunc.finegd.empty() def initialize_1d(self): self.ae = self.nlfunc.ae self.xc = XC(self.functional) self.v_g = np.zeros(self.ae.N) def calculate_spinpaired(self, e_g, n_g, v_g): self.e_g[:] = 0.0 self.vt_sg[:] = 0.0 self.xc.calculate(self.nlfunc.finegd, n_g[None, ...], self.vt_sg, self.e_g) v_g += self.weight * self.vt_sg[0] e_g += self.weight * self.e_g def calculate_spinpolarized(self, e_g, n_sg, v_sg): self.e_g[:] = 0.0 self.vt_sg[:] = 0.0 self.xc.calculate(self.nlfunc.finegd, n_sg, self.vt_sg, self.e_g) #self.xc.get_energy_and_potential(na_g, self.vt_sg[0], nb_g, self.vt_sg[1], e_g=self.e_g) v_sg[0] += self.weight * self.vt_sg[0] v_sg[1] += self.weight * self.vt_sg[1] e_g += self.weight * self.e_g def calculate_energy_and_derivatives(self, setup, D_sp, H_sp, a, addcoredensity=True): E = self.xc.calculate_paw_correction(setup, D_sp, H_sp, True, a) E += setup.xc_correction.Exc0 return E def add_xc_potential_and_energy_1d(self, v_g): self.v_g[:] = 0.0 Exc = self.xc.calculate_spherical(self.ae.rgd, self.ae.n.reshape((1, -1)), self.v_g.reshape((1, -1))) v_g += self.weight * self.v_g return self.weight * Exc def add_smooth_xc_potential_and_energy_1d(self, vt_g): self.v_g[:] = 0.0 Exc = self.xc.calculate_spherical(self.ae.rgd, self.ae.nt.reshape((1, -1)), self.v_g.reshape((1, -1))) vt_g += self.weight * self.v_g return self.weight * Exc def initialize_from_atomic_orbitals(self, basis_functions): # LDA needs only density, which is already initialized pass def add_extra_setup_data(self, dict): # LDA has not any special data pass def write(self, writer, natoms): # LDA has not any special data to be written pass def read(self, reader): # LDA has not any special data to be read pass
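# Illustrative sketch (not part of the original module): every GLLB
# Contribution adds a weight-scaled piece to shared energy-density and
# potential arrays, which is all calculate_spinpaired above does around the
# underlying XC call. A functional-agnostic toy version of that pattern:
#
#   import numpy as np
#   e_g, v_g = np.zeros(8), np.zeros(8)
#   pieces = [(0.3, np.ones(8), np.full(8, -0.5)),
#             (0.7, 2.0 * np.ones(8), np.full(8, -1.0))]
#   for weight, e_piece, v_piece in pieces:
#       e_g += weight * e_piece
#       v_g += weight * v_piece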
robwarm/gpaw-symm
gpaw/xc/gllb/c_xc.py
Python
gpl-3.0
2,938
[ "GPAW" ]
03fef6fa1cc18f3b3907bcea651f6a8720643997910efdfdff2ad029115a71d3
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # this test is designed to check the operation of the 8bit # export of BMPs # Image pipeline reader = vtk.vtkBMPReader() reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/masonry.bmp") reader.SetAllow8BitBMP(1) map = vtk.vtkImageMapToColors() map.SetInputConnection(reader.GetOutputPort()) map.SetLookupTable(reader.GetLookupTable()) map.SetOutputFormatToRGB() viewer = vtk.vtkImageViewer() viewer.SetInputConnection(map.GetOutputPort()) viewer.SetColorWindow(256) viewer.SetColorLevel(127.5) #make interface viewer.Render() # --- end of script --
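# Illustrative extension (not part of the original regression test, kept as
# comments so the test behaviour is unchanged): the RGB output of the color
# mapping could also be written to disk, e.g. with vtkPNGWriter. The output
# file name is an arbitrary assumption.
#
# writer = vtk.vtkPNGWriter()
# writer.SetInputConnection(map.GetOutputPort())
# writer.SetFileName("masonry_rgb.png")
# writer.Write()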
HopeFOAM/HopeFOAM
ThirdParty-0.1/ParaView-5.0.1/VTK/IO/Image/Testing/Python/TestBMPReader.py
Python
gpl-3.0
687
[ "VTK" ]
0ee3c7bfd1f500ac5de855175311efabd6cbd2cf3c62328b6ffe455cd05c31a8
import redistrib from setuptools import setup requirements = [ 'click==6.7', 'hiredis==0.2.0', 'retrying==1.3.3', 'six==1.11.0', 'Werkzeug==0.14.1', ] setup( name='redis-trib', version=redistrib.__version__, author='Neuron Teckid', author_email='lene13@gmail.com', license='MIT', keywords='Redis Cluster', url=redistrib.REPO, description='Redis Cluster tools in Python', packages=['redistrib'], long_description='Visit ' + redistrib.REPO + ' for details please.', install_requires=requirements, zip_safe=False, entry_points=dict( console_scripts=[ 'redis-trib.py=redistrib.console:main', ], ), )
HunanTV/redis-trib.py
setup.py
Python
mit
698
[ "NEURON", "VisIt" ]
aa2dcf4f95d92632886d7c6d0e4117c460bd0419e2fde8caf8d437e81faf3d9f
import galaxy.tools from galaxy.tools.parameters.basic import ( DataToolParameter, DataCollectionToolParameter, SelectToolParameter, ) from galaxy.tools.parameters.grouping import ( Repeat, Conditional, ) PARAMS_UNWRAPPED = object() class WrappedParameters( object ): def __init__( self, trans, tool, incoming ): self.trans = trans self.tool = tool self.incoming = incoming self._params = PARAMS_UNWRAPPED @property def params( self ): if self._params is PARAMS_UNWRAPPED: params = make_dict_copy( self.incoming ) self.wrap_values( self.tool.inputs, params, skip_missing_values=not self.tool.check_values ) self._params = params return self._params def wrap_values( self, inputs, input_values, skip_missing_values=False ): trans = self.trans tool = self.tool incoming = self.incoming # Wrap tool inputs as necessary for input in inputs.itervalues(): if input.name not in input_values and skip_missing_values: continue if isinstance( input, Repeat ): for d in input_values[ input.name ]: self.wrap_values( input.inputs, d, skip_missing_values=skip_missing_values ) elif isinstance( input, Conditional ): values = input_values[ input.name ] current = values[ "__current_case__" ] self.wrap_values( input.cases[current].inputs, values, skip_missing_values=skip_missing_values ) elif isinstance( input, DataToolParameter ) and input.multiple: input_values[ input.name ] = \ galaxy.tools.DatasetListWrapper( input_values[ input.name ], datatypes_registry=trans.app.datatypes_registry, tool=tool, name=input.name ) elif isinstance( input, DataToolParameter ): input_values[ input.name ] = \ galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ], datatypes_registry=trans.app.datatypes_registry, tool=tool, name=input.name ) elif isinstance( input, SelectToolParameter ): input_values[ input.name ] = galaxy.tools.SelectToolParameterWrapper( input, input_values[ input.name ], tool.app, other_values=incoming ) elif isinstance( input, DataCollectionToolParameter ): input_values[ input.name ] = galaxy.tools.DatasetCollectionWrapper( input_values[ input.name ], datatypes_registry=trans.app.datatypes_registry, tool=tool, name=input.name, ) else: input_values[ input.name ] = galaxy.tools.InputValueWrapper( input, input_values[ input.name ], incoming ) def make_dict_copy( from_dict ): """ Makes a copy of input dictionary from_dict such that all values that are dictionaries result in creation of a new dictionary ( a sort of deepcopy ). We may need to handle other complex types ( e.g., lists, etc ), but not sure... Yes, we need to handle lists (and now are)... """ copy_from_dict = {} for key, value in from_dict.items(): if type( value ).__name__ == 'dict': copy_from_dict[ key ] = make_dict_copy( value ) elif isinstance( value, list ): copy_from_dict[ key ] = make_list_copy( value ) else: copy_from_dict[ key ] = value return copy_from_dict def make_list_copy( from_list ): new_list = [] for value in from_list: if isinstance( value, dict ): new_list.append( make_dict_copy( value ) ) elif isinstance( value, list ): new_list.append( make_list_copy( value ) ) else: new_list.append( value ) return new_list __all__ = [ WrappedParameters, make_dict_copy ]
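# Illustrative sketch (not part of the original module): make_dict_copy and
# make_list_copy rebuild nested dicts and lists, so mutating the copy leaves
# the source structure untouched. The sample data is an arbitrary assumption.
if __name__ == '__main__':
    original = { 'a': { 'b': [ 1, 2 ] }, 'c': 3 }
    copied = make_dict_copy( original )
    copied[ 'a' ][ 'b' ].append( 4 )
    # the nested list inside the original dictionary is not affected
    assert original[ 'a' ][ 'b' ] == [ 1, 2 ]
    assert copied[ 'a' ][ 'b' ] == [ 1, 2, 4 ]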
mikel-egana-aranguren/SADI-Galaxy-Docker
galaxy-dist/lib/galaxy/tools/parameters/wrapped.py
Python
gpl-3.0
4,251
[ "Galaxy" ]
d4c93b01869f7b8ac28b523dbd232676f69c0691f82ad8bb33b0aef23afce220
# Copyright (c) 2016, Xilinx, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. __author__ = "Peter Ogden" __copyright__ = "Copyright 2017, Xilinx" __email__ = "ogden@xilinx.com" import pycparser import struct import functools import itertools import collections import math import re import os from pycparser import c_ast from pycparser import c_generator from pycparser.plyparser import ParseError from copy import deepcopy from pynq.ps import ZU_ARCH, CPU_ARCH, ZYNQ_ARCH from .compile import preprocess from .streams import SimpleMBStream from .streams import InterruptMBStream from . import MicroblazeProgram # Use a global parser and generator _parser = pycparser.CParser() _generator = c_generator.CGenerator() if CPU_ARCH == ZYNQ_ARCH: PTR_OFFSET = "0x20000000" elif CPU_ARCH == ZU_ARCH: PTR_OFFSET = "0x80000000" else: PTR_OFFSET = "0x0" # First we define a series of classes to represent types # Each class is responsible for one particular type of C # types class PrimitiveWrapper: """ Wrapper for C primitives that can be represented by a single Struct string. """ def __init__(self, struct_string, type_): self._struct = struct.Struct(struct_string) self.typedefname = None self.blocks = False self._type = type_ def param_encode(self, old_val): return self._struct.pack(old_val) def param_decode(self, old_val, stream): pass def return_decode(self, stream): data = stream.read(self._struct.size) return self._struct.unpack(data)[0] def pre_argument(self, name): commands = [] commands.append(_generate_decl(name, self._type)) commands.append(_generate_read(name)) return commands def post_argument(self, name): return [] class VoidPointerWrapper: """ Wrapper for a void* pointer that will refer to a physically contiguous chunk of memory. 
""" def __init__(self, type_): self._type = type_ self.typedefname = None self.blocks = False self._ptrstruct = struct.Struct('I') def param_encode(self, old_val): return self._ptrstruct.pack(old_val.physical_address) def param_decode(self, old_val, stream): pass def return_decode(self, stream): raise RuntimeError("Cannot return a void*") def pre_argument(self, name): commands = [] commands.append(_generate_decl( name + '_int', c_ast.TypeDecl(name + '_int', [], c_ast.IdentifierType(['unsigned', 'int'])))) commands.append(_generate_read(name +'_int')) commands.append(c_ast.Assignment( '|=', c_ast.ID(name + '_int'), c_ast.Constant('int', PTR_OFFSET))) commands.append( c_ast.Decl(name, [], [], [], c_ast.PtrDecl( [], c_ast.TypeDecl(name, [], c_ast.IdentifierType(['void'])), ), c_ast.Cast( c_ast.Typename( None, [], c_ast.PtrDecl( [], c_ast.TypeDecl( None, [], c_ast.IdentifierType(['void'])))), c_ast.ID(name + '_int')), [] )) return commands def post_argument(self, name): return [] class ConstPointerWrapper: """ Wrapper for const T pointers, transfers data in only one direction. """ def __init__(self, type_, struct_string): self._lenstruct = struct.Struct('h') self._struct_string = struct_string self.typedefname = None self.blocks = False self._type = type_ def param_encode(self, old_val): packed = struct.pack(self._struct_string * len(old_val), *old_val) return self._lenstruct.pack(len(old_val)) + packed def param_decode(self, old_val, stream): pass def return_decode(self, stream): raise RuntimeError("Cannot use a const T* decoder as a return value") def pre_argument(self, name): commands = [] commands.append( _generate_decl( name + '_len', c_ast.TypeDecl(name + '_len', [], c_ast.IdentifierType(['unsigned', 'short'])))) commands.append(_generate_read(name + '_len')) commands.append(_generate_arraydecl(name, self._type, c_ast.ID(name + '_len'))) commands.append(_generate_read(name, address=False)) return commands def post_argument(self, name): return [] class ConstCharPointerWrapper(ConstPointerWrapper): """ Wrapper for const char*s which accepts Python strings and makes sure they are NULL-terminated """ def __init__(self, type_): super().__init__(type_, 'b') def param_encode(self, old_val): if type(old_val) is str: val = bytearray(old_val.encode()) else: val = bytearray(old_val) val.append(0) return super().param_encode(val) class PointerWrapper: """ Wrapper for non-const T pointers that retrieves any data modified by the called function. 
""" def __init__(self, type_, struct_string): self._lenstruct = struct.Struct('h') self._struct_string = struct_string self.typedefname = None self.blocks = True self._type = type_ def param_encode(self, old_val): packed = struct.pack(self._struct_string * len(old_val), *old_val) return self._lenstruct.pack(len(old_val)) + packed def param_decode(self, old_val, stream): data = stream.read(self._lenstruct.size) length = self._lenstruct.unpack(data)[0] assert(length == len(old_val)) data = stream.read(length * struct.calcsize(self._struct_string)) old_val[:] = struct.unpack(self._struct_string * len(old_val), data) def return_decode(self, stream): raise RuntimeError("Cannot use a T* decoder as a return value") def pre_argument(self, name): commands = [] commands.append( _generate_decl( name + '_len', c_ast.TypeDecl(name + '_len', [], c_ast.IdentifierType(['unsigned', 'short'])))) commands.append(_generate_read(name + '_len')) commands.append(_generate_arraydecl(name, self._type, c_ast.ID(name + '_len'))) commands.append(_generate_read(name, address=False)) return commands def post_argument(self, name): commands = [] commands.append(_generate_write(name + '_len')) commands.append(_generate_write(name, address=False)) return commands class VoidWrapper: """ Wraps void - only valid for return types """ def __init__(self): self.typedefname = None self.blocks = False def param_encode(self, old_val): return b'' def param_decode(self, old_val, stream): pass def return_decode(self, stream): return None def pre_argument(self, name): return [] def post_argument(self, name): return [] def _type_to_struct_string(tdecl): if type(tdecl) is not c_ast.TypeDecl: raise RuntimeError("Unsupport Type") names = tdecl.type.names signed = True if len(names) > 1: if names[0] == 'unsigned': signed = False name = names[1] else: name = names[0] if name == 'void': return '' if name in ['long', 'int']: if names.count('long') == 2: if signed: return 'q' else: return 'Q' else: if signed: return 'i' else: return 'I' if name == 'short': if signed: return 'h' else: return 'H' if name == 'char': if signed: return 'b' else: return 'B' if name == 'float': return 'f' raise RuntimeError('Unknown type {}'.format(name)) class MicroblazeError(Exception): pass class PyIntWrapper(PrimitiveWrapper): def __init__(self, type_): super().__init__('i', type_) def return_decode(self, stream): data = stream.read(self._struct.size) val = self._struct.unpack(data)[0] if val < 0: raise MicroblazeError(os.strerror(-val)) return val class PyVoidWrapper(PrimitiveWrapper): def __init__(self, type_): super().__init__('i', type_) def return_decode(self, stream): data = stream.read(self._struct.size) val = self._struct.unpack(data)[0] if val < 0: raise MicroblazeError(os.strerror(-val)) # Swallow the return value class PyBoolWrapper(PrimitiveWrapper): def __init__(self, type_): super().__init__('i', type_) def return_decode(self, stream): data = stream.read(self._struct.size) val = self._struct.unpack(data)[0] if val < 0: raise MicroblazeError(os.strerror(-val)) return bool(val) class PyFloatWrapper(PrimitiveWrapper): def __init__(self, type_): super().__init__('f', type_) def return_decode(self, stream): data = stream.read(self._struct.size) val = self._struct.unpack(data)[0] if math.isnan(val): raise MicroblazeError("An Unknown Error Occurred") return val _interface_overrides = { 'py_int': PyIntWrapper, 'py_bool': PyBoolWrapper, 'py_float': PyFloatWrapper, 'py_void': PyVoidWrapper } def _type_to_interface(tdecl, typedefs): """ Returns a wrapper for a 
given C AST """ if type(tdecl) is c_ast.PtrDecl: nested_type = tdecl.type if type(nested_type) is not c_ast.TypeDecl: raise RuntimeError("Only single level pointers supported") struct_string = _type_to_struct_string(nested_type) if struct_string: if 'const' in nested_type.quals: if struct_string == 'b': return ConstCharPointerWrapper(tdecl) else: return ConstPointerWrapper(tdecl, struct_string) else: return PointerWrapper(tdecl, struct_string) else: return VoidPointerWrapper(tdecl) elif type(tdecl) is not c_ast.TypeDecl: raise RuntimeError("Unsupport Type") names = tdecl.type.names if len(names) == 1 and names[0] in typedefs: if names[0] in _interface_overrides: interface = _interface_overrides[names[0]](tdecl) else: interface = _type_to_interface(typedefs[names[0]], typedefs) interface.typedefname = names[0] return interface struct_string = _type_to_struct_string(tdecl) if struct_string: return PrimitiveWrapper(struct_string, tdecl) else: return VoidWrapper() def _generate_read(name, size=None, address=True): """ Helper function to generate read functions. size should be an AST fragment """ if size is None: size = c_ast.UnaryOp('sizeof', c_ast.ID(name)) if address: target = c_ast.UnaryOp('&', c_ast.ID(name)) else: target = c_ast.ID(name) return c_ast.FuncCall( c_ast.ID('_rpc_read'), c_ast.ExprList([target, size])) def _generate_write(name, address=True): """ Helper function generate write functions """ if address: target = c_ast.UnaryOp('&', c_ast.ID(name)) else: target = c_ast.ID(name) return c_ast.FuncCall( c_ast.ID('_rpc_write'), c_ast.ExprList([target, c_ast.UnaryOp('sizeof', c_ast.ID(name))])) def _generate_decl(name, decl): """ Generates a new declaration with a difference name but same type as the provided decl. """ typedecl = c_ast.TypeDecl(name, [], decl.type) return c_ast.Decl(name, [], [], [], typedecl, [], []) def _generate_arraydecl(name, decl, length): """ Generates a new declaration with an array type base on an existing declaration """ typedecl = c_ast.TypeDecl(name, [], decl.type) arraydecl = c_ast.ArrayDecl(typedecl, length, []) return c_ast.Decl(name, [], [], [], arraydecl, [], []) class FuncAdapter: """Provides the C and Python interfaces for a function declaration Attributes ---------- return_interface : TypeWrapper The type wrapper for the return type arg_interfaces : [TypeWrapper] An array of type wrappers for the arguments call_ast : pycparser.c_ast Syntax tree for the wrapped function call """ def __init__(self, decl, typedefs): self.return_interface = _type_to_interface(decl.type, typedefs) self.name = decl.type.declname self.docstring = _get_docstring(decl.coord) self.arg_interfaces = [] self.args = [] self.blocks = False self.coord = decl.coord block_contents = [] post_block_contents = [] func_args = [] if decl.args: for i, arg in enumerate(decl.args.params): if type(arg) is c_ast.EllipsisParam: raise RuntimeError("vararg functions not supported") interface = _type_to_interface(arg.type, typedefs) if type(interface) is VoidWrapper: continue block_contents.extend(interface.pre_argument('arg' + str(i))) post_block_contents.extend(interface.post_argument( 'arg' + str(i))) func_args.append(c_ast.ID('arg' + str(i))) self.arg_interfaces.append(interface) self.blocks = self.blocks | interface.blocks if arg.name: self.args.append(arg.name) else: self.args.append(f'arg{len(self.args)}') function_call = c_ast.FuncCall(c_ast.ID(self.name), c_ast.ExprList(func_args)) self.returns = type(self.return_interface) is not VoidWrapper if self.returns: ret_assign = c_ast.Decl( 
'ret', [], [], [], c_ast.TypeDecl('ret', [], decl.type.type), function_call, [] ) block_contents.append(ret_assign) block_contents.append(_generate_write('return_command')) block_contents.extend(post_block_contents) block_contents.append(_generate_write('ret')) self.blocks = True else: block_contents.append(function_call) if self.blocks: block_contents.append(_generate_write('return_command')) else: block_contents.append(_generate_write('void_command')) block_contents.extend(post_block_contents) self.call_ast = c_ast.Compound(block_contents) self.filename = decl.coord.file def pack_args(self, *args): """Create a bytes of the provided arguments """ if len(args) != len(self.arg_interfaces): raise RuntimeError( "Wrong number of arguments: expected{0} got {1}".format( len(self.arg_interfaces), len(args) )) return b''.join( [f.param_encode(a) for f, a in itertools.zip_longest( self.arg_interfaces, args )] ) def receive_response(self, stream, *args): """Reads the response stream, updates arguments and returns the value of the function call if applicable """ if len(args) != len(self.arg_interfaces): raise RuntimeError( "Wrong number of arguments: expected{0} got {1}".format( len(self.arg_interfaces), len(args) )) [f.param_decode(a, stream) for f, a in itertools.zip_longest( self.arg_interfaces, args )] return_value = self.return_interface.return_decode(stream) return return_value class ParsedEnum: """Holds the values of an enum from the C source """ def __init__(self): self.name = None self.file = None self.items = {} class FuncDefVisitor(pycparser.c_ast.NodeVisitor): """Primary visitor that parses out function definitions, typedes and enumerations from a syntax tree """ def __init__(self): self.functions = {} self.typedefs = {} self.enums = [] self.defined = [] self.typedef_coords = {} self.function_coords = {} def visit_Typedef(self, node): self.typedefs[node.name] = node.type self.typedef_coords[node.name] = node.coord def visit_FuncDef(self, node): self.defined.append(node.decl.name) self.visit(node.decl) def visit_FuncDecl(self, node): if node.coord.file.startswith('/opt/microblaze'): return if type(node.type) is not c_ast.TypeDecl: # Ignore functions that are returning pointers return name = node.type.declname if 'static' in node.type.quals: # Don't process static functions return try: self.functions[name] = FuncAdapter(node, self.typedefs) self.function_coords[name] = node.coord except RuntimeError as e: if node.coord.file == '<stdin>': print("Could not create interface for funcion {}: {}".format( name, e)) def visit_Enum(self, node): enum = ParsedEnum() if node.name: enum.name = node.name enum.file = node.coord.file cur_index = 0 for entry in node.values.enumerators: if entry.value: cur_index = int(entry.value.value, 0) enum.items[entry.name] = cur_index cur_index += 1 self.enums.append(enum) def _build_case(functions): """ Builds the switch statement that will form the foundation of the RPC handler """ cases = [] for i, func in enumerate(functions.values()): case = c_ast.Case( c_ast.Constant('int', str(i)), [ func.call_ast, c_ast.Break() ]) cases.append(case) return c_ast.Switch( c_ast.ID('command'), c_ast.Compound(cases) ) def _build_handle_function(functions): """ Wraps the switch statement in a function definition """ case_statement = _build_case(functions) available_check = c_ast.If( c_ast.BinaryOp( '<', c_ast.FuncCall( c_ast.ID('mailbox_available'), c_ast.ExprList([c_ast.Constant('int', '2')]) ), c_ast.Constant('int', '4') ), c_ast.Return(None), None ) handle_decl = c_ast.FuncDecl( 
None, c_ast.TypeDecl('_handle_events', [], c_ast.IdentifierType(['void'])), ) command_decl = c_ast.Decl('command', [], [], [], c_ast.TypeDecl('command', [], c_ast.IdentifierType(['int'])), [], []) command_read = _generate_read('command') body = c_ast.Compound([available_check, command_decl, command_read, case_statement]) return c_ast.FuncDef(handle_decl, [], body) def _build_main(program_text, functions): sections = [] sections.append(R""" extern "C" { #include <unistd.h> #include <mailbox_io.h> } static const char return_command = 0; static const char void_command = 1; static void _rpc_read(void* data, int size) { int available = mailbox_available(2); while (available < size) { available = mailbox_available(2); } mailbox_read(2, data, size); } static void _rpc_write(const void* data, int size) { int available = mailbox_available(3); while (available < size) { available = mailbox_available(3); } mailbox_write(3, data, size); } """) sections.append(program_text) sections.append(_generator.visit(_build_handle_function(functions))) sections.append(R""" int main() { while (1) { _handle_events(); } } """) return "\n".join(sections) def _pyprintf(stream): format_string = stream.read_string() in_special = False args = [] for i in range(len(format_string)): if in_special: if format_string[i:i+1] in [b'd']: args.append(stream.read_int32()) elif format_string[i:i+1] in [b'x', b'X', b'o', b'u']: # perform unsigned conversion args.append(stream.read_uint32()) elif format_string[i:i+1] in [b'f', b'F', b'g', b'G', b'e', b'E']: args.append(stream.read_float()) elif format_string[i:i+1] == b's': args.append(stream.read_string().decode()) elif format_string[i:i+1] == b'c': args.append(stream.read_byte()) in_special = False elif format_string[i:i+1] == b'%': in_special = True print(format_string.decode() % tuple(args), end='') def _handle_command(command, stream): if command == 1: # Void return pass elif command == 2: # print command _pyprintf(stream) else: raise RuntimeError('Unknown command {}'.format(command)) def _function_wrapper(stream, index, adapter, return_type, *args): """ Calls a function in the microblaze, designed to be used with functools.partial to build a new thing """ arg_string = struct.pack('i', index) arg_string += adapter.pack_args(*args) stream.write(arg_string) if not adapter.returns: return None command = stream.read(1)[0] while command != 0: _handle_command(command, stream) command = stream.read(1)[0] response = adapter.receive_response(stream, *args) if return_type: return return_type(response) else: return response def _create_typedef_classes(typedefs, typedef_coords): """ Creates an anonymous class for each typedef in the C function """ classes = {} for k, v in typedefs.items(): class Wrapper: """Wrapper class for a C typedef The attributes are dynamically from the C definition using the functions name `type_`. If a function named this way takes `type` as the parameter it is added as a member function otherwise it is added as a static method. 
""" def __init__(self, val): self._val = val def __index__(self): return self._val def __int__(self): return self._val def _call_func(self, function, *args): return function(self._val, *args) def __repr__(self): return "typedef {0} containing {1}".format(type(self).__name__, repr(self._val)) _file = typedef_coords[k].file Wrapper.__name__ = k if k in typedef_coords: doc = _get_docstring(typedef_coords[k]) if doc: Wrapper.__doc__ = doc classes[k] = Wrapper return classes def _filter_typedefs(typedefs, function_names): used_typedefs = set() for t in typedefs: if len([f for f in function_names if f.startswith(t + "_")]) > 0: used_typedefs.add(t) return used_typedefs def _get_docstring(coord): try: with open(coord.file) as f: lines = f.readlines() except: return None # We need to subtract 2 as coord is 1-indexed keyline = lines[coord.line - 2].rstrip() comment_lines = collections.deque() if keyline.startswith('// '): i = coord.line - 2 while i >= 0 and lines[i].startswith('// '): comment_lines.appendleft(lines[i][3:].rstrip()) i -= 1 elif keyline.endswith('*/'): i = coord.line - 2 # Strip comment close line = re.sub(r'\W*\*+/\W*', '', lines[i]) if line: comment_lines.appendleft(line.rstrip()) i -= 1 # Add Intermediate lines while i >= 0 and not lines[i].startswith('/*'): line = lines[i].rstrip() line = re.sub(r' \* ?', '', line) comment_lines.appendleft(line) i -= 1 line = re.sub(r'/\*+\W*', '', lines[i].rstrip()) if line: comment_lines.appendleft(line) else: return None return "\n".join(comment_lines) class MicroblazeFunction: """Calls a specific function """ def __init__(self, stream, index, function, return_type): self.stream = stream self.index = index self.function = function self.return_type = return_type def _call_function(self, *args): arg_string = struct.pack('i', self.index) arg_string += self.function.pack_args(*args) self.stream.write(arg_string) def _handle_stream(self, *args): command = self.stream.read(1)[0] if command != 0: _handle_command(command, self.stream) return None, False return self.function.receive_response(self.stream, *args), True def __repr__(self): return "<MicroblazeFunction for " + self.function.name + ">" def call(self, *args): self._call_function(*args) if not self.function.blocks: return None return_value = None done = False while not done: return_value, done = self._handle_stream(*args) if self.return_type: return self.return_type(return_value) else: return return_value def __call__(self, *args): return self.call(*args) async def call_async(self, *args): self._call_function(*args) if not self.function.blocks: return None return_value = None done = False while return_value is None: await self.stream.wait_for_data_async() return_value, done = self._handle_stream(*args) if self.return_type: return self.return_type(return_value) else: return return_value def _create_function_class(function): call_func = f"""def call(self, {', '.join(function.args)}): {repr(function.docstring)} return self.call({', '.join(function.args)}) """ scope = {} exec(call_func, scope) derived = type("MicroblazeFuncion_"+function.name, (MicroblazeFunction,), {'__call__': scope['call'], '__doc__': function.docstring}) return derived def _create_instance_function(function): args = function.function.args func_string = f"""def wrapped({', '.join(['self'] + args[1:])}): {repr(function.__doc__)} return self._call_func(function, {', '.join(args[1:])}) """ scope = {'function': function} exec(func_string, scope) wrapped = scope['wrapped'] return wrapped class MicroblazeRPC: """ Provides a python 
interface to the Microblaze based on an RPC mechanism. The attributes of the class are generated dynamically from the typedefs, enumerations and functions given in the provided source. Functions are added as methods, the values in enumerations are added as constants to the class and types are added as classes. """ def __init__(self, iop, program_text): """ Create a new RPC instance Parameters ---------- iop : MicroblazeHierarchy or mb_info Microblaze instance to run the RPC server on program_text : str Source of the program to extract functions from """ preprocessed = preprocess(program_text, mb_info=iop) try: ast = _parser.parse(preprocessed, filename='program_text') except ParseError as e: raise RuntimeError("Error parsing code\n" + str(e)) visitor = FuncDefVisitor() visitor.visit(ast) main_text = _build_main(program_text, visitor.functions) used_typedefs = _filter_typedefs(visitor.typedefs, visitor.functions.keys()) typedef_classes = _create_typedef_classes(visitor.typedefs, visitor.typedef_coords) self._mb = MicroblazeProgram(iop, main_text) self._rpc_stream = InterruptMBStream( self._mb, read_offset=0xFC00, write_offset=0xF800) self._build_functions(visitor.functions, typedef_classes) self._build_constants(visitor.enums, typedef_classes) self._populate_typedefs(typedef_classes, visitor.functions) self.visitor = visitor self.active_functions = 0 def _build_constants(self, enums, classes): byfile = collections.defaultdict(list) for enum in enums: for name, value in enum.items.items(): setattr(self, name, value) byfile[enum.file].append((name, value)) for c in classes.values(): if c._file in byfile: for k, v in byfile[c._file]: setattr(c, k, v) def _build_functions(self, functions, typedef_classes): index = 0 for k, v in functions.items(): return_type = None if v.return_interface.typedefname: return_type = typedef_classes[v.return_interface.typedefname] FunctionType = _create_function_class(v) setattr(self, k, FunctionType( self._rpc_stream, index, v, return_type) ) index += 1 def _populate_typedefs(self, typedef_classes, functions): for name, cls in typedef_classes.items(): for fname, func in functions.items(): if fname.startswith(name + "_"): subname = fname[len(name)+1:] if (len(func.arg_interfaces) > 0 and func.arg_interfaces[0].typedefname == name): setattr(cls, subname, _create_instance_function(getattr(self, fname))) getters = [s for s in dir(cls) if s.startswith('get_')] for g in getters: p = g[4:] # Strip the get_ off the front for the name setattr(cls, p, property(getattr(cls, "get_" + p), getattr(cls, "set_" + p, None))) def reset(self): """Reset and free the microblaze for use by other programs """ self._mb.reset() def release(self): """Alias for `reset()` """ self.reset() class MicroblazeLibrary(MicroblazeRPC): """Provides simple Python-only access to a set of Microblaze libraries. The members of this class are determined by the libraries chosen and can determined either by using ``dir`` on the instance or the ``?`` operator inside of IPython """ def __init__(self, iop, libraries): """Create a Python API for a list of C libraries Libraries should be passed as the name of the header file containing the desired functions but without the ``.h`` extension Parameters ---------- iop : mb_info / MicroblazeHierarchy The IOP to load the libraries on libraries : list List of the names of the libraries to load """ source_text = "\n".join(['#include <{}.h>'.format(lib) for lib in libraries]) super().__init__(iop, source_text)
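# Illustrative usage sketch (not part of the original module): on a board with
# a Microblaze-capable overlay, MicroblazeLibrary exposes the functions of the
# requested C headers directly as Python methods. The overlay bitstream name,
# the PMODA attribute and what the 'i2c' header provides are assumptions drawn
# from typical PYNQ setups, not guaranteed by this module alone.
#
#   from pynq.overlays.base import BaseOverlay
#   base = BaseOverlay('base.bit')
#   lib = MicroblazeLibrary(base.PMODA, ['i2c'])
#   print(dir(lib))    # functions and constants generated from i2c.h
#   lib.release()      # reset and free the Microblaze when done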
schelleg/PYNQ
pynq/lib/pynqmicroblaze/rpc.py
Python
bsd-3-clause
34,235
[ "VisIt" ]
f1e275a1fa7e9ad849fe689b177de2b0768f18e00a7c31cee3d0888c97794c4a
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Control flow graph (CFG) structure for Python AST representation. The CFG is a digraph with edges representing valid control flow. Each node is associated with exactly one AST node, but not all AST nodes may have a corresponding CFG counterpart. Once built, the CFG itself is immutable, but the values it holds need not be; they are usually annotated with information extracted by walking the graph. """ # TODO(mdan): The notion of 'statements' below is inaccurate. # They should rather be called 'block statements', because they include # statements that may have a body, e.g. if and while. from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import weakref from enum import Enum # pylint:disable=g-bad-import-order import gast # pylint:enable=g-bad-import-order from tensorflow.python.autograph.pyct import compiler class Node(object): """A node in the CFG. Although new instances of this class are mutable, the objects that a user finds in the CFG are typically not. The nodes represent edges in the CFG graph, and maintain pointers to allow efficient walking in both forward and reverse order. The following property holds for all nodes: "child in node.next" iff "node in child.prev". Attributes: next: FrozenSet[Node, ...], the nodes that follow this node, in control flow order prev: FrozenSet[Node, ...], the nodes that precede this node, in reverse control flow order ast_node: ast.AST, the AST node corresponding to this CFG node """ def __init__(self, next_, prev, ast_node): self.next = next_ self.prev = prev self.ast_node = ast_node def freeze(self): self.next = frozenset(self.next) # Assumption: All CFG nodes have identical life spans, because the graph # owns them. Nodes should never be used outside the context of an existing # graph. self.prev = weakref.WeakSet(self.prev) def __repr__(self): if isinstance(self.ast_node, gast.FunctionDef): return 'def %s' % self.ast_node.name elif isinstance(self.ast_node, gast.withitem): return compiler.ast_to_source(self.ast_node.context_expr).strip() return compiler.ast_to_source(self.ast_node).strip() class Graph( collections.namedtuple( 'Graph', ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])): """A Control Flow Graph. The CFG maintains an index to allow looking up a CFG node by the AST node to which it is associated. The index can also be enumerated in top-down, depth first order. Walking the graph in forward or reverse order is supported by double parent-child links. Note: the error nodes are not wired to their corresponding finally guards, because these are shared, and wiring them would create a reverse path from normal control flow into the error nodes, which we want to avoid. The graph also maintains edges corresponding to higher level statements like for-else loops. 
A node is considered successor of a statement if there is an edge from a node that is lexically a child of that statement to a node that is not. Statement predecessors are analogously defined. Attributes: entry: Node, the entry node exit: FrozenSet[Node, ...], the exit nodes error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised error (errors propagated from function calls are not accounted) index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their predecessor CFG nodes stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their successor CFG nodes """ def __repr__(self): result = 'digraph CFG {\n' for node in self.index.values(): result += ' %s [label="%s"];\n' % (id(node), node) for node in self.index.values(): for next_ in node.next: result += ' %s -> %s;\n' % (id(node), id(next_)) result += '}' return result class _WalkMode(Enum): FORWARD = 1 REVERSE = 2 # TODO(mdan): Rename to DataFlowAnalyzer. # TODO(mdan): Consider specializations that use gen/kill/transfer abstractions. class GraphVisitor(object): """Base class for a CFG visitors. This implementation is not thread safe. The visitor has some facilities to simplify dataflow analyses. In particular, it allows revisiting the nodes at the decision of the subclass. This can be used to visit the graph until the state reaches a fixed point. For more details on dataflow analysis, see https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec02-Dataflow.pdf Note: the literature generally suggests visiting successor nodes only when the state of the current node changed, regardless of whether that successor has ever been visited. This implementation visits every successor at least once. Attributes: graph: Graph in_: Dict[Node, Any], stores node-keyed state during a visit out: Dict[Node, Any], stores node-keyed state during a visit """ def __init__(self, graph): self.graph = graph self.reset() def init_state(self, node): """State initialization function. Optional to overload. An in/out state slot will be created for each node in the graph. Subclasses must overload this to control what that is initialized to. Args: node: Node """ raise NotImplementedError('Subclasses must implement this.') # TODO(mdan): Rename to flow? def visit_node(self, node): """Visitor function. Args: node: Node Returns: bool, whether the node should be revisited; subclasses can visit every reachable node exactly once by always returning False """ raise NotImplementedError('Subclasses must implement this.') def reset(self): self.in_ = { node: self.init_state(node) for node in self.graph.index.values() } self.out = { node: self.init_state(node) for node in self.graph.index.values() } def _visit_internal(self, mode): """Visits the CFG, depth-first.""" assert mode in (_WalkMode.FORWARD, _WalkMode.REVERSE) if mode == _WalkMode.FORWARD: open_ = [self.graph.entry] elif mode == _WalkMode.REVERSE: open_ = list(self.graph.exit) closed = set() while open_: node = open_.pop(0) closed.add(node) should_revisit = self.visit_node(node) if mode == _WalkMode.FORWARD: children = node.next elif mode == _WalkMode.REVERSE: children = node.prev for next_ in children: if should_revisit or next_ not in closed: open_.append(next_) def visit_forward(self): self._visit_internal(_WalkMode.FORWARD) def visit_reverse(self): self._visit_internal(_WalkMode.REVERSE) class GraphBuilder(object): """Builder that constructs a CFG from a given AST. 
This GraphBuilder facilitates constructing the DAG that forms the CFG when nodes are supplied in lexical order (i.e., top-down, depth first). Under these conditions, it supports building patterns found in typical structured programs. This builder ignores the flow generated by exceptions, which are assumed to always be catastrophic and present purely for diagnostic purposes (e.g. to print debug information). Statements like raise and try/catch sections are allowed and will generate control flow edges, but ordinaty statements are assumed not to raise exceptions. Finally sections are also correctly interleaved between break/continue/return nodes and their subsequent statements. Important concepts: * nodes - nodes refer refer to CFG nodes; AST nodes are qualified explicitly * leaf set - since the graph is constructed gradually, a leaf set maintains the CFG nodes that will precede the node that the builder expects to receive next; when an ordinary node is added, it is connected to the existing leaves and it in turn becomes the new leaf * jump nodes - nodes that should generate edges other than what ordinary nodes would; these correspond to break, continue and return statements * sections - logical delimiters for subgraphs that require special edges; there are various types of nodes, each admitting various types of jump nodes; sections are identified by their corresponding AST node """ # TODO(mdan): Perhaps detail this in a markdown doc. # TODO(mdan): Add exception support. def __init__(self, parent_ast_node): self.reset() self.parent = parent_ast_node def reset(self): """Resets the state of this factory.""" self.head = None self.errors = set() self.node_index = {} # TODO(mdan): Too many primitives. Use classes. self.leaves = set() # Note: This mechanism requires that nodes are added in lexical order (top # to bottom, depth first). self.active_stmts = set() self.owners = {} # type: Set[any] self.forward_edges = set() # type: Tuple[Node, Node] # (from, to) self.finally_sections = {} # Dict values represent (entry, exits) self.finally_section_subgraphs = { } # type: Dict[ast.AST, Tuple[Node, Set[Node]]] # Whether the guard section can be reached from the statement that precedes # it. self.finally_section_has_direct_flow = {} # Finally sections that await their first node. self.pending_finally_sections = set() # Exit jumps keyed by the section they affect. self.exits = {} # The entry of loop sections, keyed by the section. self.section_entry = {} # Continue jumps keyed by the section they affect. self.continues = {} # The entry of conditional sections, keyed by the section. self.cond_entry = {} # Lists of leaf nodes corresponding to each branch in the section. self.cond_leaves = {} def _connect_nodes(self, first, second): """Connects nodes to signify that control flows from first to second. Args: first: Union[Set[Node, ...], Node] second: Node """ if isinstance(first, Node): first.next.add(second) second.prev.add(first) self.forward_edges.add((first, second)) else: for node in first: self._connect_nodes(node, second) def _add_new_node(self, ast_node): """Grows the graph by adding a CFG node following the current leaves.""" if ast_node is self.node_index: raise ValueError('%s added twice' % ast_node) # Assumption: All CFG nodes have identical life spans, because the graph # owns them. Nodes should never be used outside the context of an existing # graph. 
node = Node(next_=set(), prev=weakref.WeakSet(), ast_node=ast_node) self.node_index[ast_node] = node self.owners[node] = frozenset(self.active_stmts) if self.head is None: self.head = node for leaf in self.leaves: self._connect_nodes(leaf, node) # If any finally section awaits its first node, populate it. for section_id in self.pending_finally_sections: self.finally_section_subgraphs[section_id][0] = node self.pending_finally_sections = set() return node def begin_statement(self, stmt): """Marks the beginning of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes """ self.active_stmts.add(stmt) def end_statement(self, stmt): """Marks the end of a statement. Args: stmt: Hashable, a key by which the statement can be identified in the CFG's stmt_prev and stmt_next attributes; must match a key previously passed to begin_statement. """ self.active_stmts.remove(stmt) def add_ordinary_node(self, ast_node): """Grows the graph by adding an ordinary CFG node. Ordinary nodes are followed by the next node, in lexical order, that is, they become the new leaf set. Args: ast_node: ast.AST Returns: Node """ node = self._add_new_node(ast_node) self.leaves = set((node,)) return node def _add_jump_node(self, ast_node, guards): """Grows the graph by adding a jump node. Jump nodes are added to the current leaf set, and the leaf set becomes empty. If the jump node is the last in a cond section, then it may be added back to the leaf set by a separate mechanism. Args: ast_node: ast.AST guards: Tuple[ast.AST, ...], the finally sections active for this node Returns: Node """ node = self._add_new_node(ast_node) self.leaves = set() # The guards themselves may not yet be complete, and will be wired later. self.finally_sections[node] = guards return node def _connect_jump_to_finally_sections(self, node): """Connects a jump node to the finally sections protecting it.""" cursor = set((node,)) if node not in self.finally_sections: return cursor for guard_section_id in self.finally_sections[node]: guard_begin, guard_ends = self.finally_section_subgraphs[guard_section_id] self._connect_nodes(cursor, guard_begin) cursor = guard_ends del self.finally_sections[node] # TODO(mdan): Should garbage-collect finally_section_subgraphs. return cursor def add_exit_node(self, ast_node, section_id, guards): """Grows the graph by adding an exit node. This node becomes an exit for the current section. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node """ node = self._add_jump_node(ast_node, guards) self.exits[section_id].add(node) def add_continue_node(self, ast_node, section_id, guards): """Grows the graph by adding a reentry node. This node causes control flow to go back to the loop section's entry. Args: ast_node: ast.AST section_id: Hashable, the node for which ast_node should be considered to be an exit node guards: Tuple[ast.AST, ...], the finally sections that guard ast_node """ node = self._add_jump_node(ast_node, guards) self.continues[section_id].add(node) def add_error_node(self, ast_node, guards): """Grows the graph by adding an error node. This node becomes an exit for the entire graph. 
Args: ast_node: ast.AST guards: Tuple[ast.AST, ...], the finally sections that guard ast_node """ node = self._add_jump_node(ast_node, guards) self.errors.add(node) self.leaves = set() def enter_section(self, section_id): """Enters a regular section. Regular sections admit exit jumps, which end the section. Args: section_id: Hashable, the same node that will be used in calls to the ast_node arg passed to add_exit_node """ assert section_id not in self.exits self.exits[section_id] = set() def exit_section(self, section_id): """Exits a regular section.""" # Exits are jump nodes, which may be protected. for exit_ in self.exits[section_id]: self.leaves |= self._connect_jump_to_finally_sections(exit_) del self.exits[section_id] def enter_loop_section(self, section_id, entry_node): """Enters a loop section. Loop sections define an entry node. The end of the section always flows back to the entry node. These admit continue jump nodes which also flow to the entry node. Args: section_id: Hashable, the same node that will be used in calls to the ast_node arg passed to add_continue_node entry_node: ast.AST, the entry node into the loop (e.g. the test node for while loops) """ assert section_id not in self.section_entry assert section_id not in self.continues self.continues[section_id] = set() node = self.add_ordinary_node(entry_node) self.section_entry[section_id] = node def exit_loop_section(self, section_id): """Exits a loop section.""" self._connect_nodes(self.leaves, self.section_entry[section_id]) # continues are jump nodes, which may be protected. for reentry in self.continues[section_id]: guard_ends = self._connect_jump_to_finally_sections(reentry) self._connect_nodes(guard_ends, self.section_entry[section_id]) # Loop nodes always loop back. self.leaves = set((self.section_entry[section_id],)) del self.continues[section_id] del self.section_entry[section_id] def enter_cond_section(self, section_id): """Enters a conditional section. Conditional sections define an entry node, and one or more branches. Args: section_id: Hashable, the same node that will be used in calls to the section_id arg passed to new_cond_branch """ assert section_id not in self.cond_entry assert section_id not in self.cond_leaves self.cond_leaves[section_id] = [] def new_cond_branch(self, section_id): """Begins a new branch in a cond section.""" assert section_id in self.cond_leaves if section_id in self.cond_entry: # Subsequent splits move back to the split point, and memorize the # current leaves. self.cond_leaves[section_id].append(self.leaves) self.leaves = self.cond_entry[section_id] else: # If this is the first time we split a section, just remember the split # point. self.cond_entry[section_id] = self.leaves def exit_cond_section(self, section_id): """Exits a conditional section.""" for split in self.cond_leaves[section_id]: self.leaves |= split del self.cond_entry[section_id] del self.cond_leaves[section_id] def enter_finally_section(self, section_id): """Enters a finally section.""" # TODO(mdan): This, not the caller, should track the active sections. self.finally_section_subgraphs[section_id] = [None, None] if self.leaves: self.finally_section_has_direct_flow[section_id] = True else: self.finally_section_has_direct_flow[section_id] = False self.pending_finally_sections.add(section_id) def exit_finally_section(self, section_id): """Exits a finally section.""" assert section_id not in self.pending_finally_sections, 'Empty finally?' 
self.finally_section_subgraphs[section_id][1] = self.leaves # If the guard can only be reached by a jump, then it will not flow # into the statement that follows it. if not self.finally_section_has_direct_flow[section_id]: self.leaves = set() del self.finally_section_has_direct_flow[section_id] def build(self): """Returns the CFG accumulated so far and resets the builder. Returns: Graph """ # Freeze the nodes. for node in self.node_index.values(): node.freeze() # Build the statement edges. stmt_next = {} stmt_prev = {} for node, _ in self.forward_edges: for stmt in self.owners[node]: if stmt not in stmt_next: stmt_next[stmt] = set() if stmt not in stmt_prev: stmt_prev[stmt] = set() for first, second in self.forward_edges: stmts_exited = self.owners[first] - self.owners[second] for stmt in stmts_exited: stmt_next[stmt].add(second) stmts_entered = self.owners[second] - self.owners[first] for stmt in stmts_entered: stmt_prev[stmt].add(first) for stmt in stmt_next: stmt_next[stmt] = frozenset(stmt_next[stmt]) for stmt in stmt_prev: stmt_prev[stmt] = frozenset(stmt_prev[stmt]) # Construct the final graph object. result = Graph( entry=self.head, exit=self.leaves, error=self.errors, index=self.node_index, stmt_prev=stmt_prev, stmt_next=stmt_next) # Reset the state. self.reset() return result class AstToCfg(gast.NodeVisitor): """Converts an AST to CFGs. A separate CFG will be constructed for each function. """ def __init__(self): super(AstToCfg, self).__init__() self.builder_stack = [] self.builder = None self.cfgs = {} self.lexical_scopes = [] def _enter_lexical_scope(self, node): self.lexical_scopes.append(node) def _exit_lexical_scope(self, node): leaving_node = self.lexical_scopes.pop() assert node == leaving_node def _get_enclosing_finally_scopes(self, stop_at): included = [] for node in reversed(self.lexical_scopes): if isinstance(node, gast.Try) and node.finalbody: included.append(node) if isinstance(node, stop_at): return node, included return None, included def _process_basic_statement(self, node): self.generic_visit(node) self.builder.add_ordinary_node(node) def _process_exit_statement(self, node, *exits_nodes_of_type): # Note: this is safe because we process functions separately. try_node, guards = self._get_enclosing_finally_scopes( tuple(exits_nodes_of_type)) if try_node is None: raise ValueError( '%s that is not enclosed by any of %s' % (node, exits_nodes_of_type)) self.builder.add_exit_node(node, try_node, guards) def _process_continue_statement(self, node, *loops_to_nodes_of_type): # Note: this is safe because we process functions separately. try_node, guards = self._get_enclosing_finally_scopes( tuple(loops_to_nodes_of_type)) if try_node is None: raise ValueError('%s that is not enclosed by any of %s' % (node, loops_to_nodes_of_type)) self.builder.add_continue_node(node, try_node, guards) def visit_FunctionDef(self, node): # We also keep the FunctionDef node in the CFG. This allows us to determine # things like reaching definitions via closure. Note that the function body # will be stored in a separate graph, because function definitions are not # the same as function calls. 
if self.builder is not None: self.builder.add_ordinary_node(node) self.builder_stack.append(self.builder) self.builder = GraphBuilder(node) self._enter_lexical_scope(node) self.builder.enter_section(node) self._process_basic_statement(node.args) for stmt in node.body: self.visit(stmt) self.builder.exit_section(node) self._exit_lexical_scope(node) self.cfgs[node] = self.builder.build() self.builder = self.builder_stack.pop() def visit_Return(self, node): self._process_exit_statement(node, gast.FunctionDef) def visit_Expr(self, node): self._process_basic_statement(node) def visit_Assign(self, node): self._process_basic_statement(node) def visit_AnnAssign(self, node): self._process_basic_statement(node) def visit_AugAssign(self, node): self._process_basic_statement(node) def visit_Print(self, node): self._process_basic_statement(node) def visit_Raise(self, node): try_node, guards = self._get_enclosing_finally_scopes((gast.FunctionDef,)) if try_node is None: raise ValueError('%s that is not enclosed by any FunctionDef' % node) self.builder.add_error_node(node, guards) def visit_Assert(self, node): # Ignoring the effect of exceptions. self._process_basic_statement(node) def visit_Delete(self, node): self._process_basic_statement(node) def visit_If(self, node): # No need to track ifs as lexical scopes, for now. # Lexical scopes are generally tracked in order to be able to resolve the # targets of jump statements like break/continue/etc. Since there is no # statement that can interrupt a conditional, we don't need to track their # lexical scope. That may change in the future. self.builder.begin_statement(node) self.builder.enter_cond_section(node) self._process_basic_statement(node.test) self.builder.new_cond_branch(node) for stmt in node.body: self.visit(stmt) self.builder.new_cond_branch(node) for stmt in node.orelse: self.visit(stmt) self.builder.exit_cond_section(node) self.builder.end_statement(node) def visit_While(self, node): self.builder.begin_statement(node) self._enter_lexical_scope(node) self.builder.enter_section(node) self.builder.enter_loop_section(node, node.test) for stmt in node.body: self.visit(stmt) self.builder.exit_loop_section(node) # Note: although the orelse is technically part of the loop node, # the statements inside it don't affect the loop itself. For example, a # break in the loop's orelse will not affect the loop itself. self._exit_lexical_scope(node) for stmt in node.orelse: self.visit(stmt) self.builder.exit_section(node) self.builder.end_statement(node) def visit_For(self, node): self.builder.begin_statement(node) self._enter_lexical_scope(node) self.builder.enter_section(node) # Note: Strictly speaking, this should be node.target + node.iter. # However, the activity analysis accounts for this inconsistency, # so dataflow analysis produces the correct values. self.builder.enter_loop_section(node, node.iter) for stmt in node.body: self.visit(stmt) self.builder.exit_loop_section(node) # Note: although the orelse is technically part of the loop node, # they don't count as loop bodies. For example, a break in the loop's # orelse will affect the parent loop, not the current one. 
self._exit_lexical_scope(node) for stmt in node.orelse: self.visit(stmt) self.builder.exit_section(node) self.builder.end_statement(node) def visit_Break(self, node): self._process_exit_statement(node, gast.While, gast.For) def visit_Continue(self, node): self._process_continue_statement(node, gast.While, gast.For) def visit_Try(self, node): self._enter_lexical_scope(node) for stmt in node.body: self.visit(stmt) # Unlike loops, the orelse is a simple continuation of the body. for stmt in node.orelse: self.visit(stmt) self._exit_lexical_scope(node) if node.finalbody: self.builder.enter_finally_section(node) for stmt in node.finalbody: self.visit(stmt) self.builder.exit_finally_section(node) def visit_With(self, node): # TODO(mdan): Mark the context manager's exit call as exit guard. for item in node.items: self._process_basic_statement(item) for stmt in node.body: self.visit(stmt) def build(node): visitor = AstToCfg() visitor.visit(node) return visitor.cfgs
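For context, a minimal sketch of how the build() entry point above could be driven; the sample source, the demo() wrapper, and the assumption that gast.parse() is available in the installed gast version are illustrative and not taken from the file itself.

import gast
from tensorflow.python.autograph.pyct import cfg

# Illustrative sample code; any function with structured control flow works.
SAMPLE_SOURCE = """
def f(x):
    while x > 0:
        if x % 2 == 0:
            break
        x -= 1
    return x
"""

def demo():
    # Parse the sample code into a gast AST, then build one CFG per FunctionDef.
    module_node = gast.parse(SAMPLE_SOURCE)
    graphs = cfg.build(module_node)
    for fn_node, graph in graphs.items():
        # graph.index maps each AST node to its CFG node; entry/exit/error are
        # the distinguished nodes assembled by GraphBuilder.build().
        print(fn_node.name, 'has', len(graph.index), 'CFG nodes')
        print('entry:', graph.entry, 'exits:', len(graph.exit), 'errors:', len(graph.error))

if __name__ == '__main__':
    demo()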
jbedorf/tensorflow
tensorflow/python/autograph/pyct/cfg.py
Python
apache-2.0
27,285
[ "VisIt" ]
f228227c148334aeb5b1614491ed74bb01a02c84da52ea601f81d3de0d68c235
#!/usr/bin/env python # -*- coding: utf-8 -*- # Run this test like so: # vtkpython TestLinePlot.py -D $VTK_DATA_ROOT \ # -B $VTK_DATA_ROOT/Baseline/Charts/ import os import vtk import vtk.test.Testing import math class TestLinePlot(vtk.test.Testing.vtkTest): def testLinePlot(self): "Test if line plots can be built with python" # Set up a 2D scene, add an XY chart to it view = vtk.vtkContextView() view.GetRenderer().SetBackground(1.0,1.0,1.0) view.GetRenderWindow().SetSize(400,300) chart = vtk.vtkChartXY() view.GetScene().AddItem(chart) # Create a table with some points in it table = vtk.vtkTable() arrX = vtk.vtkFloatArray() arrX.SetName("X Axis") arrC = vtk.vtkFloatArray() arrC.SetName("Cosine") arrS = vtk.vtkFloatArray() arrS.SetName("Sine") arrS2 = vtk.vtkFloatArray() arrS2.SetName("Sine2") numPoints = 69 inc = 7.5 / (numPoints - 1) for i in range(0,numPoints): arrX.InsertNextValue(i*inc) arrC.InsertNextValue(math.cos(i * inc) + 0.0) arrS.InsertNextValue(math.sin(i * inc) + 0.0) arrS2.InsertNextValue(math.sin(i * inc) + 0.5) table.AddColumn(arrX) table.AddColumn(arrC) table.AddColumn(arrS) table.AddColumn(arrS2) # Now add the line plots with appropriate colors line = chart.AddPlot(0) line.SetInput(table,0,1) line.SetColor(0,255,0,255) line.SetWidth(1.0) line = chart.AddPlot(0) line.SetInput(table,0,2) line.SetColor(255,0,0,255) line.SetWidth(5.0) line = chart.AddPlot(0) line.SetInput(table,0,3) line.SetColor(0,0,255,255) line.SetWidth(4.0) view.GetRenderWindow().SetMultiSamples(0) view.GetRenderWindow().Render() img_file = "TestLinePlot.png" vtk.test.Testing.compareImage(view.GetRenderWindow(),vtk.test.Testing.getAbsImagePath(img_file),threshold=25) vtk.test.Testing.interact() if __name__ == "__main__": vtk.test.Testing.main([(TestLinePlot, 'test')])
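As a companion to the baseline-image comparison above, a small hedged sketch of how the rendered window could also be dumped to a PNG for manual inspection; the helper name, the default file name, and the choice of vtkWindowToImageFilter plus vtkPNGWriter are illustrative assumptions rather than part of the original test.

import vtk

def save_window_to_png(render_window, file_name="line_plot_snapshot.png"):
    # Grab the current framebuffer contents of the render window...
    window_to_image = vtk.vtkWindowToImageFilter()
    window_to_image.SetInput(render_window)
    window_to_image.Update()
    # ...and write the captured image out as a PNG.
    writer = vtk.vtkPNGWriter()
    writer.SetFileName(file_name)
    writer.SetInputConnection(window_to_image.GetOutputPort())
    writer.Write()

# Typical call site, after view.GetRenderWindow().Render() in the test above:
# save_window_to_png(view.GetRenderWindow())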
naucoin/VTKSlicerWidgets
Charts/Testing/Python/TestLinePlot.py
Python
bsd-3-clause
2,194
[ "VTK" ]
e04aac06dc53ae3023ffd06ce19306e0979d549d6702f3df14075dc9dd88424a
from __future__ import annotations import procrunner def test(dials_data, tmp_path): experiments = dials_data("centroid_test_data") / "experiments.json" result = procrunner.run( [ "dials.background", "output.plot=background.png", "image=1", str(experiments), ], working_directory=tmp_path, ) assert not result.returncode and not result.stderr assert (tmp_path / "background.png").is_file() for line in result.stdout.splitlines(): if line.startswith(b"Mean background"): assert line.endswith(b"0.559") break def test_multiple_imagesets(dials_data, tmp_path): filenames = list(dials_data("thaumatin_grid_scan").visit("thau_3_2_00*.cbf.bz2")) filenames.extend(dials_data("centroid_test_data").visit("centroid_*.cbf")) result = procrunner.run( [ "dials.background", "output.plot=background.png", "image=1,2", "size_inches=16,10", ] + filenames, working_directory=tmp_path, ) assert not result.returncode and not result.stderr assert (tmp_path / "background.png").is_file() lines = result.stdout.splitlines() assert b"For imageset 0 image 1:" in lines assert b"For imageset 0 image 2:" in lines assert b"For imageset 1 image 1:" in lines assert b"For imageset 1 image 2:" in lines
dials/dials
tests/command_line/test_background.py
Python
bsd-3-clause
1,439
[ "VisIt" ]
e3fc574f8f3d5651d3dd86f4351aea94937da22ed8fc7fbea5227446c54275e2
from __future__ import print_function from future import standard_library standard_library.install_aliases() from builtins import range from past.builtins import basestring import sys, os import numpy as np import operator try: # works with python 2.7 not 3 from StringIO import StringIO except: # works with python 3 from io import StringIO sys.path.insert(1, "../../") import h2o import imp import random import re import subprocess from subprocess import STDOUT,PIPE from h2o.utils.shared_utils import temp_ctr from h2o.model.binomial import H2OBinomialModel from h2o.model.clustering import H2OClusteringModel from h2o.model.multinomial import H2OMultinomialModel from h2o.model.ordinal import H2OOrdinalModel from h2o.model.regression import H2ORegressionModel from h2o.estimators.gbm import H2OGradientBoostingEstimator from h2o.estimators.deeplearning import H2ODeepLearningEstimator from h2o.estimators.random_forest import H2ORandomForestEstimator from h2o.estimators.glm import H2OGeneralizedLinearEstimator from h2o.estimators.kmeans import H2OKMeansEstimator from h2o.estimators.naive_bayes import H2ONaiveBayesEstimator from h2o.transforms.decomposition import H2OPCA from decimal import * import urllib.request, urllib.error, urllib.parse import numpy as np import shutil import string import copy import json import math from random import shuffle import scipy.special from h2o.utils.typechecks import assert_is_type def check_models(model1, model2, use_cross_validation=False, op='e'): """ Check that the given models are equivalent. :param model1: :param model2: :param use_cross_validation: boolean. if True, use validation metrics to determine model equality. Otherwise, use training metrics. :param op: comparison operator to use. 'e':==, 'g':>, 'ge':>= :return: None. Throw meaningful error messages if the check fails """ # 1. Check model types model1_type = model1.__class__.__name__ model2_type = model2.__class__.__name__ assert model1_type is model2_type, "The model types differ. The first model is of type {0} and the second " \ "model is of type {1}.".format(model1_type, model2_type) # 2. Check model metrics if isinstance(model1,H2OBinomialModel): # 2a. Binomial # F1 f1_1 = model1.F1(xval=use_cross_validation) f1_2 = model2.F1(xval=use_cross_validation) if op == 'e': assert f1_1[0][1] == f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be == to the second.".format(f1_1[0][1], f1_2[0][1]) elif op == 'g': assert f1_1[0][1] > f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be > than the second.".format(f1_1[0][1], f1_2[0][1]) elif op == 'ge': assert f1_1[0][1] >= f1_2[0][1], "The first model has an F1 of {0} and the second model has an F1 of " \ "{1}. Expected the first to be >= than the second.".format(f1_1[0][1], f1_2[0][1]) elif isinstance(model1,H2ORegressionModel): # 2b. Regression # MSE mse1 = model1.mse(xval=use_cross_validation) mse2 = model2.mse(xval=use_cross_validation) if op == 'e': assert mse1 == mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. Expected the first to be == to the second.".format(mse1, mse2) elif op == 'g': assert mse1 > mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. Expected the first to be > than the second.".format(mse1, mse2) elif op == 'ge': assert mse1 >= mse2, "The first model has an MSE of {0} and the second model has an MSE of " \ "{1}. 
Expected the first to be >= than the second.".format(mse1, mse2) elif isinstance(model1,H2OMultinomialModel) or isinstance(model1,H2OOrdinalModel): # 2c. Multinomial # hit-ratio pass elif isinstance(model1,H2OClusteringModel): # 2d. Clustering # totss totss1 = model1.totss(xval=use_cross_validation) totss2 = model2.totss(xval=use_cross_validation) if op == 'e': assert totss1 == totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be == to the second.".format(totss1, totss2) elif op == 'g': assert totss1 > totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be > than the second.".format(totss1, totss2) elif op == 'ge': assert totss1 >= totss2, "The first model has an TOTSS of {0} and the second model has an " \ "TOTSS of {1}. Expected the first to be >= than the second." \ "".format(totss1, totss2) def check_dims_values(python_obj, h2o_frame, rows, cols, dim_only=False): """ Check that the dimensions and values of the python object and H2OFrame are equivalent. Assumes that the python object conforms to the rules specified in the h2o frame documentation. :param python_obj: a (nested) list, tuple, dictionary, numpy.ndarray, ,or pandas.DataFrame :param h2o_frame: an H2OFrame :param rows: number of rows :param cols: number of columns :param dim_only: check the dimensions only :return: None """ h2o_rows, h2o_cols = h2o_frame.dim assert h2o_rows == rows and h2o_cols == cols, "failed dim check! h2o_rows:{0} rows:{1} h2o_cols:{2} cols:{3}" \ "".format(h2o_rows, rows, h2o_cols, cols) if not dim_only: if isinstance(python_obj, (list, tuple)): for c in range(cols): for r in range(rows): pval = python_obj[r] if isinstance(pval, (list, tuple)): pval = pval[c] hval = h2o_frame[r, c] assert pval == hval or abs(pval - hval) < 1e-10, \ "expected H2OFrame to have the same values as the python object for row {0} " \ "and column {1}, but h2o got {2} and python got {3}.".format(r, c, hval, pval) elif isinstance(python_obj, dict): for r in range(rows): for k in list(python_obj.keys()): pval = python_obj[k][r] if hasattr(python_obj[k],'__iter__') else python_obj[k] hval = h2o_frame[r,k] assert pval == hval, "expected H2OFrame to have the same values as the python object for row {0} " \ "and column {1}, but h2o got {2} and python got {3}.".format(r, k, hval, pval) def np_comparison_check(h2o_data, np_data, num_elements): """ Check values achieved by h2o against values achieved by numpy :param h2o_data: an H2OFrame or H2OVec :param np_data: a numpy array :param num_elements: number of elements to compare :return: None """ # Check for numpy try: imp.find_module('numpy') except ImportError: assert False, "failed comparison check because unable to import numpy" import numpy as np rows, cols = h2o_data.dim for i in range(num_elements): r = random.randint(0,rows-1) c = random.randint(0,cols-1) h2o_val = h2o_data[r,c] np_val = np_data[r,c] if len(np_data.shape) > 1 else np_data[r] if isinstance(np_val, np.bool_): np_val = bool(np_val) # numpy haz special bool type :( assert np.absolute(h2o_val - np_val) < 1e-5, \ "failed comparison check! h2o computed {0} and numpy computed {1}".format(h2o_val, np_val) # perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are # returned. def mojo_predict(model,tmpdir, mojoname): """ perform h2o predict and mojo predict. Frames containing h2o prediction is returned and mojo predict are returned. 
It is assumed that the input data set is saved as in.csv in tmpdir directory. :param model: h2o model you want to use to perform prediction :param tmpdir: directory where your mojo zip files are stored :param mojoname: name of your mojo zip file. :return: the h2o prediction frame and the mojo prediction frame """ newTest = h2o.import_file(os.path.join(tmpdir, 'in.csv'), header=1) # Make sure h2o and mojo use same in.csv predict_h2o = model.predict(newTest) # load mojo and have it do predict outFileName = os.path.join(tmpdir, 'out_mojo.csv') mojoZip = os.path.join(tmpdir, mojoname) + ".zip" genJarDir = str.split(str(tmpdir),'/') genJarDir = '/'.join(genJarDir[0:genJarDir.index('h2o-py')]) # locate directory of genmodel.jar java_cmd = ["java", "-ea", "-cp", os.path.join(genJarDir, "h2o-assemblies/genmodel/build/libs/genmodel.jar"), "-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv", "--input", os.path.join(tmpdir, 'in.csv'), "--output", outFileName, "--mojo", mojoZip, "--decimal"] p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT) o, e = p.communicate() pred_mojo = h2o.import_file(os.path.join(tmpdir, 'out_mojo.csv'), header=1) # load mojo prediction into a frame and compare # os.remove(mojoZip) return predict_h2o, pred_mojo # perform pojo predict. Frame containing pojo predict is returned. def pojo_predict(model, tmpdir, pojoname): h2o.download_pojo(model, path=tmpdir) h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar") java_file = os.path.join(tmpdir, pojoname + ".java") in_csv = (os.path.join(tmpdir, 'in.csv')) # import the test dataset print("Compiling Java Pojo") javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", java_file] subprocess.check_call(javac_cmd) out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv") cp_sep = ";" if sys.platform == "win32" else ":" java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv", "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--decimal"] p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT) o, e = p.communicate() print("Java output: {0}".format(o)) assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv) predict_pojo = h2o.import_file(out_pojo_csv, header=1) return predict_pojo def javapredict(algo, equality, train, test, x, y, compile_only=False, separator=",", setInvNumNA=False,**kwargs): print("Creating model in H2O") if algo == "gbm": model = H2OGradientBoostingEstimator(**kwargs) elif algo == "random_forest": model = H2ORandomForestEstimator(**kwargs) elif algo == "deeplearning": model = H2ODeepLearningEstimator(**kwargs) elif algo == "glm": model = H2OGeneralizedLinearEstimator(**kwargs) elif algo == "naive_bayes": model = H2ONaiveBayesEstimator(**kwargs) elif algo == "kmeans": model = H2OKMeansEstimator(**kwargs) elif algo == "pca": model = H2OPCA(**kwargs) else: raise ValueError if algo == "kmeans" or algo == "pca": model.train(x=x, training_frame=train) else: model.train(x=x, y=y, training_frame=train) print(model) # HACK: munge model._id so that it conforms to Java class name. For example, change K-means to K_means. # TODO: clients should extract Java class name from header. 
regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]") pojoname = regex.sub("_", model._id) print("Downloading Java prediction model code from H2O") tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname)) os.makedirs(tmpdir) h2o.download_pojo(model, path=tmpdir) h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar") assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar) print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar)) java_file = os.path.join(tmpdir, pojoname + ".java") assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file) print("java code saved in {0}".format(java_file)) print("Compiling Java Pojo") javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file] subprocess.check_call(javac_cmd) if not compile_only: print("Predicting in H2O") predictions = model.predict(test) predictions.summary() predictions.head() out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv") h2o.download_csv(predictions, out_h2o_csv) assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv) print("H2O Predictions saved in {0}".format(out_h2o_csv)) print("Setting up for Java POJO") in_csv = os.path.join(tmpdir, "in.csv") h2o.download_csv(test[x], in_csv) # hack: the PredictCsv driver can't handle quoted strings, so remove them f = open(in_csv, "r+") csv = f.read() csv = re.sub('\"', "", csv) csv = re.sub(",", separator, csv) # replace with arbitrary separator for input dataset f.seek(0) f.write(csv) f.truncate() f.close() assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv) print("Input CSV to PredictCsv saved in {0}".format(in_csv)) print("Running PredictCsv Java Program") out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv") cp_sep = ";" if sys.platform == "win32" else ":" java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.PredictCsv", "--pojo", pojoname, "--input", in_csv, "--output", out_pojo_csv, "--separator", separator] if setInvNumNA: java_cmd.append("--setConvertInvalidNum") p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT) o, e = p.communicate() print("Java output: {0}".format(o)) assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv) predictions2 = h2o.upload_file(path=out_pojo_csv) print("Pojo predictions saved in {0}".format(out_pojo_csv)) print("Comparing predictions between H2O and Java POJO") # Dimensions hr, hc = predictions.dim pr, pc = predictions2.dim assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr) assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc) # Value for r in range(hr): hp = predictions[r, 0] if equality == "numeric": pp = float.fromhex(predictions2[r, 0]) assert abs(hp - pp) < 1e-4, \ "Expected predictions to be the same (within 1e-4) for row %d, but got %r and %r" % (r, hp, pp) elif equality == "class": pp = predictions2[r, 0] assert hp == pp, "Expected predictions to be the same for row %d, but got %r and %r" % (r, hp, pp) else: raise ValueError def javamunge(assembly, pojoname, test, compile_only=False): """ Here's how to use: assembly is an already fit H2OAssembly; The test set should be used to compare the output here and the output of the POJO. 
""" print("Downloading munging POJO code from H2O") tmpdir = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "results", pojoname)) os.makedirs(tmpdir) assembly.to_pojo(pojoname, path=tmpdir, get_jar=True) h2o_genmodel_jar = os.path.join(tmpdir, "h2o-genmodel.jar") assert os.path.exists(h2o_genmodel_jar), "Expected file {0} to exist, but it does not.".format(h2o_genmodel_jar) print("h2o-genmodel.jar saved in {0}".format(h2o_genmodel_jar)) java_file = os.path.join(tmpdir, pojoname + ".java") assert os.path.exists(java_file), "Expected file {0} to exist, but it does not.".format(java_file) print("java code saved in {0}".format(java_file)) print("Compiling Java Pojo") javac_cmd = ["javac", "-cp", h2o_genmodel_jar, "-J-Xmx12g", "-J-XX:MaxPermSize=256m", java_file] subprocess.check_call(javac_cmd) if not compile_only: print("Setting up for Java POJO") in_csv = os.path.join(tmpdir, "in.csv") h2o.download_csv(test, in_csv) assert os.path.exists(in_csv), "Expected file {0} to exist, but it does not.".format(in_csv) print("Input CSV to mungedCSV saved in {0}".format(in_csv)) print("Predicting in H2O") munged = assembly.fit(test) munged.head() out_h2o_csv = os.path.join(tmpdir, "out_h2o.csv") h2o.download_csv(munged, out_h2o_csv) assert os.path.exists(out_h2o_csv), "Expected file {0} to exist, but it does not.".format(out_h2o_csv) print("Munged frame saved in {0}".format(out_h2o_csv)) print("Running PredictCsv Java Program") out_pojo_csv = os.path.join(tmpdir, "out_pojo.csv") cp_sep = ";" if sys.platform == "win32" else ":" java_cmd = ["java", "-ea", "-cp", h2o_genmodel_jar + cp_sep + tmpdir, "-Xmx12g", "-XX:MaxPermSize=2g", "-XX:ReservedCodeCacheSize=256m", "hex.genmodel.tools.MungeCsv", "--header", "--munger", pojoname, "--input", in_csv, "--output", out_pojo_csv] print("JAVA COMMAND: " + " ".join(java_cmd)) p = subprocess.Popen(java_cmd, stdout=PIPE, stderr=STDOUT) o, e = p.communicate() print("Java output: {0}".format(o)) assert os.path.exists(out_pojo_csv), "Expected file {0} to exist, but it does not.".format(out_pojo_csv) munged2 = h2o.upload_file(path=out_pojo_csv, col_types=test.types) print("Pojo predictions saved in {0}".format(out_pojo_csv)) print("Comparing predictions between H2O and Java POJO") # Dimensions hr, hc = munged.dim pr, pc = munged2.dim assert hr == pr, "Expected the same number of rows, but got {0} and {1}".format(hr, pr) assert hc == pc, "Expected the same number of cols, but got {0} and {1}".format(hc, pc) # Value import math import numbers munged.show() munged2.show() for r in range(hr): for c in range(hc): hp = munged[r,c] pp = munged2[r,c] if isinstance(hp, numbers.Number): assert isinstance(pp, numbers.Number) assert (math.fabs(hp-pp) < 1e-8) or (math.isnan(hp) and math.isnan(pp)), "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp) else: assert hp==pp, "Expected munged rows to be the same for row {0}, but got {1}, and {2}".format(r, hp, pp) def locate(path): """ Search for a relative path and turn it into an absolute path. This is handy when hunting for data files to be passed into h2o and used by import file. Note: This function is for unit testing purposes only. Parameters ---------- path : str Path to search for :return: Absolute path if it is found. None otherwise. """ if (test_is_on_hadoop()): # Jenkins jobs create symbolic links to smalldata and bigdata on the machine that starts the test. 
However, # in an h2o multinode hadoop cluster scenario, the clustered machines don't know about the symbolic link. # Consequently, `locate` needs to return the actual path to the data on the clustered machines. ALL jenkins # machines store smalldata and bigdata in /home/0xdiag/. If ON.HADOOP is set by the run.py, the path arg MUST # be an immediate subdirectory of /home/0xdiag/. Moreover, the only guaranteed subdirectories of /home/0xdiag/ # are smalldata and bigdata. p = os.path.realpath(os.path.join("/home/0xdiag/", path)) if not os.path.exists(p): raise ValueError("File not found: " + path) return p else: tmp_dir = os.path.realpath(os.getcwd()) possible_result = os.path.join(tmp_dir, path) while (True): if (os.path.exists(possible_result)): return possible_result next_tmp_dir = os.path.dirname(tmp_dir) if (next_tmp_dir == tmp_dir): raise ValueError("File not found: " + path) tmp_dir = next_tmp_dir possible_result = os.path.join(tmp_dir, path) def hadoop_namenode_is_accessible(): url = "http://{0}:50070".format(hadoop_namenode()) try: urllib.urlopen(url) internal = True except: internal = False return internal def test_is_on_hadoop(): if hasattr(sys.modules["tests.pyunit_utils"], '__on_hadoop__'): return sys.modules["tests.pyunit_utils"].__on_hadoop__ return False def hadoop_namenode(): if os.getenv("NAME_NODE"): return os.getenv("NAME_NODE").split(".")[0] elif hasattr(sys.modules["tests.pyunit_utils"], '__hadoop_namenode__'): return sys.modules["tests.pyunit_utils"].__hadoop_namenode__ return None def pyunit_exec(test_name): with open(test_name, "r") as t: pyunit = t.read() pyunit_c = compile(pyunit, os.path.abspath(test_name), 'exec') exec(pyunit_c, {}) def standalone_test(test): h2o.init(strict_version_check=False) h2o.remove_all() h2o.log_and_echo("------------------------------------------------------------") h2o.log_and_echo("") h2o.log_and_echo("STARTING TEST") h2o.log_and_echo("") h2o.log_and_echo("------------------------------------------------------------") test() def make_random_grid_space(algo, ncols=None, nrows=None): """ Construct a dictionary of the form {gbm_parameter:list_of_values, ...}, which will eventually be passed to H2OGridSearch to build a grid object. The gbm parameters, and their associated values, are randomly selected. 
:param algo: a string {"gbm", "rf", "dl", "km", "glm"} representing the algo dimension of the grid space :param ncols: Used for mtries selection or k (pca) :param nrows: Used for k (pca) :return: a dictionary of parameter_name:list_of_values """ grid_space = {} if algo in ["gbm", "rf"]: if random.randint(0,1): grid_space['ntrees'] = random.sample(list(range(1,6)),random.randint(2,3)) if random.randint(0,1): grid_space['max_depth'] = random.sample(list(range(1,6)),random.randint(2,3)) if random.randint(0,1): grid_space['min_rows'] = random.sample(list(range(1,11)),random.randint(2,3)) if random.randint(0,1): grid_space['nbins'] = random.sample(list(range(2,21)),random.randint(2,3)) if random.randint(0,1): grid_space['nbins_cats'] = random.sample(list(range(2,1025)),random.randint(2,3)) if algo == "gbm": if random.randint(0,1): grid_space['learn_rate'] = [random.random() for _ in range(random.randint(2,3))] grid_space['distribution'] = random.sample(['bernoulli', 'multinomial', 'gaussian', 'poisson', 'tweedie', 'gamma'], 1) if algo == "rf": if random.randint(0,1): grid_space['mtries'] = random.sample(list(range(1,ncols+1)),random.randint(2,3)) if random.randint(0,1): grid_space['sample_rate'] = [random.random() for r in range(random.randint(2,3))] elif algo == "km": grid_space['k'] = random.sample(list(range(1,10)),random.randint(2,3)) if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3)) if random.randint(0,1): grid_space['standardize'] = [True, False] if random.randint(0,1): grid_space['seed'] = random.sample(list(range(1,1000)),random.randint(2,3)) if random.randint(0,1): grid_space['init'] = random.sample(['Random','PlusPlus','Furthest'],random.randint(2,3)) elif algo == "glm": if random.randint(0,1): grid_space['alpha'] = [random.random() for r in range(random.randint(2,3))] grid_space['family'] = random.sample(['binomial','gaussian','poisson','tweedie','gamma'], 1) if grid_space['family'] == "tweedie": if random.randint(0,1): grid_space['tweedie_variance_power'] = [round(random.random()+1,6) for r in range(random.randint(2,3))] grid_space['tweedie_link_power'] = 1 - grid_space['tweedie_variance_power'] elif algo == "dl": if random.randint(0,1): grid_space['activation'] = \ random.sample(["Rectifier", "Tanh", "TanhWithDropout", "RectifierWithDropout", "MaxoutWithDropout"], random.randint(2,3)) if random.randint(0,1): grid_space['l2'] = [0.001*random.random() for r in range(random.randint(2,3))] grid_space['distribution'] = random.sample(['bernoulli','multinomial','gaussian','poisson','tweedie','gamma'],1) return grid_space elif algo == "naiveBayes": grid_space['laplace'] = 0 if random.randint(0,1): grid_space['laplace'] = [round(random.random() + r, 6) for r in random.sample(list(range(0,11)), random.randint(2,3))] if random.randint(0,1): grid_space['min_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))] if random.randint(0,1): grid_space['eps_sdev'] = [round(random.random(),6) for r in range(random.randint(2,3))] elif algo == "pca": if random.randint(0,1): grid_space['max_iterations'] = random.sample(list(range(1,1000)),random.randint(2,3)) if random.randint(0,1): grid_space['transform'] = random.sample(["NONE","STANDARDIZE","NORMALIZE","DEMEAN","DESCALE"], random.randint(2,3)) grid_space['k'] = random.sample(list(range(1,min(ncols,nrows))),random.randint(2,3)) else: raise ValueError return grid_space # Validate given models' parameters against expected values def expect_model_param(models, attribute_name, 
expected_values): print("param: {0}".format(attribute_name)) actual_values = list(set([m.params[attribute_name]['actual'] \ if type(m.params[attribute_name]['actual']) != list else m.params[attribute_name]['actual'][0] for m in models.models])) # possible for actual to be a list (GLM) if type(expected_values) != list: expected_values = [expected_values] # limit precision. Rounding happens in some models like RF actual_values = [x if isinstance(x,basestring) else round(float(x),5) for x in actual_values] expected_values = [x if isinstance(x,basestring) else round(float(x),5) for x in expected_values] print("actual values: {0}".format(actual_values)) print("expected values: {0}".format(expected_values)) actual_values_len = len(actual_values) expected_values_len = len(expected_values) assert actual_values_len == expected_values_len, "Expected values len: {0}. Actual values len: " \ "{1}".format(expected_values_len, actual_values_len) actual_values = sorted(actual_values) expected_values = sorted(expected_values) for i in range(len(actual_values)): if isinstance(actual_values[i], float): assert abs(actual_values[i]-expected_values[i]) < 1.1e-5, "Too large of a difference between actual and " \ "expected value. Actual value: {}. Expected value: {}"\ .format(actual_values[i], expected_values[i]) else: assert actual_values[i] == expected_values[i], "Expected: {}. Actual: {}"\ .format(expected_values[i], actual_values[i]) def rest_ctr(): return h2o.connection().requests_count def write_syn_floating_point_dataset_glm(csv_training_data_filename, csv_validation_data_filename, csv_test_data_filename, csv_weight_name, row_count, col_count, data_type, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type, valid_row_count, test_row_count, class_number=2, class_method=('probability', 'probability', 'probability'), class_margin=[0.0, 0.0, 0.0]): """ Generate random data sets to test the GLM algo using the following steps: 1. randomly generate the intercept and weight vector; 2. generate a set of predictors X; 3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (exp(Wk^T * X + e))) :param csv_training_data_filename: string representing full path filename to store training data set. Set to null string if no training data set is to be generated. :param csv_validation_data_filename: string representing full path filename to store validation data set. Set to null string if no validation data set is to be generated. :param csv_test_data_filename: string representing full path filename to store test data set. Set to null string if no test data set is to be generated. :param csv_weight_name: string representing full path filename to store intercept and weight used to generate all data sets. 
:param row_count: integer representing number of samples (predictor, response) in training data set :param col_count: integer representing the number of predictors in the data set :param data_type: integer representing the type of predictors or weights (1: integers, 2: real) :param max_p_value: integer representing maximum predictor values :param min_p_value: integer representing minimum predictor values :param max_w_value: integer representing maximum intercept/weight values :param min_w_value: integer representing minimum intercept/weight values :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param valid_row_count: integer representing number of samples (predictor, response) in validation data set :param test_row_count: integer representing number of samples (predictor, response) in test data set :param class_number: integer, optional, representing number of classes for binomial and multinomial :param class_method: string tuple, optional, describing how we derive the final response from the class probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively. If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in margin. If the maximum class probability fails to be greater by the margin than the second highest class probability, the data sample is discarded. :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to exceed the second highest class probability in order for us to keep the data sample for training/validation/test data set respectively. 
This field is only meaningful if class_method is set to 'threshold' :return: None """ # generate bias b and weight as a column vector weights = generate_weights_glm(csv_weight_name, col_count, data_type, min_w_value, max_w_value, family_type=family_type, class_number=class_number) # generate training data set if len(csv_training_data_filename) > 0: generate_training_set_glm(csv_training_data_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type, noise_std, weights, class_method=class_method[0], class_margin=class_margin[0], weightChange=True) # generate validation data set if len(csv_validation_data_filename) > 0: generate_training_set_glm(csv_validation_data_filename, valid_row_count, col_count, min_p_value, max_p_value, data_type, family_type, noise_std, weights, class_method=class_method[1], class_margin=class_margin[1]) # generate test data set if len(csv_test_data_filename) > 0: generate_training_set_glm(csv_test_data_filename, test_row_count, col_count, min_p_value, max_p_value, data_type, family_type, noise_std, weights, class_method=class_method[2], class_margin=class_margin[2]) def write_syn_mixed_dataset_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot, csv_validation_data_filename, csv_validation_filename_true_one_hot, csv_test_data_filename, csv_test_filename_true_one_hot, csv_weight_filename, row_count, col_count, max_p_value, min_p_value, max_w_value, min_w_value, noise_std, family_type, valid_row_count, test_row_count, enum_col, enum_level_vec, class_number=2, class_method=['probability', 'probability', 'probability'], class_margin=[0.0, 0.0, 0.0]): """ This function differs from write_syn_floating_point_dataset_glm in one small point. The predictors in this case contains categorical data as well as real data. Generate random data sets to test the GLM algo using the following steps: 1. randomly generate the intercept and weight vector; 2. generate a set of predictors X; 3. generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)) :param csv_training_data_filename: string representing full path filename to store training data set. Set to null string if no training data set is to be generated. :param csv_training_data_filename_true_one_hot: string representing full path filename to store training data set with true one-hot encoding. Set to null string if no training data set is to be generated. :param csv_validation_data_filename: string representing full path filename to store validation data set. Set to null string if no validation data set is to be generated. :param csv_validation_filename_true_one_hot: string representing full path filename to store validation data set with true one-hot. Set to null string if no validation data set is to be generated. :param csv_test_data_filename: string representing full path filename to store test data set. Set to null string if no test data set is to be generated. :param csv_test_filename_true_one_hot: string representing full path filename to store test data set with true one-hot encoding. Set to null string if no test data set is to be generated. 
:param csv_weight_filename: string representing full path filename to store intercept and weight used to generate all data sets. :param row_count: integer representing number of samples (predictor, response) in training data set :param col_count: integer representing the number of predictors in the data set :param max_p_value: integer representing maximum predictor values :param min_p_value: integer representing minimum predictor values :param max_w_value: integer representing maximum intercept/weight values :param min_w_value: integer representing minimum intercept/weight values :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param valid_row_count: integer representing number of samples (predictor, response) in validation data set :param test_row_count: integer representing number of samples (predictor, response) in test data set :param enum_col: integer representing actual number of categorical columns in data set :param enum_level_vec: vector containing maximum integer value for each categorical column :param class_number: integer, optional, representing number classes for binomial and multinomial :param class_method: string tuple, optional, describing how we derive the final response from the class probabilities generated for binomial and multinomial family_type for training/validation/test data set respectively. If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in margin. If the maximum class probability fails to be greater by margin than the second highest class probability, the data sample is discarded. :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to exceed the second highest class probability by in order for us to keep the data sample for training/validation/test data set respectively. This field is only meaningful if class_method is set to 'threshold' :return: None """ # add column count of encoded categorical predictors, if maximum value for enum is 3, it has 4 levels. # hence 4 bits are used to encode it with true one hot encoding. That is why we are adding 1 bit per # categorical columns added to our predictors new_col_count = col_count - enum_col + sum(enum_level_vec) + enum_level_vec.shape[0] # generate the weights to be applied to the training/validation/test data sets # this is for true one hot encoding. 
For reference+one hot encoding, will skip # few extra weights weights = generate_weights_glm(csv_weight_filename, new_col_count, 2, min_w_value, max_w_value, family_type=family_type, class_number=class_number) # generate training data set if len(csv_training_data_filename) > 0: generate_training_set_mixed_glm(csv_training_data_filename, csv_training_data_filename_true_one_hot, row_count, col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col, enum_level_vec, class_number=class_number, class_method=class_method[0], class_margin=class_margin[0], weightChange=True) # generate validation data set if len(csv_validation_data_filename) > 0: generate_training_set_mixed_glm(csv_validation_data_filename, csv_validation_filename_true_one_hot, valid_row_count, col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col, enum_level_vec, class_number=class_number, class_method=class_method[1], class_margin=class_margin[1]) # generate test data set if len(csv_test_data_filename) > 0: generate_training_set_mixed_glm(csv_test_data_filename, csv_test_filename_true_one_hot, test_row_count, col_count, min_p_value, max_p_value, family_type, noise_std, weights, enum_col, enum_level_vec, class_number=class_number, class_method=class_method[2], class_margin=class_margin[2]) def generate_weights_glm(csv_weight_filename, col_count, data_type, min_w_value, max_w_value, family_type='gaussian', class_number=2): """ Generate random intercept and weight vectors (integer or real) for GLM algo and save the values in a file specified by csv_weight_filename. :param csv_weight_filename: string representing full path filename to store intercept and weight used to generate all data set :param col_count: integer representing the number of predictors in the data set :param data_type: integer representing the type of predictors or weights (1: integers, 2: real) :param max_w_value: integer representing maximum intercept/weight values :param min_w_value: integer representing minimum intercept/weight values :param family_type: string ,optional, represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param class_number: integer, optional, representing number classes for binomial and multinomial :return: column vector of size 1+colCount representing intercept and weight or matrix of size 1+colCount by class_number """ # first generate random intercept and weight if 'gaussian' in family_type.lower(): if data_type == 1: # generate random integer intercept/weight weight = np.random.random_integers(min_w_value, max_w_value, [col_count+1, 1]) elif data_type == 2: # generate real intercept/weights weight = np.random.uniform(min_w_value, max_w_value, [col_count+1, 1]) else: assert False, "dataType must be 1 or 2 for now." elif ('binomial' in family_type.lower()) or ('multinomial' in family_type.lower() or ('ordinal' in family_type.lower())): if 'binomial' in family_type.lower(): # for binomial, only need 1 set of weight class_number -= 1 if class_number <= 0: assert False, "class_number must be >= 2!" if isinstance(col_count, np.ndarray): temp_col_count = col_count[0] else: temp_col_count = col_count if data_type == 1: # generate random integer intercept/weight weight = np.random.random_integers(min_w_value, max_w_value, [temp_col_count+1, class_number]) elif data_type == 2: # generate real intercept/weights weight = np.random.uniform(min_w_value, max_w_value, [temp_col_count+1, class_number]) else: assert False, "dataType must be 1 or 2 for now." 
# special treatment for ordinal weights if 'ordinal' in family_type.lower(): num_pred = len(weight) for index in range(class_number): weight[0,index] = 0 for indP in range(1,num_pred): weight[indP,index] = weight[indP,0] # make sure betas for all classes are the same np.savetxt(csv_weight_filename, weight.transpose(), delimiter=",") return weight def generate_training_set_glm(csv_filename, row_count, col_count, min_p_value, max_p_value, data_type, family_type, noise_std, weight, class_method='probability', class_margin=0.0, weightChange=False): """ Generate supervised data set given weights for the GLM algo. First randomly generate the predictors, then call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)). The predictors and responses are saved in a file specified by csv_filename. :param csv_filename: string representing full path filename to store supervised data set :param row_count: integer representing the number of training samples in the data set :param col_count: integer representing the number of predictors in the data set :param max_p_value: integer representing maximum predictor values :param min_p_value: integer representing minimum predictor values :param data_type: integer representing the type of predictors or weights (1: integers, 2: real) :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response :param weight: vector representing w in our formula to generate the response. :param class_method: string tuple, optional, describing how we derive the final response from the class probabilities generated for binomial and multinomial family-type for training/validation/test data set respectively. If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in the margin. If the maximum class probability fails to be greater by the margin than the second highest class probability, the data sample is discarded. :param class_margin: float tuple, optional, denotes the threshold by how much the maximum class probability has to exceed the second highest class probability in order for us to keep the data sample for training/validation/test data set respectively. This field is only meaningful if class_method is set to 'threshold' :return: None """ if data_type == 1: # generate random integers x_mat = np.random.random_integers(min_p_value, max_p_value, [row_count, col_count]) elif data_type == 2: # generate random real numbers x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count]) else: assert False, "dataType must be 1 or 2 for now. 
" # generate the response vector to the input predictors response_y = generate_response_glm(weight, x_mat, noise_std, family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange) # for family_type = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data. # need to delete this data sample before proceeding if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()) or ('ordinal' in family_type.lower()): if 'threshold' in class_method.lower(): if np.any(response_y < 0): # remove negative entries out of data set (x_mat, response_y) = remove_negative_response(x_mat, response_y) # write to file in csv format np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",") def generate_clusters(cluster_center_list, cluster_pt_number_list, cluster_radius_list): """ This function is used to generate clusters of points around cluster_centers listed in cluster_center_list. The radius of the cluster of points are specified by cluster_pt_number_list. The size of each cluster could be different and it is specified in cluster_radius_list. :param cluster_center_list: list of coordinates of cluster centers :param cluster_pt_number_list: number of points to generate for each cluster center :param cluster_radius_list: list of size of each cluster :return: list of sample points that belong to various clusters """ k = len(cluster_pt_number_list) # number of clusters to generate clusters for if (not(k == len(cluster_center_list))) or (not(k == len(cluster_radius_list))): assert False, "Length of list cluster_center_list, cluster_pt_number_list, cluster_radius_list must be the same!" training_sets = [] for k_ind in range(k): new_cluster_data = generate_one_cluster(cluster_center_list[k_ind], cluster_pt_number_list[k_ind], cluster_radius_list[k_ind]) if k_ind > 0: training_sets = np.concatenate((training_sets, new_cluster_data), axis=0) else: training_sets = new_cluster_data # want to shuffle the data samples so that the clusters are all mixed up map(np.random.shuffle, training_sets) return training_sets def generate_one_cluster(cluster_center, cluster_number, cluster_size): """ This function will generate a full cluster wither cluster_number points centered on cluster_center with maximum radius cluster_size :param cluster_center: python list denoting coordinates of cluster center :param cluster_number: integer denoting number of points to generate for this cluster :param cluster_size: float denoting radius of cluster :return: np matrix denoting a cluster """ pt_dists = np.random.uniform(0, cluster_size, [cluster_number, 1]) coord_pts = len(cluster_center) # dimension of each cluster point one_cluster_data = np.zeros((cluster_number, coord_pts), dtype=np.float) for p_ind in range(cluster_number): coord_indices = list(range(coord_pts)) random.shuffle(coord_indices) # randomly determine which coordinate to generate left_radius = pt_dists[p_ind] for c_ind in range(coord_pts): coord_index = coord_indices[c_ind] one_cluster_data[p_ind, coord_index] = random.uniform(-1*left_radius+cluster_center[coord_index], left_radius+cluster_center[coord_index]) left_radius = math.sqrt(pow(left_radius, 2)-pow((one_cluster_data[p_ind, coord_index]- cluster_center[coord_index]), 2)) return one_cluster_data def remove_negative_response(x_mat, response_y): """ Recall that when the user chooses to generate a data set for multinomial or binomial using the 'threshold' method, response y is set to the class with the maximum class probability 
if the maximum class probability exceeds the second highest class probability by the value set in margin. If the maximum class probability fails to be greater by margin than the second highest class probability, the data sample is discarded. However, when we generate the data set, we keep all samples. For data sample with maximum class probability that fails to be greater by margin than the second highest class probability, the response is set to be -1. This function will remove all data samples (predictors and responses) with response set to -1. :param x_mat: predictor matrix containing all predictor values :param response_y: response that can be negative if that data sample is to be removed :return: tuple containing x_mat, response_y with negative data samples removed. """ y_response_negative = np.where(response_y < 0) # matrix of True or False x_mat = np.delete(x_mat,y_response_negative[0].transpose(),axis=0) # remove predictor row with negative response # remove rows with negative response response_y = response_y[response_y >= 0] return x_mat,response_y.transpose() def generate_training_set_mixed_glm(csv_filename, csv_filename_true_one_hot, row_count, col_count, min_p_value, max_p_value, family_type, noise_std, weight, enum_col, enum_level_vec, class_number=2, class_method='probability', class_margin=0.0, weightChange=False): """ Generate supervised data set given weights for the GLM algo with mixed categorical and real value predictors. First randomly generate the predictors, then call function generate_response_glm to generate the corresponding response y using the formula: y = w^T x+b+e where T is transpose, e is a random Gaussian noise added. For the Binomial family, the relationship between the response Y and predictor vector X is assumed to be Prob(Y = 1|X) = exp(W^T * X + e)/(1+exp(W^T * X + e)). For the Multinomial family, the relationship between the response Y (K possible classes) and predictor vector X is assumed to be Prob(Y = c|X) = exp(Wc^T * X + e)/(sum k=0 to K-1 (ep(Wk^T *X+e)) e is the random Gaussian noise added to the response. The predictors and responses are saved in a file specified by csv_filename. :param csv_filename: string representing full path filename to store supervised data set :param csv_filename_true_one_hot: string representing full path filename to store data set with true one-hot encoding. :param row_count: integer representing the number of training samples in the data set :param col_count: integer representing the number of predictors in the data set :param max_p_value: integer representing maximum predictor values :param min_p_value: integer representing minimum predictor values :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response :param weight: vector representing w in our formula to generate the response. :param enum_col: integer representing actual number of categorical columns in data set :param enum_level_vec: vector containing maximum integer value for each categorical column :param class_number: integer, optional, representing number classes for binomial and multinomial :param class_method: string, optional, describing how we derive the final response from the class probabilities generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly according to the class probabilities calculated. 
If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in margin. If the maximum class probability fails to be greater by margin than the second highest class probability, the data sample is discarded. :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed the second highest class probability by in order for us to keep the data set sample. This field is only meaningful if class_method is set to 'threshold' :return: None """ # generate the random training data sets enum_dataset = np.zeros((row_count, enum_col), dtype=np.int) # generate the categorical predictors # generate categorical data columns for indc in range(enum_col): enum_dataset[:, indc] = np.random.random_integers(0, enum_level_vec[indc], row_count) # generate real data columns x_mat = np.random.uniform(min_p_value, max_p_value, [row_count, col_count-enum_col]) x_mat = np.concatenate((enum_dataset, x_mat), axis=1) # concatenate categorical and real predictor columns if len(csv_filename_true_one_hot) > 0: generate_and_save_mixed_glm(csv_filename_true_one_hot, x_mat, enum_level_vec, enum_col, True, weight, noise_std, family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange) if len(csv_filename) > 0: generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, False, weight, noise_std, family_type, class_method=class_method, class_margin=class_margin, weightChange=False) def generate_and_save_mixed_glm(csv_filename, x_mat, enum_level_vec, enum_col, true_one_hot, weight, noise_std, family_type, class_method='probability', class_margin=0.0, weightChange=False): """ Given the weights and input data matrix with mixed categorical and real value predictors, this function will generate a supervised data set and save the input data and response in a csv format file specified by csv_filename. It will first encode the enums without using one hot encoding with or without a reference level first before generating a response Y. :param csv_filename: string representing full path filename to store supervised data set with reference level plus true one-hot encoding. :param x_mat: predictor matrix with mixed columns (categorical/real values) :param enum_level_vec: vector containing maximum integer value for each categorical column :param enum_col: integer representing actual number of categorical columns in data set :param true_one_hot: bool indicating whether we are using true one hot encoding or reference level plus one hot encoding :param weight: vector representing w in our formula to generate the response :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param class_method: string, optional, describing how we derive the final response from the class probabilities generated for binomial and multinomial family_type. If set to 'probability', response y is generated randomly according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in the margin. 
If the maximum class probability fails to be greater by margin than the second highest class probability, the data sample is discarded. :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has to exceed the second highest class probability in order for us to keep the data sample. This field is only meaningful if class_method is set to 'threshold' :return: None """ # encode the enums x_mat_encoded = encode_enum_dataset(x_mat, enum_level_vec, enum_col, true_one_hot, False) # extract the correct weight dimension for the data set if not true_one_hot: (num_row, num_col) = x_mat_encoded.shape weight = weight[0:num_col+1] # +1 to take care of the intercept term # generate the corresponding response vector given the weight and encoded input predictors response_y = generate_response_glm(weight, x_mat_encoded, noise_std, family_type, class_method=class_method, class_margin=class_margin, weightChange=weightChange) # for familyType = 'multinomial' or 'binomial', response_y can be -ve to indicate bad sample data. # need to delete this before proceeding if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()): if 'threshold' in class_method.lower(): (x_mat,response_y) = remove_negative_response(x_mat, response_y) # write generated data set to file in csv format np.savetxt(csv_filename, np.concatenate((x_mat, response_y), axis=1), delimiter=",") def encode_enum_dataset(dataset, enum_level_vec, enum_col, true_one_hot, include_nans): """ Given 2-d numpy array of predictors with categorical and real columns, this function will encode the enum columns with 1-hot encoding or with reference plus one hot encoding :param dataset: 2-d numpy array of predictors with both categorical and real columns :param enum_level_vec: vector containing maximum level for each categorical column :param enum_col: number of categorical columns in the data set :param true_one_hot: bool indicating if we are using true one hot encoding or with one reference level + one hot encoding :param include_nans: bool indicating if we have nans in categorical columns :return: data set with categorical columns encoded with 1-hot encoding or 1-hot encoding plus reference """ (num_row, num_col) = dataset.shape # split the data set into categorical and real parts enum_arrays = dataset[:, 0:enum_col] new_enum_arrays = [] # perform the encoding for each element of categorical part for indc in range(enum_col): enum_col_num = enum_level_vec[indc]+1 if not true_one_hot: enum_col_num -= 1 if include_nans and np.any(enum_arrays[:, indc]): enum_col_num += 1 new_temp_enum = np.zeros((num_row, enum_col_num[0])) one_hot_matrix = one_hot_encoding(enum_col_num) last_col_index = enum_col_num-1 # encode each enum using 1-hot encoding or plus reference value for indr in range(num_row): enum_val = enum_arrays[indr, indc] if true_one_hot: # not using true one hot new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 0, last_col_index) else: if enum_val: new_temp_enum[indr, :] = replace_with_encoded_bits(one_hot_matrix, enum_val, 1, last_col_index) if indc == 0: new_enum_arrays = new_temp_enum else: new_enum_arrays = np.concatenate((new_enum_arrays, new_temp_enum), axis=1) return np.concatenate((new_enum_arrays, dataset[:, enum_col:num_col]), axis=1) def replace_with_encoded_bits(one_hot_matrix, enum_val, add_value, last_col_index): """ Generate encoded bits for a categorical data value using one hot encoding. 
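    A minimal usage sketch (hypothetical values; relies on the one_hot_encoding helper defined below):

        one_hot = one_hot_encoding(3)                                 # 3 x 3 encoding matrix
        bits = replace_with_encoded_bits(one_hot, 1.0, 0, 2)          # row for enum value 1, true one-hot case
        nan_bits = replace_with_encoded_bits(one_hot, np.nan, 0, 2)   # last row is used when the value is nan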
:param one_hot_matrix: matrix representing the encoding of categorical data value to 1-hot encoding
    :param enum_val: categorical data value, could be np.nan
    :param add_value: set to 1 if a reference value is needed in addition to 1-hot encoding
    :param last_col_index: index into encoding for np.nan if exists
    :return: vector representing the encoded values for an enum value
    """
    if np.isnan(enum_val):  # if data value is np.nan
        return one_hot_matrix[last_col_index]
    else:
        return one_hot_matrix[int(enum_val-add_value)]


def one_hot_encoding(enum_level):
    """
    Generate the one_hot_encoding matrix given the number of enum_level.

    :param enum_level: integer representing the number of levels of the categorical column to be encoded
    :return: numpy array containing the one-hot encoding matrix for the enum_level specified.
        Note, enum_level <= 6
    """
    if enum_level >= 2:
        base_array = np.array([[0, 1], [1, 0]])     # for 2 enum levels

        for enum_index in range(3, enum_level+1):   # loop to build encoding for enum levels > 2
            (num_row, num_col) = base_array.shape
            col_zeros = np.asmatrix(np.zeros(num_row)).transpose()          # column of zeros
            base_array = np.concatenate((col_zeros, base_array), axis=1)    # add column of zeros

            row_zeros = np.asmatrix(np.zeros(num_row+1))    # add row of zeros
            row_zeros[0, 0] = 1                             # set first element to 1
            base_array = np.concatenate((base_array, row_zeros), axis=0)

        return base_array
    else:
        assert False, "enum_level must be >= 2."


def generate_response_glm(weight, x_mat, noise_std, family_type, class_method='probability',
                          class_margin=0.0, weightChange=False, even_distribution=True):
    """
    Generate response vector given weight matrix, predictors matrix for the GLM algo.

    :param weight: vector representing w in our formula to generate the response
    :param x_mat: random numpy matrix (2-D ndarray) containing the predictors
    :param noise_std: Gaussian noise standard deviation used to generate noise e to add to response
    :param family_type: string represents the various distribution families (Gaussian, multinomial, binomial)
        supported by our GLM algo
    :param class_method: string, optional, describing how we derive the final response from the class
        probabilities generated for binomial and multinomial family_type.  If set to 'probability', response y
        is generated randomly according to the class probabilities calculated.  If set to 'threshold',
        response y is set to the class with the maximum class probability if the maximum class probability
        exceeds the second highest class probability by the value set in the margin.  If the maximum class
        probability fails to be greater by margin than the second highest class probability, the data sample
        is discarded.
    :param weightChange: bool, optional, used by the ordinal family to regenerate the per-class thresholds
        stored in weight from the generated responses
    :param even_distribution: bool, optional, for the ordinal family with weightChange, generate classes with
        roughly equal counts when True and randomly sized classes otherwise
    :param class_margin: float, optional, denotes the threshold by how much the maximum class probability has
        to exceed the second highest class probability in order for us to keep the data set sample.
This field is only meaningful if class_method is set to 'threshold' :return: vector representing the response """ (num_row, num_col) = x_mat.shape temp_ones_col = np.asmatrix(np.ones(num_row)).transpose() x_mat = np.concatenate((temp_ones_col, x_mat), axis=1) response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1]) if 'ordinal' in family_type.lower(): (num_sample, num_class) = response_y.shape lastClass = num_class - 1 if weightChange: tresp = [] # generate the new y threshold for indP in range(num_sample): tresp.append(-response_y[indP,0]) tresp.sort() num_per_class = int(len(tresp)/num_class) if (even_distribution): for indC in range(lastClass): weight[0,indC] = tresp[(indC+1)*num_per_class] else: # do not generate evenly distributed class, generate randomly distributed classes splitInd = [] lowV = 0.1 highV = 1 v1 = 0 acc = 0 for indC in range(lastClass): tempf = random.uniform(lowV, highV) splitInd.append(v1+int(tempf*num_per_class)) v1 = splitInd[indC] # from last class acc += 1-tempf highV = 1+acc for indC in range(lastClass): # put in threshold weight[0,indC] = tresp[splitInd[indC]] response_y = x_mat * weight + noise_std * np.random.standard_normal([num_row, 1]) discrete_y = np.zeros((num_sample, 1), dtype=np.int) for indR in range(num_sample): discrete_y[indR, 0] = lastClass for indC in range(lastClass): if (response_y[indR, indC] >= 0): discrete_y[indR, 0] = indC break return discrete_y # added more to form Multinomial response if ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()): temp_mat = np.exp(response_y) # matrix of n by K where K = 1 for binomials if 'binomial' in family_type.lower(): ntemp_mat = temp_mat + 1 btemp_mat = temp_mat / ntemp_mat temp_mat = np.concatenate((1-btemp_mat, btemp_mat), axis=1) # inflate temp_mat to 2 classes response_y = derive_discrete_response(temp_mat, class_method, class_margin, family_type) return response_y def derive_discrete_response(prob_mat, class_method, class_margin, family_type='binomial'): """ This function is written to generate the final class response given the probabilities (Prob(y=k)). There are two methods that we use and is specified by the class_method. If class_method is set to 'probability', response y is generated randomly according to the class probabilities calculated. If set to 'threshold', response y is set to the class with the maximum class probability if the maximum class probability exceeds the second highest class probability by the value set in margin. If the maximum class probability fails to be greater by margin than the second highest class probability, the data sample will be discarded later by marking the final response as -1. :param prob_mat: probability matrix specifying the probability that y=k where k is a class :param class_method: string set to 'probability' or 'threshold' :param class_margin: if class_method='threshold', class_margin is the margin used to determine if a response is to be kept or discarded. :return: response vector representing class of y or -1 if an data sample is to be discarded. 
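    A minimal sketch (hypothetical probabilities) of the 'threshold' method:

        prob_mat = np.matrix([[0.7, 0.2, 0.1],
                              [0.3, 0.4, 0.3]])
        y = derive_discrete_response(prob_mat, 'threshold', 0.2)
        # first sample: 0.7 exceeds the runner-up 0.2 by 0.5 >= 0.2, so it keeps class 0
        # second sample: 0.4 exceeds 0.3 by only 0.1 < 0.2, so its response is set to -1 (discarded later)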
""" (num_sample, num_class) = prob_mat.shape lastCat = num_class-1 if 'probability' in class_method.lower(): prob_mat = normalize_matrix(prob_mat) discrete_y = np.zeros((num_sample, 1), dtype=np.int) if 'probability' in class_method.lower(): if 'ordinal' not in family_type.lower(): prob_mat = np.cumsum(prob_mat, axis=1) else: # for ordinal for indR in list(range(num_sample)): for indC in list(range(num_class)): prob_mat[indR, indC] = prob_mat[indR,indC]/prob_mat[indR,lastCat] random_v = np.random.uniform(0, 1, [num_sample, 1]) # choose the class that final response y belongs to according to the # probability prob(y=k) class_bool = random_v < prob_mat for indR in range(num_sample): for indC in range(num_class): if class_bool[indR, indC]: discrete_y[indR, 0] = indC break elif 'threshold' in class_method.lower(): discrete_y = np.argmax(prob_mat, axis=1) temp_mat = np.diff(np.sort(prob_mat, axis=1), axis=1) # check if max value exceeds second one by at least margin mat_diff = temp_mat[:, num_class-2] mat_bool = mat_diff < class_margin discrete_y[mat_bool] = -1 else: assert False, 'class_method should be set to "probability" or "threshold" only!' return discrete_y def normalize_matrix(mat): """ This function will normalize a matrix across each row such that the row sum is 1. :param mat: matrix containing prob(y=k) :return: normalized matrix containing prob(y=k) """ (n, K) = mat.shape kronmat = np.ones((1, K), dtype=float) row_sum = np.sum(mat, axis=1) row_sum_mat = np.kron(row_sum, kronmat) return mat/row_sum_mat def move_files(dir_path, old_name, new_file, action='move'): """ Simple function to move or copy a data set (old_name) to a special directory (dir_path) with new name (new_file) so that we will be able to re-run the tests if we have found something wrong with the algorithm under test with the data set. This is done to avoid losing the data set. :param dir_path: string representing full directory path where a file is to be moved to :param old_name: string representing file (filename with full directory path) to be moved to new directory. :param new_file: string representing the file name of the moved in the new directory :param action: string, optional, represent the action 'move' or 'copy' file :return: None """ new_name = os.path.join(dir_path, new_file) # generate new filename including directory path if os.path.isfile(old_name): # only move/copy file old_name if it actually exists if 'move' in action: motion = 'mv ' elif 'copy' in action: motion = 'cp ' else: assert False, "Illegal action setting. It can only be 'move' or 'copy'!" cmd = motion+old_name+' '+new_name # generate cmd line string to move the file subprocess.call(cmd, shell=True) def remove_files(filename): """ Simple function to remove data set saved in filename if the dynamic test is completed with no error. Some data sets we use can be rather big. This is performed to save space. :param filename: string representing the file to be removed. Full path is included. :return: None """ cmd = 'rm ' + filename subprocess.call(cmd, shell=True) def random_col_duplication(num_cols, duplication_threshold, max_number, to_scale, max_scale_factor): """ This function will randomly determine for each column if it should be duplicated. If it is to be duplicated, how many times, the duplication should be. In addition, a scaling factor will be randomly applied to each duplicated column if enabled. 
:param num_cols: integer representing number of predictors used :param duplication_threshold: threshold to determine if a column is to be duplicated. Set this number to be low if you want to encourage column duplication and vice versa :param max_number: maximum number of times a column is to be duplicated :param to_scale: bool indicating if a duplicated column is to be scaled :param max_scale_factor: real representing maximum scale value for repeated columns :return: a tuple containing two vectors: col_return, col_scale_return. col_return: vector indicating the column indices of the original data matrix that will be included in the new data matrix with duplicated columns col_scale_return: vector indicating for each new column in the new data matrix with duplicated columns, what scale should be applied to that column. """ col_indices = list(range(num_cols)) # contains column indices of predictors in original data set col_scales = [1]*num_cols # scaling factor for original data set, all ones. for ind in range(num_cols): # determine for each column if to duplicate it temp = random.uniform(0, 1) # generate random number from 0 to 1 if temp > duplication_threshold: # duplicate column if random number generated exceeds duplication_threshold rep_num = random.randint(1, max_number) # randomly determine how many times to repeat a column more_col_indices = [ind]*rep_num col_indices.extend(more_col_indices) temp_scale = [] for ind in range(rep_num): if to_scale: # for each duplicated column, determine a scaling factor to multiply the column with temp_scale.append(random.uniform(0, max_scale_factor)) else: temp_scale.append(1) col_scales.extend(temp_scale) # randomly shuffle the predictor column orders and the corresponding scaling factors new_col_indices = list(range(len(col_indices))) random.shuffle(new_col_indices) col_return = [col_indices[i] for i in new_col_indices] col_scale_return = [col_scales[i] for i in new_col_indices] return col_return, col_scale_return def duplicate_scale_cols(col_indices, col_scale, old_filename, new_filename): """ This function actually performs the column duplication with scaling giving the column indices and scaling factors for each column. It will first load the original data set from old_filename. After performing column duplication and scaling, the new data set will be written to file with new_filename. 
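    A minimal usage sketch (hypothetical file names), typically chained after random_col_duplication:

        col_idx, col_scale = random_col_duplication(4, 0.5, 2, True, 3)
        duplicate_scale_cols(col_idx, col_scale, "train.csv", "train_duplicated.csv")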
:param col_indices: vector indicating the column indices of the original data matrix that will be included in the new data matrix with duplicated columns :param col_scale: vector indicating for each new column in the new data matrix with duplicated columns, what scale should be applied to that column :param old_filename: string representing full directory path and filename where data set is stored :param new_filename: string representing full directory path and filename where new data set is to be stored :return: None """ # pd_frame = pd.read_csv(old_filename, header=None) # read in original data set # # pd_frame_new = pd.DataFrame() # new empty data frame # # for ind in range(len(col_indices)): # for each column # tempc = pd_frame.ix[:, col_indices[ind]]*col_scale[ind] # extract a column from old data frame and scale it # pd_frame_new = pd.concat([pd_frame_new, tempc], axis=1) # add it to the new data frame np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None)) (num_row, num_col) = np_frame.shape np_frame_new = np.asmatrix(np.zeros((num_row, len(col_indices)), dtype=np.float)) for ind in range(len(col_indices)): np_frame_new[:, ind] = np_frame[:, col_indices[ind]]*col_scale[ind] # done changing the data frame. Save it in a new file np.savetxt(new_filename, np_frame_new, delimiter=",") def insert_nan_in_data(old_filename, new_filename, missing_fraction): """ Give the filename of a data set stored in old_filename, this function will randomly determine for each predictor to replace its value with nan or not with probability missing_frac. The new data set will be stored in filename new_filename. :param old_filename: string representing full directory path and filename where data set is stored :param new_filename: string representing full directory path and filename where new data set with missing values is to be stored :param missing_fraction: real value representing the probability of replacing a predictor with nan. :return: None """ # pd_frame = pd.read_csv(old_filename, header=None) # read in a dataset np_frame = np.asmatrix(np.genfromtxt(old_filename, delimiter=',', dtype=None)) (row_count, col_count) = np_frame.shape random_matrix = np.random.uniform(0, 1, [row_count, col_count-1]) for indr in range(row_count): # for each predictor value, determine if to replace value with nan for indc in range(col_count-1): if random_matrix[indr, indc] < missing_fraction: np_frame[indr, indc] = np.nan # save new data set with missing values to new file np.savetxt(new_filename, np_frame, delimiter=",") # pd_frame.to_csv(new_filename, sep=',', header=False, index=False, na_rep='nan') def print_message_values(start_string, nump_array): """ This function prints the value of a nump_array with a string message in front of it. :param start_string: string representing message to be printed :param nump_array: array storing something :return: None """ print(start_string) print(nump_array) def show_test_results(test_name, curr_test_val, new_test_val): """ This function prints the test execution results which can be passed or failed. A message will be printed on screen to warn user of the test result. :param test_name: string representing test name :param curr_test_val: integer representing number of tests failed so far before the test specified in test_name is executed :param new_test_val: integer representing number of tests failed after the test specified in test_name is executed :return: integer: 0 if test passed and 1 if test faild. """ failed_string = "Ooops, " + test_name + " failed. 
I am sorry..."
    pass_string = "Yeah, " + test_name + " passed!"

    if (curr_test_val < new_test_val):   # this test has failed
        print(failed_string)
        return 1
    else:
        print(pass_string)
        return 0


def assert_H2OTwoDimTable_equal(table1, table2, col_header_list, tolerance=1e-6, check_sign=False, check_all=True,
                                num_per_dim=10):
    """
    This method compares two H2OTwoDimTables and verifies that their difference is less than the value set in
    tolerance.  It is probably overkill, since I have assumed that the names in col_header_list may not be in
    the same order as the names in table.cell_values[ind][0].  In addition, I do not assume an order for the
    names in table.cell_values[ind][0] either, for there is no reason for an order to exist.  To limit the
    test run time, we can test a randomly sampled subset of points instead of all points.

    :param table1: H2OTwoDimTable to be compared
    :param table2: the other H2OTwoDimTable to be compared
    :param col_header_list: list of strings denoting the names (table.cell_values[ind][0]) over which the
        comparison is to be performed
    :param tolerance: relative tolerance allowed between corresponding entries, defaults to 1e-6
    :param check_sign: bool, determine if the sign of values are important or not.  For eigenvectors, they are not.
    :param check_all: bool, determine if we need to compare every single element
    :param num_per_dim: integer, number of elements to sample per dimension when check_all is False.  There are
        three dimensions here: rows of table1, rows of table2 and the entries within each matched row.
    :return: None if comparison succeeded and raise an error if comparison failed for whatever reason
    """
    num_comparison = len(set(col_header_list))
    size1 = len(table1.cell_values)
    size2 = len(table2.cell_values)
    worst_error = 0

    assert size1==size2, "The two H2OTwoDimTables are of different size!"
    assert num_comparison<=size1, "H2OTwoDimTable does not have all the attributes specified in col_header_list."
    flip_sign_vec = generate_sign_vec(table1, table2) if check_sign else [1]*len(table1.cell_values[0])  # correct for sign change for eigenvector comparisons
    randRange1 = generate_for_indices(len(table1.cell_values), check_all, num_per_dim, 0)
    randRange2 = generate_for_indices(len(table2.cell_values), check_all, num_per_dim, 0)

    for ind in range(num_comparison):
        col_name = col_header_list[ind]
        next_name=False

        for name_ind1 in randRange1:
            if col_name!=str(table1.cell_values[name_ind1][0]):
                continue

            for name_ind2 in randRange2:
                if not(col_name==str(table2.cell_values[name_ind2][0])):
                    continue

                # now we have the col header names, do the actual comparison
                if str(table1.cell_values[name_ind1][0])==str(table2.cell_values[name_ind2][0]):
                    randRange3 = generate_for_indices(min(len(table2.cell_values[name_ind2]),
                                                          len(table1.cell_values[name_ind1])), check_all, num_per_dim,1)
                    for indC in randRange3:
                        val1 = table1.cell_values[name_ind1][indC]
                        val2 = table2.cell_values[name_ind2][indC]*flip_sign_vec[indC]

                        if isinstance(val1, float) and isinstance(val2, float):
                            compare_val_ratio = abs(val1-val2)/max(1, abs(val1), abs(val2))
                            if compare_val_ratio > tolerance:
                                print("Table entry difference is {0}".format(compare_val_ratio))
                                assert False, "Table entries are not equal within tolerance."

                            worst_error = max(worst_error, compare_val_ratio)
                        else:
                            assert False, "Tables contain non-numerical values.  Comparison is for numericals only!"
                    next_name=True
                    break
                else:
                    assert False, "Unknown metric names found in col_header_list."
            if next_name:   # ready to go to the next name in col_header_list
                break
    print("******* Congrats! Test passed.  Maximum difference of your comparison is {0}".format(worst_error))


def generate_for_indices(list_size, check_all, num_per_dim, start_val):
    if check_all:
        return list(range(start_val, list_size))
    else:
        randomList = list(range(start_val, list_size))
        random.shuffle(randomList)
        return randomList[0:min(list_size, num_per_dim)]


def generate_sign_vec(table1, table2):
    sign_vec = [1]*len(table1.cell_values[0])

    for indC in range(1, len(table2.cell_values[0])):  # may need to look at other elements since some may be zero
        for indR in range(0, len(table2.cell_values)):
            if (abs(table1.cell_values[indR][indC]) > 0) and (abs(table2.cell_values[indR][indC]) > 0):
                sign_vec[indC] = int(np.sign(table1.cell_values[indR][indC]) * np.sign(table2.cell_values[indR][indC]))
                # if (np.sign(table1.cell_values[indR][indC])!=np.sign(table2.cell_values[indR][indC])):
                #     sign_vec[indC] = -1
                # else:
                #     sign_vec[indC] = 1
                break       # found what we need.  Go to next column

    return sign_vec


def equal_two_arrays(array1, array2, eps, tolerance, throwError=True):
    """
    This function will compare the values of two python tuples.  First, if the values are below eps, which
    denotes the significance level that we care about, no comparison is performed.  Next, False is returned
    if the difference between any elements of the two arrays exceeds some tolerance.

    :param array1: numpy array containing some values of interest
    :param array2: numpy array containing some values of interest that we would like to compare with array1
    :param eps: significance level that we care about in order to perform the comparison
    :param tolerance: threshold for which we allow the two array elements to be different by
    :param throwError: bool, optional, if True raise an assertion error instead of returning False when a
        check fails

    :return: True if elements in array1 and array2 are close and False otherwise
    """
    size1 = len(array1)
    if size1 == len(array2):    # arrays must be the same size
        # compare two arrays
        for ind in range(size1):
            if not ((array1[ind] < eps) and (array2[ind] < eps)):
                # values to be compared are not too small, perform comparison

                # look at differences between elements of array1 and array2
                compare_val_h2o_Py = abs(array1[ind] - array2[ind])

                if compare_val_h2o_Py > tolerance:    # difference is too high, return false
                    if throwError:
                        assert False, "The two arrays are not equal in value."
                    else:
                        return False

        return True     # return True, elements of two arrays are close enough
    else:
        if throwError:
            assert False, "The two arrays are of different size!"
        else:
            return False


def equal_2D_tables(table1, table2, tolerance=1e-6):
    """
    This function will compare the values of two 2-D tables (lists of lists or tuples) element by element.
    False is returned if the difference between any pair of float entries exceeds tolerance.

    :param table1: 2-D table containing some values of interest
    :param table2: 2-D table containing some values of interest that we would like to compare with table1
    :param tolerance: threshold by which we allow corresponding float entries in the two tables to differ

    :return: True if float entries in table1 and table2 are close and False otherwise
    """
    size1 = len(table1)
    if size1 == len(table2):    # tables must be the same size
        # compare two tables
        for ind in range(size1):
            if len(table1[ind]) == len(table2[ind]):
                for ind2 in range(len(table1[ind])):
                    if type(table1[ind][ind2]) == float:
                        if abs(table1[ind][ind2]-table2[ind][ind2]) > tolerance:
                            return False
            else:
                assert False, "The two arrays are of different size!"
return True else: assert False, "The two arrays are of different size!" def compare_two_arrays(array1, array2, eps, tolerance, comparison_string, array1_string, array2_string, error_string, success_string, template_is_better, just_print=False): """ This function is written to print out the performance comparison results for various values that we care about. It will return 1 if the values of the two arrays exceed threshold specified in tolerance. The actual comparison is performed by calling function equal_two_array. :param array1: numpy array containing some values of interest :param array2: numpy array containing some values of interest that we would like to compare it with array1 :param eps: significance level that we care about in order to perform the comparison :param tolerance: threshold for which we allow the two array elements to be different by :param comparison_string: string stating what the comparison is about, e.g. "Comparing p-values ...." :param array1_string: string stating what is the array1 attribute of interest, e.g. "H2O p-values: " :param array2_string: string stating what is the array2 attribute of interest, e.g. "Theoretical p-values: " :param error_string: string stating what you want to say if the difference between array1 and array2 exceeds tolerance, e.g "P-values are not equal!" :param success_string: string stating what you want to say if the difference between array1 and array2 does not exceed tolerance "P-values are close enough!" :param template_is_better: bool, True, will return 1 if difference among elements of array1 and array2 exceeds tolerance. False, will always return 0 even if difference among elements of array1 and array2 exceeds tolerance. In this case, the system under test actually performs better than the template. :param just_print: bool if True will print attribute values without doing comparison. False will print attribute values and perform comparison :return: if template_is_better = True, return 0 if elements in array1 and array2 are close and 1 otherwise; if template_is_better = False, will always return 0 since system under tests performs better than template system. """ # display array1, array2 with proper description print(comparison_string) print(array1_string, array1) print(array2_string, array2) if just_print: # just print the two values and do no comparison return 0 else: # may need to actually perform comparison if template_is_better: try: assert equal_two_arrays(array1, array2, eps, tolerance), error_string print(success_string) sys.stdout.flush() return 0 except: sys.stdout.flush() return 1 else: print("Test result is actually better than comparison template!") return 0 def make_Rsandbox_dir(base_dir, test_name, make_dir): """ This function will remove directory "Rsandbox/test_name" off directory base_dir and contents if it exists. If make_dir is True, it will create a clean directory "Rsandbox/test_name" off directory base_dir. :param base_dir: string contains directory path where we want to build our Rsandbox/test_name off from :param test_name: string contains unit test name that the Rsandbox is created for :param make_dir: bool, True: will create directory baseDir/Rsandbox/test_name, False: will not create directory. :return: syndatasets_dir: string containing the full path of the directory name specified by base_dir, test_name """ # create the Rsandbox directory path for the test. 
syndatasets_dir = os.path.join(base_dir, "Rsandbox_" + test_name)

    if os.path.exists(syndatasets_dir):     # remove Rsandbox directory if it exists
        shutil.rmtree(syndatasets_dir)

    if make_dir:    # create Rsandbox directory if make_dir is True
        os.makedirs(syndatasets_dir)
    return syndatasets_dir


def get_train_glm_params(model, what_param, family_type='gaussian'):
    """
    This function will grab the various attributes (like coefficients, p-values, and others) off a GLM
    model that has been built.

    :param model: GLM model that we want to extract information from
    :param what_param: string indicating the model attribute of interest like 'p-values', 'weights', ...
    :param family_type: string, optional, represents the various distribution families (gaussian, multinomial,
        binomial) supported by our GLM algo
    :return: attribute value of interest
    """
    coeff_pvalues = model._model_json["output"]["coefficients_table"].cell_values
    if what_param == 'p-values':
        if 'gaussian' in family_type.lower():
            p_value_h2o = []

            for ind in range(len(coeff_pvalues)):
                p_value_h2o.append(coeff_pvalues[ind][-1])
            return p_value_h2o

        else:
            assert False, "P-values are only available to Gaussian family."

    elif what_param == 'weights':
        if 'gaussian' in family_type.lower():
            weights = []

            for ind in range(len(coeff_pvalues)):
                weights.append(coeff_pvalues[ind][1])
            return weights
        elif ('multinomial' in family_type.lower()) or ('binomial' in family_type.lower()):
            # for multinomial, the coefficients are organized as features by number of classes for
            # nonstandardized and then standardized weights.  Need to grab the correct matrix as
            # number of classes by n_features matrix
            num_feature = len(coeff_pvalues)
            num_class = int((len(coeff_pvalues[0])-1)/2)    # integer so np.zeros gets a valid dimension

            coeffs = np.zeros((num_class, num_feature), dtype=np.float)

            end_index = int(num_class+1)
            for col_index in range(len(coeff_pvalues)):
                coeffs[:, col_index] = coeff_pvalues[col_index][1:end_index]

            return coeffs
    elif what_param == 'best_lambda':
        lambda_str = model._model_json["output"]["model_summary"].cell_values[0][4].split('=')
        return float(str(lambda_str[-2]).split(',')[0])
    elif what_param == 'confusion_matrix':
        if 'multinomial' in family_type.lower():
            return model._model_json["output"]["training_metrics"]._metric_json["cm"]["table"]
        elif 'binomial' in family_type.lower():
            return model.confusion_matrix().table
    else:
        assert False, "parameter value not found in GLM model"


def less_than(val1, val2):
    """
    Simple function that returns True if val1 <= val2 and False otherwise.

    :param val1: first value of interest
    :param val2: second value of interest
    :return: bool: True if val1 <= val2 and False otherwise
    """
    if round(val1, 3) <= round(val2, 3):    # only care to the 3rd position after decimal point
        return True
    else:
        return False


def replace_nan_with_mean(data_with_nans, nans_row_col_indices, col_means):
    """
    Given a data set with nans, row and column indices of where the nans are and the col_means, this
    function will replace the nans with the corresponding col_means.
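    A minimal usage sketch (hypothetical values):

        data = np.array([[1.0, np.nan], [3.0, 4.0]])
        nan_idx = np.where(np.isnan(data))       # row and column indices of the nans
        col_means = np.nanmean(data, axis=0)     # column means ignoring nans: [2.0, 4.0]
        clean = replace_nan_with_mean(data, nan_idx, col_means)   # the nan at (0, 1) becomes 4.0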
:param data_with_nans: data set matrix with nans
    :param nans_row_col_indices: matrix containing the row and column indices of where the nans are
    :param col_means: vector containing the column means of data_with_nans
    :return: data_with_nans: data set with nans replaced with column means
    """
    num_NAs = len(nans_row_col_indices[0])

    for ind in range(num_NAs):
        data_with_nans[nans_row_col_indices[0][ind], nans_row_col_indices[1][ind]] = \
            col_means[nans_row_col_indices[1][ind]]

    return data_with_nans


def remove_csv_files(dir_path, suffix=".csv", action='remove', new_dir_path=""):
    """
    Given a directory, this function will gather all files ending with the string specified in suffix.  Next,
    it is going to delete those files if action is set to 'remove'.  If action is set to 'copy', a
    new_dir_path must be specified and the files ending with suffix will be copied to that new directory
    instead.

    :param dir_path: string representing full path to directory of interest
    :param suffix: string representing suffix of filenames that are to be found and deleted
    :param action: string, optional, denote the action to perform on files, 'remove' or 'copy'
    :param new_dir_path: string, optional, representing full path to new directory
    :return: None
    """
    filenames = os.listdir(dir_path)    # list all files in directory

    # only collect files with filename ending with suffix
    to_remove = [filename for filename in filenames if filename.endswith(suffix)]

    # delete files ending with suffix
    for fn in to_remove:
        temp_fn = os.path.join(dir_path, fn)

        # only remove if file actually exists.
        if os.path.isfile(temp_fn):
            if 'remove' in action:
                remove_files(temp_fn)
            elif 'copy' in action:
                move_files(new_dir_path, temp_fn, fn, action=action)
            else:
                assert False, "action string can only be 'remove' or 'copy'!"


def extract_comparison_attributes_and_print(model_h2o, h2o_model_test_metrics, end_test_str, want_p_values,
                                            attr1_bool, attr2_bool, att1_template, att2_template, att3_template,
                                            att4_template, compare_att1_str, h2o_att1_str, template_att1_str,
                                            att1_str_fail, att1_str_success, compare_att2_str, h2o_att2_str,
                                            template_att2_str, att2_str_fail, att2_str_success, compare_att3_str,
                                            h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success,
                                            compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail,
                                            att4_str_success, failed_test_number, ignored_eps, allowed_diff,
                                            noise_var, template_must_be_better, attr3_bool=True, attr4_bool=True):
    """
    This function basically will compare four attributes (weight, p-values, training data MSE, test data MSE)
    of a test with a template model.  If the difference of comparison exceeds a certain threshold, the test
    will be determined as failed and vice versa.  There are times when we do not care about p-values and/or
    weight comparisons but are mainly concerned with MSEs.  We can set the input parameters to indicate if
    this is the case.

    :param model_h2o: H2O model that we want to evaluate
    :param h2o_model_test_metrics: test performance of H2O model under evaluation
    :param end_test_str: string representing end test banner to be printed
    :param want_p_values: bool True if we want to care about p-values and False if we don't
    :param attr1_bool: bool True if we want to compare weight difference between H2O model and template model
        and False otherwise.
    :param attr2_bool: bool True if we want to compare p-value difference between H2O model and template model
        and False otherwise.
:param att1_template: value of first template attribute, the weight vector :param att2_template: value of second template attribute, the p-value vector :param att3_template: value of third template attribute, the training data set MSE :param att4_template: value of fourth template attribute, the test data set MSE :param compare_att1_str: string describing the comparison of first attribute, e.g. "Comparing intercept and weights ...." :param h2o_att1_str: string describing H2O model first attribute values, e.g. "H2O intercept and weights: " :param template_att1_str: string describing template first attribute values, e.g. "Theoretical intercept and weights: " :param att1_str_fail: string describing message to print out if difference exceeds threshold, e.g. "Intercept and weights are not equal!" :param att1_str_success: string describing message to print out if difference < threshold, e.g. "Intercept and weights are close enough!" :param compare_att2_str: string describing the comparison of first attribute, e.g. "Comparing p-values ...." :param h2o_att2_str: string describing H2O model first attribute values, e.g. "H2O p-values: " :param template_att2_str: string describing template first attribute values, e.g. "Theoretical p-values: " :param att2_str_fail: string describing message to print out if difference exceeds threshold, e.g. "P-values are not equal!" :param att2_str_success: string describing message to print out if difference < threshold, e.g. "P-values are close enough!" :param compare_att3_str: string describing the comparison of first attribute, e.g. "Comparing training MSEs ...." :param h2o_att3_str: string describing H2O model first attribute values, e.g. "H2O training MSE: " :param template_att3_str: string describing template first attribute values, e.g. "Theoretical train MSE: " :param att3_str_fail: string describing message to print out if difference exceeds threshold, e.g. "Training MSEs are not equal!" :param att3_str_success: string describing message to print out if difference < threshold, e.g. "Training MSEs are close enough!" :param compare_att4_str: string describing the comparison of first attribute, e.g. "Comparing test MSEs ...." :param h2o_att4_str: string describing H2O model first attribute values, e.g. "H2O test MSE: " :param template_att4_str: string describing template first attribute values, e.g. "Theoretical test MSE: " :param att4_str_fail: string describing message to print out if difference exceeds threshold, e.g. "Test MSEs are not equal!" :param att4_str_success: string describing message to print out if difference < threshold, e.g. "Test MSEs are close enough!" 
:param failed_test_number: integer denote the number of tests failed :param ignored_eps: if value < than this value, no comparison is performed :param allowed_diff: threshold if exceeded will fail a test :param noise_var: Gaussian noise variance used to generate data set :param template_must_be_better: bool: True: template value must be lower, False: don't care :param attr3_bool: bool denoting if we should compare attribute 3 values :param attr4_bool: bool denoting if we should compare attribute 4 values :return: a tuple containing test h2o model training and test performance metrics that include: weight, pValues, mse_train, r2_train, mse_test, r2_test """ # grab weight from h2o model test1_weight = get_train_glm_params(model_h2o, 'weights') # grab p-values from h2o model test1_p_values = [] if want_p_values: test1_p_values = get_train_glm_params(model_h2o, 'p-values') # grab other performance metrics test1_mse_train = model_h2o.mse() test1_r2_train = model_h2o.r2() test1_mse_test = h2o_model_test_metrics.mse() test1_r2_test = h2o_model_test_metrics.r2() # compare performances of template and h2o model weights failed_test_number += compare_two_arrays(test1_weight, att1_template, ignored_eps, allowed_diff*100, compare_att1_str, h2o_att1_str, template_att1_str, att1_str_fail, att1_str_success, attr1_bool) # p-values if want_p_values: if np.isnan(np.asarray(test1_p_values)).any(): # p-values contain nan failed_test_number += 1 failed_test_number += compare_two_arrays(test1_p_values, att2_template, ignored_eps, allowed_diff, compare_att2_str, h2o_att2_str, template_att2_str, att2_str_fail, att2_str_success, attr2_bool) # Training MSE need_to_compare = less_than(att3_template, test1_mse_train) # in some cases, template value should always be better. Training data MSE should always # be better without regularization than with regularization if (not need_to_compare) and template_must_be_better: failed_test_number += 1 failed_test_number += compare_two_arrays([test1_mse_train], [att3_template], ignored_eps, noise_var, compare_att3_str, h2o_att3_str, template_att3_str, att3_str_fail, att3_str_success, attr3_bool) # Test MSE need_to_compare = less_than(att4_template, test1_mse_test) failed_test_number += compare_two_arrays([test1_mse_test], [att4_template], ignored_eps, noise_var, compare_att4_str, h2o_att4_str, template_att4_str, att4_str_fail, att4_str_success, need_to_compare, attr4_bool) # print end test banner print(end_test_str) print("*******************************************************************************************") sys.stdout.flush() return test1_weight, test1_p_values, test1_mse_train, test1_r2_train, test1_mse_test,\ test1_r2_test, failed_test_number def extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics, family_type, end_test_str, compare_att_str=["", "", "", "", "", "", ""], h2o_att_str=["", "", "", "", "", "", ""], template_att_str=["", "", "", "", "", "", ""], att_str_fail=["", "", "", "", "", "", ""], att_str_success=["", "", "", "", "", "", ""], test_model=None, test_model_metric=None, template_params=None, can_be_better_than_template=[ False, False, False, False, False, False], just_print=[True, True, True, True, True, True], ignored_eps=1e-15, allowed_diff=1e-5, failed_test_number=0): """ This function basically will compare and print out six performance metrics of a test with a template model. If the difference of comparison exceeds a certain threshold, the test will be determined as failed and vice versa. 
There are times when we do not care about comparisons but mainly concerned with logloss/prediction accuracy in determining if a test shall fail. We can set the input parameters to indicate if this is the case. :param model_h2o: H2O model that we want to evaluate :param h2o_model_test_metrics: test performance of H2O model under evaluation :param family_type: string represents the various distribution families (gaussian, multinomial, binomial) supported by our GLM algo :param end_test_str: string to be printed at the end of a test :param compare_att_str: array of strings describing what we are trying to compare :param h2o_att_str: array of strings describing each H2O attribute of interest :param template_att_str: array of strings describing template attribute of interest :param att_str_fail: array of strings to be printed if the comparison failed :param att_str_success: array of strings to be printed if comparison succeeded :param test_model: template model whose attributes we want to compare our H2O model with :param test_model_metric: performance on test data set of template model :param template_params: array containing template attribute values that we want to compare our H2O model with :param can_be_better_than_template: array of bool: True: template value must be lower, False: don't care :param just_print: array of bool for each attribute if True, no comparison is performed, just print the attributes and if False, will compare the attributes and print the attributes as well :param ignored_eps: if value < than this value, no comparison is performed :param allowed_diff: threshold if exceeded will fail a test :param failed_test_number: integer denote the number of tests failed so far :return: accumulated number of tests that have failed so far """ # grab performance metrics from h2o model (h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test, h2o_confusion_matrix_test, h2o_accuracy_test) = grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type) # grab performance metrics from template model if test_model and test_model_metric: (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train, template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = \ grab_model_params_metrics(test_model, test_model_metric, family_type) elif template_params: # grab template comparison values from somewhere else (template_weight, template_logloss_train, template_confusion_matrix_train, template_accuracy_train, template_logloss_test, template_confusion_matrix_test, template_accuracy_test) = template_params else: assert False, "No valid template parameters are given for comparison." 
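    # The comparisons below walk through the description lists in a fixed order shared by all of the
    # *_str arguments: index 0 = weights, 1 = training logloss, 2 = test logloss,
    # 3 = training confusion matrix, 4 = test confusion matrix, 5 = training accuracy, 6 = test accuracy.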
# print and/or compare the weights between template and H2O compare_index = 0 failed_test_number += compare_two_arrays(h2o_weight, template_weight, ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], True, just_print[compare_index]) compare_index += 1 # this is logloss from training data set, if not(just_print[compare_index]) and not(can_be_better_than_template[compare_index]): if (h2o_logloss_train < template_logloss_train) and \ (abs(h2o_logloss_train-template_logloss_train) > 1e-5): # H2O performed better than template which is not allowed failed_test_number += 1 # increment failed_test_number and just print the results compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], True, True) else: failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], True, False) else: template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index], h2o_logloss_train, template_logloss_train, False) # print and compare the logloss between template and H2O for training data failed_test_number += compare_two_arrays([h2o_logloss_train], [template_logloss_train], ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], template_better, just_print[compare_index]) compare_index += 1 template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index], h2o_logloss_test, template_logloss_test, False) # print and compare the logloss between template and H2O for test data failed_test_number += compare_two_arrays([h2o_logloss_test], [template_logloss_test], ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], template_better, just_print[compare_index]) compare_index += 1 # print the confusion matrix from training data failed_test_number += compare_two_arrays(h2o_confusion_matrix_train, template_confusion_matrix_train, ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], True, just_print[compare_index]) compare_index += 1 # print the confusion matrix from test data failed_test_number += compare_two_arrays(h2o_confusion_matrix_test, template_confusion_matrix_test, ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], True, just_print[compare_index]) compare_index += 1 template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index], h2o_accuracy_train, template_accuracy_train, True) # print accuracy from training dataset failed_test_number += compare_two_arrays([h2o_accuracy_train], [template_accuracy_train], ignored_eps, allowed_diff, compare_att_str[compare_index], 
h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], template_better, just_print[compare_index]) compare_index += 1 # print accuracy from test dataset template_better = is_template_better(just_print[compare_index], can_be_better_than_template[compare_index], h2o_accuracy_test, template_accuracy_test, True) failed_test_number += compare_two_arrays([h2o_accuracy_test], [template_accuracy_test], ignored_eps, allowed_diff, compare_att_str[compare_index], h2o_att_str[compare_index], template_att_str[compare_index], att_str_fail[compare_index], att_str_success[compare_index], template_better, just_print[compare_index]) # print end test banner print(end_test_str) print("*******************************************************************************************") sys.stdout.flush() return failed_test_number def is_template_better(just_print, can_be_better_than_template, h2o_att, template_att, bigger_is_better): """ This function is written to determine if the system under test performs better than the template model performance. :param just_print: bool representing if we are just interested in printing the attribute values :param can_be_better_than_template: bool stating that it is okay in this case for the system under test to perform better than the template system. :param h2o_att: number representing the h2o attribute under test :param template_att: number representing the template attribute :param bigger_is_better: bool representing if metric is perceived to be better if its value is higher :return: bool indicating if the template attribute is better. """ if just_print: # not interested in comparison, just want to print attribute values return True # does not matter what we return here else: if bigger_is_better: # metric is better if it is greater return not(h2o_att > template_att) else: # metric is better if it is less return not(h2o_att < template_att) def grab_model_params_metrics(model_h2o, h2o_model_test_metrics, family_type): """ This function will extract and return the various metrics from a H2O GLM model and the corresponding H2O model test metrics. 
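    A minimal usage sketch (assuming a trained binomial GLM model and its test metrics already exist;
    glm_model and test_metrics are placeholder names):

        (weights, logloss_train, cm_train, acc_train,
         logloss_test, cm_test, acc_test) = grab_model_params_metrics(glm_model, test_metrics, 'binomial')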
:param model_h2o: GLM H2O model :param h2o_model_test_metrics: performance on test data set from H2O GLM model :param family_type: string representing 'gaussian', 'binomial' or 'multinomial' :return: tuple containing weight, logloss/confusion matrix/prediction accuracy calculated from training data set and test data set respectively """ # grab weight from h2o model h2o_weight = get_train_glm_params(model_h2o, 'weights', family_type=family_type) # grab other performance metrics h2o_logloss_train = model_h2o.logloss() h2o_confusion_matrix_train = get_train_glm_params(model_h2o, 'confusion_matrix', family_type=family_type) last_index = len(h2o_confusion_matrix_train.cell_values)-1 h2o_logloss_test = h2o_model_test_metrics.logloss() if 'multinomial' in family_type.lower(): h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix() h2o_accuracy_train = 1-h2o_confusion_matrix_train.cell_values[last_index][last_index] h2o_accuracy_test = 1-h2o_confusion_matrix_test.cell_values[last_index][last_index] elif 'binomial' in family_type.lower(): h2o_confusion_matrix_test = h2o_model_test_metrics.confusion_matrix().table real_last_index = last_index+1 h2o_accuracy_train = 1-float(h2o_confusion_matrix_train.cell_values[last_index][real_last_index]) h2o_accuracy_test = 1-float(h2o_confusion_matrix_test.cell_values[last_index][real_last_index]) else: assert False, "Only 'multinomial' and 'binomial' distribution families are supported for " \ "grab_model_params_metrics function!" return h2o_weight, h2o_logloss_train, h2o_confusion_matrix_train, h2o_accuracy_train, h2o_logloss_test,\ h2o_confusion_matrix_test, h2o_accuracy_test def prepare_data_sklearn_multinomial(training_data_xy): """ Sklearn model requires that the input matrix should contain a column of ones in order for it to generate the intercept term. In addition, it wants the response vector to be in a certain format as well. :param training_data_xy: matrix containing both the predictors and response column :return: tuple containing the predictor columns with a column of ones as the first column and the response vector in the format that Sklearn wants. """ (num_row, num_col) = training_data_xy.shape # change response to be enum and not real y_ind = num_col-1 training_data_xy[y_ind] = training_data_xy[y_ind].astype(int) # prepare response column for sklearn logistic regression response_y = training_data_xy[:, y_ind] response_y = np.ravel(response_y) training_data = training_data_xy[:, range(0, y_ind)] # added column of ones into data matrix X_MAT temp_ones = np.asmatrix(np.ones(num_row)).transpose() x_mat = np.concatenate((temp_ones, training_data), axis=1) return response_y, x_mat def get_gridables(params_in_json): """ This function is written to walk through all parameters of a model and grab the parameters, its type and its default values as three lists of all the gridable parameters. :param params_in_json: a list of parameters associated with a H2O model. Each list is a dict containing fields of interest like name, type, gridable, default values, .... 
    :return: three lists: gridable_params, gridable_types and gridable_defaults containing the names of the
        parameters, their associated types (int, float, unicode, bool) and their default values
    """
    # grab all gridable parameters and their types
    gridable_parameters = []
    gridable_types = []
    gridable_defaults = []

    for each_param in params_in_json:
        if each_param['gridable']:
            gridable_parameters.append(str(each_param["name"]))
            gridable_types.append(each_param["type"])

            # compare the type name, not the type object, so unicode defaults are actually caught
            if type(each_param["default_value"]).__name__ == 'unicode':    # hyper-parameters cannot be unicode
                gridable_defaults.append(str(each_param["default_value"]))
            else:
                gridable_defaults.append(each_param["default_value"])

    return gridable_parameters, gridable_types, gridable_defaults


def add_fold_weights_offset_columns(h2o_frame, nfold_max_weight_offset, column_names, column_type='fold_assignment'):
    """
    Add fold_columns to the H2O training frame specified in h2o_frame according to nfold.  The newly added
    columns should use the names in column_names.  Returns a h2o_frame with the newly added fold_columns.
    Copied from Eric's code.

    :param h2o_frame: H2O frame containing training data
    :param nfold_max_weight_offset: integer, number of folds in the cross-validation, or maximum weight scale or offset
    :param column_names: list of strings denoting the column names for the new fold columns
    :param column_type: optional string denoting whether we are trying to generate fold_assignment or
        weights_column or offset_column
    :return: H2O frame with added fold column assignments
    """
    number_row = h2o_frame.nrow

    # copied this part from Eric's code
    for index in range(len(column_names)):
        if 'fold_assignment' in column_type:
            temp_a = np.random.random_integers(0, nfold_max_weight_offset - 1, [number_row, 1])    # inclusive
        elif 'weights_column' in column_type:
            temp_a = np.random.uniform(0, nfold_max_weight_offset, [number_row, 1])
        elif 'offset_column' in column_type:
            temp_a = random.uniform(0, nfold_max_weight_offset)*np.asmatrix(np.ones(number_row)).transpose()
        else:
            assert False, "column_type must be 'fold_assignment', 'weights_column' or 'offset_column'!"

        fold_assignments = h2o.H2OFrame(temp_a)
        fold_assignments.set_names([column_names[index]])
        h2o_frame = h2o_frame.cbind(fold_assignments)

    return h2o_frame


def gen_grid_search(model_params, hyper_params, exclude_parameters, gridable_parameters, gridable_types,
                    gridable_defaults, max_int_number, max_int_val, min_int_val, max_real_number, max_real_val,
                    min_real_val, quantize_level='1.00000000'):
    """
    This function is written to randomly generate griddable parameters for a gridsearch.  For parameters already
    found in hyper_params, no random list will be generated.  In addition, we will check to make sure that the
    griddable parameters are actually used by the model before adding them to the hyper_params dict.
:param model_params: list of string containing names of argument to the model :param hyper_params: dict structure containing a list of gridable parameters names with their list :param exclude_parameters: list containing parameter names not to be added to hyper_params :param gridable_parameters: list of gridable parameter names :param gridable_types: list of gridable parameter types :param gridable_defaults: list of gridable parameter default values :param max_int_number: integer, size of integer gridable parameter list :param max_int_val: integer, maximum integer value for integer gridable parameter :param min_int_val: integer, minimum integer value for integer gridable parameter :param max_real_number: integer, size of real gridable parameter list :param max_real_val: float, maximum real value for real gridable parameter :param min_real_val: float, minimum real value for real gridable parameter :param quantize_level: string representing the quantization level of floating point values generated randomly. :return: a tuple of hyper_params: dict of hyper parameters for gridsearch, true_gridable_parameters: a list of string containing names of truely gridable parameters, true_gridable_types: a list of string denoting parameter types and true_gridable_defaults: default values of those truly gridable parameters """ count_index = 0 true_gridable_parameters = [] true_gridable_types = [] true_gridable_defaults = [] for para_name in gridable_parameters: # parameter must not in exclusion list if (para_name in model_params) and (para_name not in exclude_parameters): true_gridable_parameters.append(para_name) true_gridable_types.append(gridable_types[count_index]) true_gridable_defaults.append(gridable_defaults[count_index]) if para_name not in hyper_params.keys(): # add default value to user defined parameter list # gridable parameter not seen before. Randomly generate values for it if ('int' in gridable_types[count_index]) or ('long' in gridable_types[count_index]): # make sure integer values are not duplicated, using set action to remove duplicates hyper_params[para_name] = list(set([random.randint(min_int_val, max_int_val) for p in range(0, max_int_number)])) elif ('double' in gridable_types[count_index]) or ('float' in gridable_types[count_index]): hyper_params[para_name] = fix_float_precision(list(np.random.uniform(min_real_val, max_real_val, max_real_number)), quantize_level=quantize_level) count_index += 1 return hyper_params, true_gridable_parameters, true_gridable_types, true_gridable_defaults def fix_float_precision(float_list, quantize_level='1.00000000'): """ This function takes in a floating point tuple and attempt to change it to floating point number with fixed precision. :param float_list: tuple/list of floating point numbers :param quantize_level: string, optional, represent the number of fix points we care :return: tuple of floats to the exact precision specified in quantize_level """ fixed_float = [] for num in float_list: fixed_float.append(float(Decimal(num).quantize(Decimal(quantize_level)))) return list(set(fixed_float)) def extract_used_params_xval(a_grid_model, model_param_names, params_dict, algo="GBM"): """ This function performs similar functions to function extract_used_params. However, for max_runtime_secs, we need to go into each cross-valudation model and grab the max_runtime_secs and add them up in order to get the correct value. In addition, we put your algo model specific parameters into params_dict. 
:param a_grid_model: list of models generated by gridsearch :param model_param_names: hyper-parameter names that are specified for the gridsearch. :param params_dict: dict containing name/value pairs specified to an algo. :param algo: string, optional, denoting the algo we are looking at. :return: params_used: a dict structure containing parameters that take on values as name/value pairs which will be used to build a model by hand using the same parameter setting as the model built by gridsearch. """ params_used = dict() # need to extract the max_runtime_secs ONE cross-validation model or the base model if a_grid_model._is_xvalidated: xv_keys = a_grid_model._xval_keys for id in xv_keys: # only need to get info from one model each_xv_model = h2o.get_model(id) # get each model params_used = extract_used_params(model_param_names, each_xv_model.params, params_dict, algo) break else: params_used = extract_used_params(model_param_names, a_grid_model.params, params_dict, algo) return params_used def extract_used_params(model_param_names, grid_model_params, params_dict, algo="GLM"): """ This function is used to build a dict out of parameters used by our gridsearch to build a H2O model given the dict structure that describes the parameters and their values used by gridsearch to build that particular mode. :param model_param_names: list contains parameter names that we are interested in extracting :param grid_model_params: dict contains key as names of parameter and values as list of two values: default and actual. :param params_dict: dict containing extra parameters to add to params_used like family, e.g. 'gaussian', 'binomial', ... :return: params_used: a dict structure containing parameters that take on values as name/value pairs which will be used to build a model by hand using the same parameter setting as the model built by gridsearch. """ params_used = dict() grid_model_params_keys = grid_model_params.keys() for each_parameter in model_param_names: parameter_name = str(each_parameter) if parameter_name in grid_model_params_keys: params_used[parameter_name] = grid_model_params[each_parameter]['actual'] if params_dict: for key, value in params_dict.items(): params_used[key] = value # add distribution family to parameters used list # only for GLM, change lambda to Lambda if algo =="GLM": if 'lambda' in params_used.keys(): params_used['Lambda'] = params_used['lambda'] del params_used['lambda'] return params_used def insert_error_grid_search(hyper_params, gridable_parameters, gridable_types, error_number): """ This function will randomly introduce errors into a copy of hyper_params. 
Depending on the random number error_number generated, the following errors can be introduced: error_number = 0: randomly alter the name of a hyper-parameter name; error_number = 1: randomly choose a hyper-parameter and remove all elements in its list error_number = 2: add randomly generated new hyper-parameter names with random list error_number other: randomly choose a hyper-parameter and insert an illegal type into it :param hyper_params: dict containing all legal hyper-parameters for our grid search :param gridable_parameters: name of griddable parameters (some may not be griddable) :param gridable_types: type of griddable parameters :param error_number: integer representing which errors to introduce into the gridsearch hyper-parameters :return: new dict with errors in either parameter names or parameter values """ error_hyper_params = copy.deepcopy(hyper_params) # error_hyper_params = {k : v for k, v in hyper_params.items()} param_index = random.randint(0, len(hyper_params)-1) param_name = list(hyper_params)[param_index] param_type = gridable_types[gridable_parameters.index(param_name)] if error_number == 0: # grab a hyper-param randomly and copy its name twice new_name = param_name+param_name error_hyper_params[new_name] = error_hyper_params[param_name] del error_hyper_params[param_name] elif error_number == 1: error_hyper_params[param_name] = [] elif error_number == 2: new_param = generate_random_words(random.randint(20,100)) error_hyper_params[new_param] = error_hyper_params[param_name] else: error_hyper_params = insert_bad_value(error_hyper_params, param_name, param_type) return error_hyper_params def insert_bad_value(error_hyper_params, param_name, param_type): """ This function is written to insert a value that is of a different type into an array than the one its other elements are for. :param error_hyper_params: dict containing all hyper-parameters for a grid search :param param_name: string denoting the hyper-parameter we want to insert bad element to :param param_type: string denoting hyper-parameter type :return: dict containing new inserted error value """ if 'int' in param_type: # insert a real number into integer error_hyper_params[param_name].append(random.uniform(-10,10)) elif 'enum' in param_type: # insert an float into enums error_hyper_params[param_name].append(random.uniform(-10,10)) elif 'double' in param_type: # insert an enum into float error_hyper_params[param_name].append(random.uniform(0,1) > 0.5) else: # insert a random string for all other cases error_hyper_params[param_name].append(generate_random_words(random.randint(20,100))) return error_hyper_params def generate_random_words(word_length): """ This function will generate a random word consisting of letters, numbers and punctuation given the word_length. :param word_length: integer denoting length of the word :return: string representing the random word """ if word_length > 0: all_chars = string.ascii_letters + string.digits + string.punctuation return ''.join((random.choice(all_chars)) for index in range(int(word_length))) else: assert False, "word_length must be an integer greater than 0." def generate_redundant_parameters(hyper_params, gridable_parameters, gridable_defaults, error_number): """ This function will randomly choose a set of hyper_params and make a dict out of it so we can duplicate the parameter specification in both the model and grid search. 
:param hyper_params: dict containing all griddable parameters as hyper_param to grid search :param gridable_parameters: list of gridable parameters (not truly) :param gridable_defaults: list of default values for gridable parameters :param error_number: int, indicate ways to change the model parameter and the hyper-parameter Here are the actions performed on the model parameter and hyper-parameters. error_number = 0: set model parameter to be a value out of the hyper-parameter value list, should not generate error; error_number = 1: set model parameter to be default value, should not generate error in this case; error_number = 3: make sure model parameter is not set to default and choose a value not in the hyper-parameter value list. :return: 2 dicts containing duplicated parameters with specification, new hyperparameter specification """ error_hyper_params = copy.deepcopy(hyper_params) # error_hyper_params = {k : v for k, v in hyper_params.items()} params_dict = {} num_params = random.randint(1, len(error_hyper_params)) params_list = list(error_hyper_params) # remove default values out of hyper_params for key in params_list: default_value = gridable_defaults[gridable_parameters.index(key )] if default_value in error_hyper_params[key]: error_hyper_params[key].remove(default_value) for index in range(num_params): param_name = params_list[index] hyper_params_len = len(error_hyper_params[param_name]) if error_number == 0: # randomly assigned the parameter to take one value out of the list param_value_index = random.randint(0, len(error_hyper_params[param_name])-1) params_dict[param_name] = error_hyper_params[param_name][param_value_index] elif error_number == 1: param_value_index = gridable_parameters.index(param_name) params_dict[param_name] = gridable_defaults[param_value_index] else: # randomly assign model parameter to one of the hyper-parameter values, should create error condition here param_value_index = random.randint(0, hyper_params_len-1) params_dict[param_name] = error_hyper_params[param_name][param_value_index] # final check to make sure lambda is Lambda if 'lambda' in list(params_dict): params_dict["Lambda"] = params_dict['lambda'] del params_dict["lambda"] return params_dict, error_hyper_params def count_models(hyper_params): """ Given a hyper_params dict, this function will return the maximum number of models that can be built out of all the combination of hyper-parameters. :param hyper_params: dict containing parameter name and a list of values to iterate over :return: max_model_number: int representing maximum number of models built """ max_model_number = 1 for key in list(hyper_params): max_model_number *= len(hyper_params[key]) return max_model_number def error_diff_2_models(grid_table1, grid_table2, metric_name): """ This function will take two models generated by gridsearch and calculate the mean absolute differences of the metric values specified by the metric_name in the two model. It will return the mean differences. 
:param grid_table1: first H2OTwoDimTable generated by gridsearch :param grid_table2: second H2OTwoDimTable generated by gridsearch :param metric_name: string, name of the metric of interest :return: real number which is the mean absolute metric difference between the two models """ num_model = len(grid_table1.cell_values) metric_diff = 0 for model_index in range(num_model): metric_diff += abs(grid_table1.cell_values[model_index][-1] - grid_table2.cell_values[model_index][-1]) if (num_model > 0): return metric_diff/num_model else: assert False, "error_diff_2_models: your table contains zero models." def find_grid_runtime(model_list): """ This function given a grid_model built by gridsearch will go into the model and calculate the total amount of time it took to actually build all the models in second :param model_list: list of model built by gridsearch, cartesian or randomized with cross-validation enabled. :return: total_time_sec: total number of time in seconds in building all the models """ total_time_sec = 0 for each_model in model_list: total_time_sec += each_model._model_json["output"]["run_time"] # time in ms # if cross validation is used, need to add those run time in here too if each_model._is_xvalidated: xv_keys = each_model._xval_keys for id in xv_keys: each_xv_model = h2o.get_model(id) total_time_sec += each_xv_model._model_json["output"]["run_time"] return total_time_sec/1000.0 # return total run time in seconds def evaluate_metrics_stopping(model_list, metric_name, bigger_is_better, search_criteria, possible_model_number): """ This function given a list of dict that contains the value of metric_name will manually go through the early stopping condition and see if the randomized grid search will give us the correct number of models generated. Note that you cannot assume the model_list is in the order of when a model is built. It actually already come sorted which we do not want.... :param model_list: list of models built sequentially that contains metric of interest among other fields :param metric_name: string representing name of metric that we want to based our stopping condition on :param bigger_is_better: bool indicating if the metric is optimized by getting bigger if True and vice versa :param search_criteria: dict structure containing the search criteria for randomized gridsearch :param possible_model_number: integer, represent the absolute possible number of models built based on the hyper-parameter size :return: bool indicating if the early topping condition is justified """ tolerance = search_criteria["stopping_tolerance"] stop_round = search_criteria["stopping_rounds"] min_list_len = 2*stop_round # minimum length of metrics needed before we start early stopping evaluation metric_list = [] # store metric of optimization stop_now = False # provide metric list sorted by time. Oldest model appear first. 
metric_list_time_ordered = sort_model_by_time(model_list, metric_name) for metric_value in metric_list_time_ordered: metric_list.append(metric_value) if len(metric_list) > min_list_len: # start early stopping evaluation now stop_now = evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better) if stop_now: if len(metric_list) < len(model_list): # could have stopped early in randomized gridsearch return False else: # randomized gridsearch stopped at the correct condition return True if len(metric_list) == possible_model_number: # never meet early stopping condition at end of random gridsearch return True # if max number of model built, still ok else: return False # early stopping condition never met but random gridsearch did not build all models, bad! def sort_model_by_time(model_list, metric_name): """ This function is written to sort the metrics that we care in the order of when the model was built. The oldest model metric will be the first element. :param model_list: list of models built sequentially that contains metric of interest among other fields :param metric_name: string representing name of metric that we want to based our stopping condition on :return: model_metric_list sorted by time """ model_num = len(model_list) model_metric_list = [None] * model_num for index in range(model_num): model_index = int(model_list[index]._id.split('_')[-1]) model_metric_list[model_index] = \ model_list[index]._model_json["output"]["cross_validation_metrics"]._metric_json[metric_name] return model_metric_list def evaluate_early_stopping(metric_list, stop_round, tolerance, bigger_is_better): """ This function mimics the early stopping function as implemented in ScoreKeeper.java. Please see the Java file comment to see the explanation of how the early stopping works. :param metric_list: list containing the optimization metric under consideration for gridsearch model :param stop_round: integer, determine averaging length :param tolerance: real, tolerance to see if the grid search model has improved enough to keep going :param bigger_is_better: bool: True if metric is optimized as it gets bigger and vice versa :return: bool indicating if we should stop early and sorted metric_list """ metric_len = len(metric_list) metric_list.sort(reverse=bigger_is_better) shortest_len = 2*stop_round bestInLastK = 1.0*sum(metric_list[0:stop_round])/stop_round lastBeforeK = 1.0*sum(metric_list[stop_round:shortest_len])/stop_round if not(np.sign(bestInLastK) == np.sign(lastBeforeK)): return False ratio = bestInLastK/lastBeforeK if math.isnan(ratio): return False if bigger_is_better: return not (ratio > 1+tolerance) else: return not (ratio < 1-tolerance) def check_and_count_models(hyper_params, params_zero_one, params_more_than_zero, params_more_than_one, params_zero_positive, max_grid_model): """ This function will look at the hyper-parameter space set in hyper_params, generate a new hyper_param space that will contain a smaller number of grid_models. It will determine how many models will be built from this new hyper_param space. In order to arrive at the correct answer, it must discount parameter settings that are illegal. 
:param hyper_params: dict containing model parameter names and list of values to set it to :param params_zero_one: list containing model parameter names whose values must be between 0 and 1 :param params_more_than_zero: list containing model parameter names whose values must exceed zero :param params_more_than_one: list containing model parameter names whose values must exceed one :param params_zero_positive: list containing model parameter names whose values must equal to or exceed zero :param max_grid_model: maximum number of grid_model that can be generated from the new hyper_params space :return: total model: integer denoting number of grid models that can be built from all legal parameter settings in new hyper_parameter space final_hyper_params: dict of new hyper parameter space derived from the original hyper_params """ total_model = 1 param_len = 0 hyper_keys = list(hyper_params) shuffle(hyper_keys) # get all hyper_parameter names in random order final_hyper_params = dict() for param in hyper_keys: # this param should be > 0 and <= 2 if param == "col_sample_rate_change_per_level": param_len = len([x for x in hyper_params["col_sample_rate_change_per_level"] if (x > 0) and (x <= 2)]) elif param in params_zero_one: param_len = len([x for x in hyper_params[param] if (x >= 0) and (x <= 1)]) elif param in params_more_than_zero: param_len = len([x for x in hyper_params[param] if (x > 0)]) elif param in params_more_than_one: param_len = len([x for x in hyper_params[param] if (x > 1)]) elif param in params_zero_positive: param_len = len([x for x in hyper_params[param] if (x >= 0)]) else: param_len = len(hyper_params[param]) if (param_len >= 0) and ((total_model*param_len) <= max_grid_model): total_model *= param_len final_hyper_params[param] = hyper_params[param] elif (total_model*param_len) > max_grid_model: break return total_model, final_hyper_params def write_hyper_parameters_json(dir1, dir2, json_filename, hyper_parameters): """ Write a json file of the hyper_parameters in directories dir1 and dir2 for debugging purposes. :param dir1: String containing first directory where you want to write the json file to :param dir2: String containing second directory where you want to write the json file to :param json_filename: String containing json file name :param hyper_parameters: dict containing hyper-parameters used """ # save hyper-parameter file in test directory with open(os.path.join(dir1, json_filename), 'w') as test_file: json.dump(hyper_parameters, test_file) # save hyper-parameter file in sandbox with open(os.path.join(dir2, json_filename), 'w') as test_file: json.dump(hyper_parameters, test_file) def compare_frames(frame1, frame2, numElements, tol_time=0, tol_numeric=0, strict=False, compare_NA=True): """ This function will compare two H2O frames to make sure their dimension, and values in all cells are the same. It will not compare the column names though. :param frame1: H2O frame to be compared :param frame2: H2O frame to be compared :param numElements: integer to denote number of rows to compare. Done to reduce compare time. Set to 0 or negative number if you want to compare all elements. :param tol_time: optional parameter to limit time value difference. :param tol_numerica: optional parameter to limit numeric value difference. :param strict: optional parameter to enforce strict comparison or not. If True, column type must match in order to pass the test. :param compare_NA: optional parameter to compare NA or not. 
For csv file generated from orc file, the NAs are represented as some other symbol but our CSV will not be able to parse it correctly as NA. In this case, do not compare the number of NAs. :return: boolean: True, the two frames are equal and False otherwise. """ # check frame dimensions rows1, cols1 = frame1.dim rows2, cols2 = frame2.dim assert rows1 == rows2 and cols1 == cols2, "failed dim check! frame 1 rows:{0} frame 2 rows:{1} frame 1 cols:{2} " \ "frame2 cols:{3}".format(rows1, rows2, cols1, cols2) na_frame1 = frame1.isna().sum().sum(axis=1)[:,0] na_frame2 = frame2.isna().sum().sum(axis=1)[:,0] if compare_NA: # check number of missing values assert na_frame1.flatten() == na_frame2.flatten(), "failed numbers of NA check! Frame 1 NA number: {0}, frame 2 " \ "NA number: {1}".format(na_frame1, na_frame2) # check column types are the same before proceeding to check each row content. for col_ind in range(cols1): c1_key = frame1.columns[col_ind] c2_key = frame2.columns[col_ind] c2_type = frame2.types[c2_key] c1_type = frame1.types[c1_key] print("###### Comparing column: {0} and column type is {1}.".format(col_ind, c1_type)) if strict: # every column type must match assert c1_type == c2_type, "failed column type check! frame1 col type: {0}, frame2 col type: " \ "{1}".format(c1_type, c2_type) else: if str(c2_type) == 'enum': # orc files do not have enum column type. We convert it here frame1[col_ind].asfactor() # compare string if (str(c1_type) == 'string') or (str(c1_type) == 'enum'): compareOneStringColumn(frame1, frame2, col_ind, rows1, numElements) else: if str(c2_type) == 'time': # compare time columns compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_time, numElements) else: compareOneNumericColumn(frame1, frame2, col_ind, rows1, tol_numeric, numElements) return True def compareOneStringColumn(frame1, frame2, col_ind, rows, numElements): """ This function will compare two String columns of two H2O frames to make sure that they are the same. :param frame1: H2O frame to be compared :param frame2: H2O frame to be compared :param col_ind: integer denoting column index to compare the two frames :param rows: integer denoting number of rows in the column :param numElements: integer to denote number of rows to compare. Done to reduce compare time :return: None. Will throw exceptions if comparison failed. """ row_indices = list(range(rows)) if numElements > 0: random.shuffle(row_indices) else: numElements = rows for ele_ind in range(numElements): row_ind = row_indices[ele_ind] val1 = frame1[row_ind, col_ind] val2 = frame2[row_ind, col_ind] assert val1 == val2, "failed frame values check! frame1 value: {0}, frame2 value: {1} at row {2}, column " \ "{3}".format(val1, val2, row_ind, col_ind) def compareOneNumericColumn(frame1, frame2, col_ind, rows, tolerance, numElements): """ This function compares two numeric columns of two H2O frames to make sure that they are close. :param frame1: H2O frame to be compared :param frame2: H2O frame to be compared :param col_ind: integer denoting column index to compare the two frames :param rows: integer denoting number of rows in the column :param tolerance: double parameter to limit numerical value difference. :param numElements: integer to denote number of rows to compare. Done to reduce compare time. :return: None. Will throw exceptions if comparison failed. 
""" row_indices = [] if numElements > 0: row_indices = random.sample(range(rows), numElements) else: numElements = rows # Compare all elements row_indices = list(range(rows)) for ele_ind in range(numElements): row_ind = row_indices[ele_ind] val1 = frame1[row_ind, col_ind] val2 = frame2[row_ind, col_ind] if not(math.isnan(val1)) and not(math.isnan(val2)): # both frames contain valid elements diff = abs(val1-val2)/max(1, abs(val1), abs(val2)) assert diff <= tolerance, "failed frame values check! frame1 value = {0}, frame2 value = {1}, " \ "at row {2}, column {3}. The difference is {4}.".format(val1, val2, row_ind, col_ind, diff) elif math.isnan(val1) and math.isnan(val2): # both frame contains missing values continue else: # something is wrong, one frame got a missing value while the other is fine. assert 1 == 2, "failed frame values check! frame1 value {0}, frame2 value {1} at row {2}, " \ "column {3}".format(val1, val2, row_ind, col_ind) import warnings def expect_warnings(filewithpath, warn_phrase="warn", warn_string_of_interest="warn", number_of_times=1, in_hdfs=False): """ This function will execute a command to run and analyze the print outs of running the command. The goal here is to capture any warnings that we may expect out of running those commands. :param filewithpath: name of file to be parsed with path :param warn_phrase: capture the warning header, sometimes it is warn or userwarn. :param warn_string_of_interest: specific warning message string :param number_of_times: number of warning lines we are expecting. :return: True if warning was found and False otherwise """ number_warngings = 0 buffer = StringIO() # redirect warning messages to string buffer for later analysis sys.stderr = buffer frame = None if in_hdfs: frame = h2o.import_file(filewithpath) else: frame = h2o.import_file(path=locate(filewithpath)) sys.stderr = sys.__stderr__ # redirect it back to stdout. try: # for python 2.7 if len(buffer.buflist) > 0: for index in range(len(buffer.buflist)): print("*** captured warning message: {0}".format(buffer.buflist[index])) if (warn_phrase in buffer.buflist[index]) and (warn_string_of_interest in buffer.buflist[index]): number_warngings = number_warngings+1 except: # for python 3. warns = buffer.getvalue() print("*** captured warning message: {0}".format(warns)) if (warn_phrase in warns) and (warn_string_of_interest in warns): number_warngings = number_warngings+1 print("Number of warnings found: {0} and number of times that warnings should appear {1}.".format(number_warngings, number_of_times)) if number_warngings >= number_of_times: return True else: return False def compare_frame_summary(frame1_summary, frame2_summary, compareNames=False, compareTypes=False): """ This method is written to compare the frame summary between two frames. :param frame1_summary: :param frame2_summary: :param compareNames: :param compareTypes: :return: """ frame1_column_number = len(frame1_summary) frame2_column_number = len(frame2_summary) assert frame1_column_number == frame2_column_number, "failed column number check! 
Frame 1 column number: {0}," \ "frame 2 column number: {1}".format(frame1_column_number, frame2_column_number) for col_index in range(frame1_column_number): # check summary for each column for key_val in list(frame1_summary[col_index]): if not(compareNames) and (str(key_val) == 'label'): continue if not(compareTypes) and (str(key_val) == 'type'): continue if str(key_val) == 'precision': # skip comparing precision continue val1 = frame1_summary[col_index][key_val] val2 = frame2_summary[col_index][key_val] if isinstance(val1, list) or isinstance(val1, dict): if isinstance(val1, dict): assert val1 == val2, "failed column summary comparison for column {0} and summary " \ "type {1}, frame 1 value is {2}, frame 2 value is " \ "{3}".format(col_index, str(key_val), val1, val2) else: if len(val1) > 0: # find if elements are float float_found = False for ind in range(len(val1)): if isinstance(val1[ind], float): float_found = True break if float_found: for ind in range(len(val1)): if not(str(val1[ind] == 'NaN')): assert abs(val1[ind]-val2[ind]) < 1e-5, "failed column summary comparison for " \ "column {0} and summary type {1}, frame 1" \ " value is {2}, frame 2 value is " \ "{3}".format(col_index, str(key_val), val1[ind], val2[ind]) else: assert val1 == val2, "failed column summary comparison for column {0} and summary" \ " type {1}, frame 1 value is {2}, frame 2 value is " \ "{3}".format(col_index, str(key_val), val1, val2) else: if isinstance(val1, float): assert abs(val1-val2) < 1e-5, "failed column summary comparison for column {0} and summary type " \ "{1}, frame 1 value is {2}, frame 2 value is " \ "{3}".format(col_index, str(key_val), val1, val2) else: assert val1 == val2, "failed column summary comparison for column {0} and summary type " \ "{1}, frame 1 value is {2}, frame 2 value is " \ "{3}".format(col_index, str(key_val), val1, val2) def cannaryHDFSTest(hdfs_name_node, file_name): """ This function is written to detect if the hive-exec version is too old. It will return True if it is too old and false otherwise. :param hdfs_name_node: :param file_name: :return: """ url_orc = "hdfs://{0}{1}".format(hdfs_name_node, file_name) try: tempFrame = h2o.import_file(url_orc) h2o.remove(tempFrame) print("Your hive-exec version is good. Parsing success for {0}.".format(url_orc)) return False except Exception as e: print("Error exception is {0}".format(str(e))) if "NoSuchFieldError: vector" in str(e): return True else: # exception is caused by other reasons. return False def extract_scoring_history_field(aModel, fieldOfInterest, takeFirst=False): """ Given a fieldOfInterest that are found in the model scoring history, this function will extract the list of field values for you from the model. :param aModel: H2O model where you want to extract a list of fields from the scoring history :param fieldOfInterest: string representing a field of interest. :return: List of field values or None if it cannot be found """ return extract_from_twoDimTable(aModel._model_json["output"]["scoring_history"], fieldOfInterest, takeFirst) def extract_from_twoDimTable(metricOfInterest, fieldOfInterest, takeFirst=False): """ Given a fieldOfInterest that are found in the model scoring history, this function will extract the list of field values for you from the model. :param aModel: H2O model where you want to extract a list of fields from the scoring history :param fieldOfInterest: string representing a field of interest. 
:return: List of field values or None if it cannot be found """ allFields = metricOfInterest._col_header if fieldOfInterest in allFields: cellValues = [] fieldIndex = allFields.index(fieldOfInterest) for eachCell in metricOfInterest.cell_values: cellValues.append(eachCell[fieldIndex]) if takeFirst: # only grab the result from the first iteration. break return cellValues else: return None def model_run_time_sorted_by_time(model_list): """ This function is written to sort the metrics that we care in the order of when the model was built. The oldest model metric will be the first element. :param model_list: list of models built sequentially that contains metric of interest among other fields :return: model run time in secs sorted by order of building """ model_num = len(model_list) model_runtime_sec_list = [None] * model_num for index in range(model_num): model_index = int(model_list[index]._id.split('_')[-1]) model_runtime_sec_list[model_index] = \ (model_list[index]._model_json["output"]["run_time"]/1000.0) return model_runtime_sec_list def model_seed_sorted_by_time(model_list): """ This function is written to find the seed used by each model in the order of when the model was built. The oldest model metric will be the first element. :param model_list: list of models built sequentially that contains metric of interest among other fields :return: model seed sorted by order of building """ model_num = len(model_list) model_seed_list = [None] * model_num for index in range(model_num): model_index = int(model_list[index]._id.split('_')[-1]) for pIndex in range(len(model_list.models[0]._model_json["parameters"])): if model_list.models[index]._model_json["parameters"][pIndex]["name"]=="seed": model_seed_list[model_index]=model_list.models[index]._model_json["parameters"][pIndex]["actual_value"] break return model_seed_list def check_ignore_cols_automl(models,names,x,y): models = sum(models.as_data_frame().values.tolist(),[]) for model in models: if "StackedEnsemble" in model: continue else: assert set(h2o.get_model(model).params["ignored_columns"]["actual"]) == set(names) - {y} - set(x), \ "ignored columns are not honored for model " + model def compare_numeric_frames(f1, f2, prob=0.5, tol=1e-6): assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes." temp1 = f1.asnumeric() temp2 = f2.asnumeric() for colInd in range(f1.ncol): for rowInd in range(f2.nrow): if (random.uniform(0,1) < prob): if (math.isnan(temp1[rowInd, colInd])): assert math.isnan(temp2[rowInd, colInd]), "Failed frame values check at row {2} and column {3}! " \ "frame1 value: {0}, frame2 value: " \ "{1}".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd) else: diff = abs(temp1[rowInd, colInd]-temp2[rowInd, colInd])/max(1.0, abs(temp1[rowInd, colInd]), abs(temp2[rowInd, colInd])) assert diff<=tol, "Failed frame values check at row {2} and column {3}! 
frame1 value: {0}, frame2 value: " \ "{1}".format(temp1[rowInd, colInd], temp2[rowInd, colInd], rowInd, colInd) def check_sorted_2_columns(frame1, sorted_column_indices, prob=0.5, ascending=[True, True]): for colInd in sorted_column_indices: for rowInd in range(0, frame1.nrow-1): if (random.uniform(0.0,1.0) < prob): if colInd == sorted_column_indices[0]: if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])): if ascending[colInd]: assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \ "row {2}: {3}".format(rowInd, frame1[rowInd,colInd], rowInd+1, frame1[rowInd+1,colInd]) else: assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \ "row {2}: {3}".format(rowInd, frame1[rowInd,colInd], rowInd+1, frame1[rowInd+1,colInd]) else: # for second column if not(math.isnan(frame1[rowInd, sorted_column_indices[0]])) and not(math.isnan(frame1[rowInd+1,sorted_column_indices[0]])): if (frame1[rowInd,sorted_column_indices[0]]==frame1[rowInd+1, sorted_column_indices[0]]): # meaningful to compare row entries then if not(math.isnan(frame1[rowInd, colInd])) and not(math.isnan(frame1[rowInd+1,colInd])): if ascending[colInd]: assert frame1[rowInd,colInd] <= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \ "row {2}: {3}".format(rowInd, frame1[rowInd,colInd], rowInd+1, frame1[rowInd+1,colInd]) else: assert frame1[rowInd,colInd] >= frame1[rowInd+1,colInd], "Wrong sort order: value at row {0}: {1}, value at " \ "row {2}: {3}".format(rowInd, frame1[rowInd,colInd], rowInd+1, frame1[rowInd+1,colInd]) def assert_correct_frame_operation(sourceFrame, h2oResultFrame, operString): """ This method checks each element of a numeric H2OFrame and throw an assert error if its value does not equal to the same operation carried out by python. :param sourceFrame: original H2OFrame. :param h2oResultFrame: H2OFrame after operation on original H2OFrame is carried out. :param operString: str representing one of 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh', 'cospi', 'cumprod', 'cumsum', 'digamma', 'exp', 'expm1', 'floor', 'round', 'sin', 'sign', 'round', 'sinh', 'tan', 'tanh' :return: None. 
""" validStrings = ['acos', 'acosh', 'asin', 'asinh', 'atan', 'atanh', 'ceil', 'cos', 'cosh', 'exp', 'floor', 'gamma', 'lgamma', 'log', 'log10', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trigamma', 'expm1'] npValidStrings = ['log2', 'sign'] nativeStrings = ['round', 'abs', 'cumsum'] multpi = ['cospi', 'sinpi', 'tanpi'] others = ['log1p', 'signif', 'trigamma', 'digamma', 'cumprod'] # check for valid operString assert operString in validStrings+npValidStrings+nativeStrings+multpi+others, "Illegal operator " \ "{0} specified.".format(operString) result_comp = lambda x:x # default method if operString == "log1p": result_comp = lambda x:math.log(x+1) elif operString == 'signif': result_comp = lambda x:round(x, 7) elif operString == 'trigamma': result_comp = lambda x:scipy.special.polygamma(1, x) elif operString == 'digamma': result_comp = lambda x:scipy.special.polygamma(0, x) elif operString=='cumprod': result_comp = lambda x:factorial(x) # stringOperations = 'result_val = factorial(sourceFrame[row_ind, col_ind])' elif operString in validStrings: result_comp = lambda x:getattr(math, operString)(x) elif operString in nativeStrings: result_comp =lambda x:__builtins__.get(operString)(x) stringOperations = 'result_val = '+operString+'(sourceFrame[row_ind, col_ind])' elif operString in npValidStrings: result_comp = lambda x:getattr(np, operString)(x) # stringOperations = 'result_val = np.'+operString+'(sourceFrame[row_ind, col_ind])' elif operString in multpi: result_comp = lambda x:getattr(math, operString.split('p')[0])(x*math.pi) #stringOperations = 'result_val = math.'+operString.split('p')[0]+'(sourceFrame[row_ind, col_ind]*math.pi)' for col_ind in range(sourceFrame.ncols): for row_ind in range(sourceFrame.nrows): result_val = result_comp(sourceFrame[row_ind, col_ind]) assert abs(h2oResultFrame[row_ind, col_ind]-result_val) <= 1e-6, \ " command {0}({3}) is not working. Expected: {1}. Received: {2}".format(operString, result_val, h2oResultFrame[row_ind, col_ind], sourceFrame[row_ind, col_ind]) def factorial(n): """ Defined my own factorial just in case using python2.5 or less. :param n: :return: """ if n>0 and n<2: return 1 if n>=2: return n*factorial(n-1) def cumop(items, op, colInd=0): # take in one column only res = [None]*len(items) for index in range(len(items)): res[index] = op(res[index-1], items[index, colInd]) if index > 0 else items[index, colInd] return res def compare_frames_local(f1, f2, prob=0.5, tol=1e-6): temp1 = f1.as_data_frame(use_pandas=False) temp2 = f2.as_data_frame(use_pandas=False) assert (f1.nrow==f2.nrow) and (f1.ncol==f2.ncol), "The two frames are of different sizes." for colInd in range(f1.ncol): for rowInd in range(1,f2.nrow): if (random.uniform(0,1) < prob): if (math.isnan(float(temp1[rowInd][colInd]))): assert math.isnan(float(temp2[rowInd][colInd])), "Failed frame values check at row {2} and column {3}! " \ "frame1 value: {0}, frame2 value: " \ "{1}".format(temp1[rowInd][colInd], temp2[rowInd][colInd], rowInd, colInd) else: v1 = float(temp1[rowInd][colInd]) v2 = float(temp2[rowInd][colInd]) diff = abs(v1-v2)/max(1.0, abs(v1), abs(v2)) assert diff<=tol, "Failed frame values check at row {2} and column {3}! 
frame1 value: {0}, frame2 value: " \ "{1}".format(v1, v2, rowInd, colInd) def build_save_model_GLM(params, x, train, respName): # build a model model = H2OGeneralizedLinearEstimator(**params) model.train(x=x, y=respName, training_frame=train) # save model regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]") MOJONAME = regex.sub("_", model._id) print("Downloading Java prediction model code from H2O") TMPDIR = os.path.normpath(os.path.join(os.path.dirname(os.path.realpath('__file__')), "..", "results", MOJONAME)) os.makedirs(TMPDIR) model.download_mojo(path=TMPDIR) # save mojo return model # generate random dataset, copied from Pasha def random_dataset(response_type, verbose=True, NTESTROWS=200): """Create and return a random dataset.""" if verbose: print("\nCreating a dataset for a %s problem:" % response_type) fractions = {k + "_fraction": random.random() for k in "real categorical integer time string binary".split()} fractions["string_fraction"] = 0 # Right now we are dropping string columns, so no point in having them. fractions["binary_fraction"] /= 3 fractions["time_fraction"] /= 2 sum_fractions = sum(fractions.values()) for k in fractions: fractions[k] /= sum_fractions if response_type == 'binomial': response_factors = 2 else: response_factors = random.randint(3, 10) df = h2o.create_frame(rows=random.randint(15000, 25000) + NTESTROWS, cols=random.randint(3, 20), missing_fraction=0, has_response=True, response_factors=response_factors, positive_response=True, factors=10, **fractions) if verbose: print() df.show() return df def getMojoName(modelID): regex = re.compile("[+\\-* !@#$%^&()={}\\[\\]|;:'\"<>,.?/]") return regex.sub("_", modelID)
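# ----------------------------------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original test suite).  It shows how the
# frame-comparison helper compare_frames_local() defined above can be exercised on a purely numeric
# frame.  It assumes the caller has already started or connected to an H2O cluster (e.g. h2o.init()).
def _demo_compare_random_frames(prob=0.1, tol=1e-6):
    """Create a small all-numeric H2O frame and verify that it compares equal to itself."""
    # build a 100 x 3 frame of uniform random numbers, mirroring the numpy-to-H2OFrame usage above
    frame = h2o.H2OFrame(np.random.uniform(0, 1, [100, 3]))
    # comparing a frame against itself must always pass
    compare_frames_local(frame, frame, prob=prob, tol=tol)
    return frame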
spennihana/h2o-3
h2o-py/tests/pyunit_utils/utilsPY.py
Python
apache-2.0
175,962
[ "Gaussian" ]
b8b048f55d6dc79f6cec3b66f604c7c02d325aec1d59b4529495c509fb44dc37
#!/usr/bin/env python2 # -*- coding: utf-8 -*- """ Auxiliary Functions and resources to COSMETICS00 Created on Wed Dec 06 18:04:00 2018 :author: Ruyman Azzollini """ # IMPORT STUFF from pdb import set_trace as stop import numpy as np import os from collections import OrderedDict import string as st import copy from matplotlib import cm from vison.datamodel import cdp from vison.flat import BF01aux from vison.plot import figclasses from vison.plot import trends #from MOT_FF import extract_overscan_profiles # END IMPORT def gt_meta_MASK_dict(test, masktype): ntest = test.replace( '_', '\_') return dict( figname='%s_MASK_2Dimgshow_%s.png' % (test, masktype), caption='%s: Masks CCDs [%s]. Smoothed with gaussian kernel to highlight details.' % (ntest, masktype), meta=dict( doLegend=False, doColorbar=False, suptitle='%s/%s:Quadrant Images.' % (ntest, masktype), corekwargs=dict( cmap=cm.gray, aspect='auto', norm=None, origin='lower left'))) def get_COS_figs(): """ """ COS_figs = OrderedDict() COS_figs['Masks_DARK'] = [figclasses.Fig_BeamImgShow, gt_meta_MASK_dict('COSMETICS00', 'DARK')] COS_figs['Masks_FLAT'] = [figclasses.Fig_BeamImgShow, gt_meta_MASK_dict('COSMETICS00', 'FLAT')] COS_figs['Masks_MERGE'] = [ figclasses.Fig_BeamImgShow, gt_meta_MASK_dict( 'COSMETICS00', 'MERGE')] COS_figs['BlueScreen'] = [figclasses.BlueScreen, dict()] return COS_figs def get_CDP_lib(): CDP_lib = OrderedDict() def_tb_cdp = cdp.Tables_CDP() def_tb_cdp.rootname = 'COSMETICS00_DEFECTS_TB' CDP_lib['DEF_TB'] = def_tb_cdp return CDP_lib
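# Minimal usage sketch (illustrative only, not called by the vison pipeline).  It simply inspects
# the figure and CDP dictionaries built by get_COS_figs() and get_CDP_lib() defined above.
def _demo_list_cos_products():
    """Print the COSMETICS00 figure keys and the root names of its CDP tables."""
    cos_figs = get_COS_figs()
    cdp_lib = get_CDP_lib()
    for fig_key in cos_figs:
        print('figure: %s' % fig_key)
    for cdp_key in cdp_lib:
        print('CDP: %s -> %s' % (cdp_key, cdp_lib[cdp_key].rootname))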
ruymanengithub/vison
vison/other/COSaux.py
Python
gpl-3.0
1,709
[ "Gaussian" ]
0d4e1bd298acb4b606141710a30dfd90012885caf35627a71aef01edd81dabe8
#!/usr/bin/python import numpy as np BOLTZCONST = 8.617e-5 #eV/K def read_doscar(input_): """ Reads in a doscar file to grab the density of states as a function of energy. The argument input_ is assumed to be a file object. """ n_atoms = input_.readline().split()[0] # Discard header information for i in range(4): input_.readline() # Read in Fermi Energy line = input_.readline().split() e_max = float(line[0]); e_min = float(line[1]); e_fermi = float(line[3]) energy = []; n_tot = []; n_spin = [] for line in input_: line = line.split() energy.append(float(line[0])) if len(line) == 3: n_tot.append(float(line[1])) # DOS includes spin up and down n_spin.append(0.0) elif len(line) == 5: n_tot.append(float(line[1])+float(line[2])) n_spin.append(float(line[1])-float(line[2])) energy = np.array(energy) n_tot = np.array(n_tot); n_spin = np.array(n_spin) energy = energy - e_fermi return e_fermi,energy,n_tot,n_spin def get_bandgap(energy,dos): """ Finds the band gap of a DOS around the fermi energy. """ step_size = energy[1] - energy[0] i = 0 not_found = True while not_found: if energy[i] < 0 and dos[i] > 1e-3: bot = energy[i] elif energy[i] > 0 and dos[i] > 1e-3: top = energy[i] not_found = False i += 1 if top - bot < 2*step_size: top = bot = 0 return bot,top def integrate_dos(energy,dos,weight,start,end,args=None): """ Takes numpy arrays containing the energy and dos and integrates them over the range [start,end] with the weighting function weight. Weight should take as an argument the integrated energy and a list of other arguements args. """ from scipy.interpolate import UnivariateSpline from scipy.integrate import quad dos = UnivariateSpline(energy,dos,s=0) #def integrand(x,dos,weight,args): # return dos(x)*weight(x,args) #result = quad(integrand,start,end,args=(dos,weight,args)) result = quad(dos,start,end,limit=100) return result def sum_dos(energy,dos,weight,start,end,args=None): """ Sums the weighted density of states """ condition = (energy - start == 0) or (energy - end == 0) cond_arr = np.extract(condition,energy) e_start = cond_arr[0]; e_end = cond_arr[1] sum = 0. for i in range(e_start,e_end): sum += dos[i]*weight(energy[i],args) return sum def fermi_dirac_dist(x,args): """ Calculates the Fermi-Dirac distribution for an energy x, temperature args[0], and electron chemical potential args[1]. """ T = args[0]; mu = args[1] return 1./(np.exp((x-mu)/BOLTZCONST*T)+1.) def calc_np(argv): doscar = open(str(argv[0])) if len(argv) > 1: T = float(argv[1]) else: T = 300. e_fermi,energy,n_tot,n_spin = read_doscar(doscar) vbm,cbm = get_bandgap(energy,n_tot) start = energy[0] end = energy[len(energy)-1] #n = integrate_dos(energy,n_tot,fermi_dirac_dist,cbm,end,args=(T,e_fermi)) #p = integrate_dos(energy,n_tot,fermi_dirac_dist,start,vbm,args=(T,e_fermi)) #p = integrate_dos(energy,n_tot,fermi_dirac_dist,start,end,args=(T,e_fermi)) p = sum_dos(energy,n_tot,fermi_dirac_dist,start,end,args=(T,e_fermi)) return p def test_integrand(argv): doscar = open(str(argv[0])) if len(argv) > 1: T = float(argv[1]) else: T = 300. 
    e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
    vbm,cbm = get_bandgap(energy,n_tot)
    start = energy[0]
    end = energy[len(energy)-1]
    # print the integrand (DOS weighted by the Fermi-Dirac distribution) over the energy grid
    for i in range(len(energy)):
        print energy[i],n_tot[i]*fermi_dirac_dist(energy[i],(T,e_fermi))

def main(argv):
    import matplotlib.pyplot as plt
    doscar = open(str(argv[0]))
    e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
    plt.plot(energy,n_tot)
    if len(argv) > 1:
        doscar2 = open(str(argv[1]))
        e_fermi2,energy2,n_tot2,n_spin2 = read_doscar(doscar2)
        plt.plot(energy2,n_tot2)
    plt.show()

if __name__ == "__main__":
    import sys
    # calc_np currently returns only the summed hole concentration p
    p = calc_np(sys.argv[1:])
    print p
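# Minimal usage sketch (illustrative; "DOSCAR" below is just a hypothetical file path).
# It shows how read_doscar() and get_bandgap() above are meant to be chained together.
def demo_report_bandgap(doscar_path="DOSCAR"):
    """Read a DOSCAR file and print the Fermi energy and the band gap edges in eV (relative to E_F)."""
    with open(doscar_path) as doscar:
        e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
    vbm,cbm = get_bandgap(energy,n_tot)
    print "Fermi energy: %f eV, VBM: %f eV, CBM: %f eV, gap: %f eV" % (e_fermi,vbm,cbm,cbm-vbm)
    return vbm,cbm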
jeffwdoak/free_energies
free_energies/read_doscar.py
Python
mit
4,141
[ "DIRAC" ]
574b00ffc2c2d7a0c6e591db015d5d515cf154c628e14d9f69a27abcde2e699e
""" Storage Factory Class - creates instances of various Storage plugins from the Core DIRAC or extensions This Class has three public methods: getStorageName(): Resolves links in the CS to the target SE name. getStorage(): This creates a single storage stub based on the parameters passed in a dictionary. This dictionary must have the following keys: 'StorageName','PluginName','Protocol' Other optional keys are 'Port','Host','Path','SpaceToken' getStorages() This takes a DIRAC SE definition and creates storage stubs for the protocols found in the CS. By providing an optional list of protocols it is possible to limit the created stubs. """ __RCSID__ = "$Id$" from DIRAC import gLogger, S_OK, S_ERROR, gConfig from DIRAC.ConfigurationSystem.Client.Helpers import getInstalledExtensions from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus from DIRAC.ConfigurationSystem.Client.Helpers.Resources import Resources, getSiteForResource from DIRAC.ConfigurationSystem.Client.Helpers.Path import cfgPath from DIRAC.Core.Utilities.ObjectLoader import ObjectLoader from DIRAC.Core.Utilities.List import sortList import os class StorageFactory: def __init__( self, useProxy = False, vo = None ): self.proxy = False self.proxy = useProxy self.resourceStatus = ResourceStatus() self.resourcesHelper = Resources( vo = vo ) self.vo = vo ########################################################################################### # # Below are public methods for obtaining storage objects # def getStorageName( self, initialName ): return self._getConfigStorageName( initialName ) def getStorage( self, parameterDict ): """ This instantiates a single storage for the details provided and doesn't check the CS. """ # The storage name must be supplied. if parameterDict.has_key( 'StorageName' ): storageName = parameterDict['StorageName'] else: errStr = "StorageFactory.getStorage: StorageName must be supplied" gLogger.error( errStr ) return S_ERROR( errStr ) # PluginName must be supplied otherwise nothing with work. if parameterDict.has_key( 'PluginName' ): pluginName = parameterDict['PluginName'] # Temporary fix for backward compatibility elif parameterDict.has_key( 'ProtocolName' ): pluginName = parameterDict['ProtocolName'] else: errStr = "StorageFactory.getStorage: PluginName must be supplied" gLogger.error( errStr ) return S_ERROR( errStr ) return self.__generateStorageObject( storageName, pluginName, parameterDict ) def getStorages( self, storageName, pluginList = [] ): """ Get an instance of a Storage based on the DIRAC SE name based on the CS entries CS 'storageName' is the DIRAC SE name i.e. 
'CERN-RAW' 'pluginList' is an optional list of protocols if a sub-set is desired i.e ['SRM2','SRM1'] """ self.remotePlugins = [] self.localPlugins = [] self.name = '' self.options = {} self.protocolDetails = [] self.storages = [] if not self.vo: return S_ERROR( 'Mandatory vo parameter is not defined' ) # Get the name of the storage provided res = self._getConfigStorageName( storageName ) if not res['OK']: return res storageName = res['Value'] self.name = storageName # Get the options defined in the CS for this storage res = self._getConfigStorageOptions( storageName ) if not res['OK']: return res self.options = res['Value'] # Get the protocol specific details res = self._getConfigStorageProtocols( storageName ) if not res['OK']: return res self.protocolDetails = res['Value'] requestedLocalPlugins = [] requestedRemotePlugins = [] requestedProtocolDetails = [] turlProtocols = [] # Generate the protocol specific plug-ins self.storages = [] for protocolDict in self.protocolDetails: pluginName = protocolDict.get( 'PluginName' ) if pluginList and pluginName not in pluginList: continue protocol = protocolDict['Protocol'] result = self.__generateStorageObject( storageName, pluginName, protocolDict ) if result['OK']: self.storages.append( result['Value'] ) if pluginName in self.localPlugins: turlProtocols.append( protocol ) requestedLocalPlugins.append( pluginName ) if pluginName in self.remotePlugins: requestedRemotePlugins.append( pluginName ) requestedProtocolDetails.append( protocolDict ) else: gLogger.info( result['Message'] ) if len( self.storages ) > 0: resDict = {} resDict['StorageName'] = self.name resDict['StorageOptions'] = self.options resDict['StorageObjects'] = self.storages resDict['LocalPlugins'] = requestedLocalPlugins resDict['RemotePlugins'] = requestedRemotePlugins resDict['ProtocolOptions'] = requestedProtocolDetails resDict['TurlProtocols'] = turlProtocols return S_OK( resDict ) else: errStr = "StorageFactory.getStorages: Failed to instantiate any storage protocols." gLogger.error( errStr, self.name ) return S_ERROR( errStr ) ########################################################################################### # # Below are internal methods for obtaining section/option/value configuration # def _getConfigStorageName( self, storageName ): """ This gets the name of the storage the configuration service. If the storage is an alias for another the resolution is performed. 'storageName' is the storage section to check in the CS """ result = self.resourcesHelper.getStorageElementOptionsDict( storageName ) if not result['OK']: errStr = "StorageFactory._getConfigStorageName: Failed to get storage options" gLogger.error( errStr, result['Message'] ) return S_ERROR( errStr ) if not result['Value']: errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist." gLogger.error( errStr, storageName ) return S_ERROR( errStr ) if 'Alias' in result['Value']: #FIXME This cannot work as self.rootConfigPath is undefined configPath = '%s/%s/Alias' % ( self.rootConfigPath, storageName ) aliasName = gConfig.getValue( configPath ) result = self._getConfigStorageName( aliasName ) if not result['OK']: errStr = "StorageFactory._getConfigStorageName: Supplied storage doesn't exist." 
gLogger.error( errStr, configPath ) return S_ERROR( errStr ) resolvedName = result['Value'] else: resolvedName = storageName return S_OK( resolvedName ) def _getConfigStorageOptions( self, storageName ): """ Get the options associated to the StorageElement as defined in the CS """ result = self.resourcesHelper.getStorageElementOptionsDict( storageName ) if not result['OK']: errStr = "StorageFactory._getStorageOptions: Failed to get storage options." gLogger.error( errStr, "%s: %s" % ( storageName, result['Message'] ) ) return S_ERROR( errStr ) optionsDict = result['Value'] result = self.resourceStatus.getStorageStatus( storageName, 'ReadAccess' ) if not result[ 'OK' ]: errStr = "StorageFactory._getStorageOptions: Failed to get storage status" gLogger.error( errStr, "%s: %s" % ( storageName, result['Message'] ) ) return S_ERROR( errStr ) #optionsDict.update( result[ 'Value' ][ storageName ] ) return S_OK( optionsDict ) def _getConfigStorageProtocols( self, storageName ): """ Protocol specific information is present as sections in the Storage configuration """ result = getSiteForResource( storageName ) if not result['OK']: return result site = result['Value'] result = self.resourcesHelper.getEligibleNodes( 'AccessProtocol', {'Site': site, 'Resource': storageName } ) if not result['OK']: return result nodesList = result['Value'] protocols = [] for node in nodesList: protocols.append( node ) protocolDetails = [] for protocol in protocols: result = self._getConfigStorageProtocolDetails( protocol ) if not result['OK']: return result protocolDetails.append( result['Value'] ) self.protocols = self.localProtocols + self.remoteProtocols return S_OK( protocolDetails ) def _getConfigStorageProtocolDetails( self, protocol ): """ Parse the contents of the protocol block """ result = self.resourcesHelper.getAccessProtocolOptionsDict( protocol ) if not result['OK']: return result optionsDict = result['Value'] # We must have certain values internally even if not supplied in CS protocolDict = {'Access':'', 'Host':'', 'Path':'', 'Port':'', 'Protocol':'', 'ProtocolName':'', 'SpaceToken':'', 'WSUrl':''} for option in optionsDict: protocolDict[option] = optionsDict[option] # Now update the local and remote protocol lists. # A warning will be given if the Access option is not set. if protocolDict['Access'].lower() == 'remote': self.remotePlugins.append( protocolDict['PluginName'] ) elif protocolDict['Access'].lower() == 'local': self.localPlugins.append( protocolDict['PluginName'] ) else: errStr = "StorageFactory.__getProtocolDetails: The 'Access' option for %s:%s is neither 'local' or 'remote'." % ( storageName, protocolSection ) gLogger.warn( errStr ) # The PluginName option must be defined if not protocolDict['PluginName']: errStr = "StorageFactory.__getProtocolDetails: 'PluginName' option is not defined." 
gLogger.error( errStr, "%s: %s" % ( storageName, protocolSection ) ) return S_ERROR( errStr ) return S_OK( protocolDict ) ########################################################################################### # # Below is the method for obtaining the object instantiated for a provided storage configuration # def __generateStorageObject( self, storageName, pluginName, parameters ): storageType = pluginName if self.proxy: storageType = 'Proxy' objectLoader = ObjectLoader() result = objectLoader.loadObject( 'Resources.Storage.%sStorage' % storageType, storageType + 'Storage' ) if not result['OK']: gLogger.error( 'Failed to load storage object: %s' % result['Message'] ) return result storageClass = result['Value'] try: storage = storageClass( storageName, parameters ) except Exception, x: errStr = "StorageFactory._generateStorageObject: Failed to instantiate %s: %s" % ( storageName, x ) gLogger.exception( errStr ) return S_ERROR( errStr ) return S_OK( storage )
Sbalbp/DIRAC
Resources/Storage/StorageFactory.py
Python
gpl-3.0
11,003
[ "DIRAC" ]
f9c657d875c1922a4d1ae8b858971f3fd029f62a8b79b11213a5bb357be7128a
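The getStorage() docstring in the record above spells out the parameter dictionary the method expects ('StorageName', 'PluginName', 'Protocol', plus optional keys such as 'Port', 'Host', 'Path', 'SpaceToken'). Below is a minimal sketch of driving that interface, assuming a configured DIRAC installation; the import path is inferred from the file path shown above and every value in the dictionary is a placeholder, not taken from the record.

from DIRAC.Resources.Storage.StorageFactory import StorageFactory

# Hypothetical VO name; the factory also accepts useProxy=True.
factory = StorageFactory(vo='lhcb')

parameterDict = {
    'StorageName': 'CERN-RAW',        # mandatory, per the class docstring
    'PluginName': 'SRM2',             # mandatory plugin name
    'Protocol': 'srm',                # protocol identifier
    'Host': 'srm-cern.example.org',   # optional keys follow (placeholders)
    'Port': '8443',
    'Path': '/castor/cern.ch',
    'SpaceToken': '',
}

result = factory.getStorage(parameterDict)
if result['OK']:
    storage = result['Value']         # a single storage stub, CS is not consulted
else:
    print(result['Message'])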
#pylint: disable=no-init,invalid-name from __future__ import (absolute_import, division, print_function) from mantid.api import * from mantid.kernel import * from mantid.simpleapi import * import os from time import strftime from mantid.kernel import Direction COMPRESS_TOL_TOF = .01 EXTENSIONS_NXS = ["_event.nxs", ".nxs.h5"] def getBasename(filename): name = os.path.split(filename)[-1] for extension in EXTENSIONS_NXS: name = name.replace(extension, '') return name #pylint: disable=too-many-instance-attributes class CalibrateRectangularDetectors(PythonAlgorithm): _filterBadPulses = None _xpixelbin = None _ypixelbin = None _grouping = None _smoothoffsets = None _smoothGroups = None _peakpos = None _peakpos1 = None _peakmin = None _peakmax = None _peakpos2 = None _peakmin2 = None _peakmax2 = None _peakpos3 = None _peakmin3 = None _peakmax3 = None _lastpixel = None _lastpixel2 = None _lastpixel3 = None _ccnumber = None _maxoffset = None _diffractionfocus = None _outDir = None _outTypes = None _binning = None def category(self): return "Diffraction\\Calibration" def seeAlso(self): return [ "GetDetectorOffsets" ] def name(self): return "CalibrateRectangularDetectors" def summary(self): return "Calibrate the detector pixels and write a calibration file" def PyInit(self): self.declareProperty(MultipleFileProperty(name="RunNumber", extensions=EXTENSIONS_NXS), "Event file") validator = IntArrayBoundedValidator() validator.setLower(0) self.declareProperty(IntArrayProperty("Background", values=[0], direction=Direction.Input, validator=validator)) self.declareProperty("XPixelSum", 1, "Sum detector pixels in X direction. Must be a factor of X total pixels. Default is 1.") self.declareProperty("YPixelSum", 1, "Sum detector pixels in Y direction. Must be a factor of Y total pixels. Default is 1.") self.declareProperty("SmoothSummedOffsets", False, "If the data was summed for calibration, smooth the resulting offsets workspace.") self.declareProperty("SmoothGroups", "", "Comma delimited number of points for smoothing pixels in each group. Default is no Smoothing.") self.declareProperty("UnwrapRef", 0., "Reference total flight path for frame unwrapping. Zero skips the correction") self.declareProperty("LowResRef", 0., "Reference DIFC for resolution removal. Zero skips the correction") self.declareProperty("MaxOffset", 1.0, "Maximum absolute value of offsets; default is 1") self.declareProperty("CrossCorrelation", True, "CrossCorrelation if True; minimize using many peaks if False.") validator = FloatArrayBoundedValidator() validator.setLower(0.) self.declareProperty(FloatArrayProperty("PeakPositions", []), "Comma delimited d-space positions of reference peaks. Use 1-3 for Cross Correlation. "+ "Unlimited for many peaks option.") self.declareProperty("PeakWindowMax", 0., "Maximum window around a peak to search for it. Optional.") self.declareProperty(ITableWorkspaceProperty("FitwindowTableWorkspace", "", Direction.Input, PropertyMode.Optional), "Name of input table workspace containing the fit window information for each spectrum. 
") self.declareProperty("MinimumPeakHeight", 2., "Minimum value allowed for peak height") self.declareProperty("MinimumPeakHeightObs", 0., "Minimum value of a peak's maximum observed Y value for this peak to be used to calculate offset.") self.declareProperty(MatrixWorkspaceProperty("DetectorResolutionWorkspace", "", Direction.Input, PropertyMode.Optional), "Name of optional input matrix workspace for each detector's resolution (D(d)/d).") self.declareProperty(FloatArrayProperty("AllowedResRange", [0.25, 4.0], direction=Direction.Input), "Range of allowed individual peak's resolution factor to input detector's resolution.") self.declareProperty("PeakFunction", "Gaussian", StringListValidator(["BackToBackExponential", "Gaussian", "Lorentzian"]), "Type of peak to fit. Used only with CrossCorrelation=False") self.declareProperty("BackgroundType", "Flat", StringListValidator(['Flat', 'Linear', 'Quadratic']), "Used only with CrossCorrelation=False") self.declareProperty(IntArrayProperty("DetectorsPeaks", []), "Comma delimited numbers of detector banks for each peak if using 2-3 peaks for Cross Correlation. "+ "Default is all.") self.declareProperty("PeakHalfWidth", 0.05, "Half width of d-space around peaks for Cross Correlation. Default is 0.05") self.declareProperty("CrossCorrelationPoints", 100, "Number of points to find peak from Cross Correlation. Default is 100") self.declareProperty(FloatArrayProperty("Binning", [0.,0.,0.]), "Min, Step, and Max of d-space bins. Logarithmic binning is used if Step is negative.") self.declareProperty("DiffractionFocusWorkspace", False, "Diffraction focus by detectors. Default is False") grouping = ["All", "Group", "Column", "bank"] self.declareProperty("GroupDetectorsBy", "All", StringListValidator(grouping), "Detector groups to use for future focussing: All detectors as one group, "+ "Groups (East,West for SNAP), Columns for SNAP, detector banks") self.declareProperty("FilterBadPulses", True, "Filter out events measured while proton charge is more than 5% below average") self.declareProperty("FilterByTimeMin", 0., "Relative time to start filtering by in seconds. Applies only to sample.") self.declareProperty("FilterByTimeMax", 0., "Relative time to stop filtering by in seconds. 
Applies only to sample.") outfiletypes = ['dspacemap', 'calibration', 'dspacemap and calibration'] self.declareProperty("SaveAs", "calibration", StringListValidator(outfiletypes)) self.declareProperty(FileProperty("OutputDirectory", "", FileAction.Directory)) self.declareProperty("OutputFilename", "", Direction.Output) return def validateInputs(self): """ Validate inputs :return: """ messages = {} detectors = self.getProperty("DetectorsPeaks").value if self.getProperty("CrossCorrelation").value: positions = self.getProperty("PeakPositions").value if len(detectors) <= 1: if len(positions) != 1: messages["PeakPositions"] = "Can only have one cross correlation peak without " \ "specifying 'DetectorsPeaks'" else: if len(detectors) != len(positions): messages["PeakPositions"] = "Must be the same length as 'DetectorsPeaks' (%d != %d)" \ % (len(positions), len(detectors)) messages["DetectorsPeaks"] = "Must be the same length as 'PeakPositions' or empty" elif len(detectors) > 3: messages["DetectorsPeaks"] = "Up to 3 peaks are supported" elif bool(detectors): messages["DetectorsPeaks"] = "Only allowed for CrossCorrelation=True" return messages def _loadData(self, filename, filterWall=None): if filename is None or len(filename) <= 0: return None kwargs = {"Precount":False} if filterWall is not None: if filterWall[0] > 0.: kwargs["FilterByTimeStart"] = filterWall[0] if filterWall[1] > 0.: kwargs["FilterByTimeStop"] = filterWall[1] wkspName = getBasename(filename) LoadEventNexus(Filename=filename, OutputWorkspace=wkspName, **kwargs) FilterBadPulses(InputWorkspace=wkspName, OutputWorkspace=wkspName) CompressEvents(InputWorkspace=wkspName, OutputWorkspace=wkspName, Tolerance=COMPRESS_TOL_TOF) # 100ns return wkspName def _saveCalibration(self, wkspName, calibFilePrefix): outfilename = None if "dspacemap" in self._outTypes: outfilename = calibFilePrefix.replace('_d', '_dspacemap_d') + '.dat' if os.path.exists(outfilename): os.unlink(outfilename) #write Dspacemap file SaveDspacemap(InputWorkspace=wkspName+"offset", DspacemapFile=outfilename) if "calibration" in self._outTypes: # for the sake of legacy SaveCalFile(OffsetsWorkspace=wkspName+"offset", GroupingWorkspace=wkspName+"group", MaskWorkspace=wkspName+"mask",Filename=calibFilePrefix + '.cal') # the real version outfilename = calibFilePrefix + '.h5' if os.path.exists(outfilename): os.unlink(outfilename) ConvertDiffCal(OffsetsWorkspace=wkspName+"offset", OutputWorkspace=wkspName+"cal") SaveDiffCal(CalibrationWorkspace=wkspName+"cal", GroupingWorkspace=wkspName+"group", MaskWorkspace=wkspName+"mask", Filename=outfilename) if outfilename is not None: self.setProperty("OutputFilename", outfilename) def _createGrouping(self, wkspName): (_, numGroupedSpectra, numGroups) = CreateGroupingWorkspace(InputWorkspace=wkspName, GroupDetectorsBy=self._grouping, OutputWorkspace=wkspName+"group") if (numGroupedSpectra==0) or (numGroups==0): raise RuntimeError("%d spectra will be in %d groups" % (numGroupedSpectra, numGroups)) #pylint: disable=too-many-branches def _cccalibrate(self, wksp): if wksp is None: return None # Bin events in d-Spacing Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=str(self._peakmin)+","+str(abs(self._binning[1]))+","+str(self._peakmax)) #Find good peak for reference ymax = 0 for s in range(0,mtd[wksp].getNumberHistograms()): y_s = mtd[wksp].readY(s) midBin = int(mtd[wksp].blocksize()/2) if y_s[midBin] > ymax: refpixel = s ymax = y_s[midBin] self.log().information("Reference spectra=%s" % refpixel) # Cross correlate spectra using 
interval around peak at peakpos (d-Spacing) if self._lastpixel == 0: self._lastpixel = mtd[wksp].getNumberHistograms()-1 else: self._lastpixel = int(mtd[wksp].getNumberHistograms()*self._lastpixel/self._lastpixel3) - 1 self.log().information("Last pixel=%s" % self._lastpixel) CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc", ReferenceSpectra=refpixel, WorkspaceIndexMin=0, WorkspaceIndexMax=self._lastpixel, XMin=self._peakmin, XMax=self._peakmax) # Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing) GetDetectorOffsets(InputWorkspace=wksp+"cc", OutputWorkspace=wksp+"offset", Step=abs(self._binning[1]), DReference=self._peakpos1, XMin=-self._ccnumber, XMax=self._ccnumber, MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask") if AnalysisDataService.doesExist(wksp+"cc"): AnalysisDataService.remove(wksp+"cc") if self._peakpos2 > 0.0: Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=str(self._peakmin2)+","+str(abs(self._binning[1]))+","+str(self._peakmax2)) #Find good peak for reference ymax = 0 for s in range(0,mtd[wksp].getNumberHistograms()): y_s = mtd[wksp].readY(s) midBin = int(mtd[wksp].blocksize()/2) if y_s[midBin] > ymax: refpixel = s ymax = y_s[midBin] msg = "Reference spectra = %s, lastpixel_3 = %s" % (refpixel, self._lastpixel3) self.log().information(msg) self._lastpixel2 = int(mtd[wksp].getNumberHistograms()*self._lastpixel2/self._lastpixel3) - 1 CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc2", ReferenceSpectra=refpixel, WorkspaceIndexMin=self._lastpixel+1, WorkspaceIndexMax=self._lastpixel2, XMin=self._peakmin2, XMax=self._peakmax2) # Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing) GetDetectorOffsets(InputWorkspace=wksp+"cc2", OutputWorkspace=wksp+"offset2", Step=abs(self._binning[1]), DReference=self._peakpos2, XMin=-self._ccnumber, XMax=self._ccnumber, MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask2") Plus(LHSWorkspace=wksp+"offset", RHSWorkspace=wksp+"offset2", OutputWorkspace=wksp+"offset") Plus(LHSWorkspace=wksp+"mask", RHSWorkspace=wksp+"mask2", OutputWorkspace=wksp+"mask") for ws in [wksp+"cc2", wksp+"offset2", wksp+"mask2"]: if AnalysisDataService.doesExist(ws): AnalysisDataService.remove(ws) if self._peakpos3 > 0.0: Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=str(self._peakmin3)+","+str(abs(self._binning[1]))+","+str(self._peakmax3)) #Find good peak for reference ymax = 0 for s in range(0,mtd[wksp].getNumberHistograms()): y_s = mtd[wksp].readY(s) midBin = mtd[wksp].blocksize()/2 if y_s[midBin] > ymax: refpixel = s ymax = y_s[midBin] self.log().information("Reference spectra=%s" % refpixel) CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc3", ReferenceSpectra=refpixel, WorkspaceIndexMin=self._lastpixel2+1, WorkspaceIndexMax=mtd[wksp].getNumberHistograms()-1, XMin=self._peakmin3, XMax=self._peakmax3) # Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing) GetDetectorOffsets(InputWorkspace=wksp+"cc3", OutputWorkspace=wksp+"offset3", Step=abs(self._binning[1]), DReference=self._peakpos3, XMin=-self._ccnumber, XMax=self._ccnumber, MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask3") Plus(LHSWorkspace=wksp+"offset", RHSWorkspace=wksp+"offset3", OutputWorkspace=str(wksp)+"offset") Plus(LHSWorkspace=wksp+"mask", RHSWorkspace=wksp+"mask3", OutputWorkspace=wksp+"mask") for ws in [wksp+"cc3", wksp+"offset3", wksp+"mask3"]: if AnalysisDataService.doesExist(ws): 
AnalysisDataService.remove(ws) return str(wksp) #pylint: disable=too-many-branches def _multicalibrate(self, wksp): if wksp is None: return None # Bin events in d-Spacing Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=str(self._binning[0])+","+str((self._binning[1]))+","+str(self._binning[2])) if len(self._smoothGroups) > 0: SmoothData(InputWorkspace=wksp, OutputWorkspace=wksp, NPoints=self._smoothGroups, GroupingWorkspace=wksp+"group") # Get the fit window input workspace fitwinws = self.getProperty("FitwindowTableWorkspace").value # Set up resolution workspace resws = self.getProperty("DetectorResolutionWorkspace").value if resws is not None: resrange = self.getProperty("AllowedResRange").value if len(resrange) < 2: raise NotImplementedError("With input of 'DetectorResolutionWorkspace', "+ "number of allowed resolution range must be equal to 2.") reslowf = resrange[0] resupf = resrange[1] if reslowf >= resupf: raise NotImplementedError("Allowed resolution range factor, lower boundary "+ "(%f) must be smaller than upper boundary (%f)." % (reslowf, resupf)) else: reslowf = 0.0 resupf = 0.0 # Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing) GetDetOffsetsMultiPeaks(InputWorkspace=wksp, OutputWorkspace=wksp+"offset", DReference=self._peakpos, FitWindowMaxWidth=self.getProperty("PeakWindowMax").value, MinimumPeakHeight=self.getProperty("MinimumPeakHeight").value, MinimumPeakHeightObs=self.getProperty("MinimumPeakHeightObs").value, BackgroundType=self.getProperty("BackgroundType").value, MaxOffset=self._maxoffset, NumberPeaksWorkspace=wksp+"peaks", MaskWorkspace=wksp+"mask", FitwindowTableWorkspace = fitwinws, InputResolutionWorkspace=resws, MinimumResolutionFactor = reslowf, MaximumResolutionFactor = resupf) #Fixed SmoothNeighbours for non-rectangular and rectangular if self._smoothoffsets and self._xpixelbin*self._ypixelbin>1: # Smooth data if it was summed SmoothNeighbours(InputWorkspace=wksp+"offset", OutputWorkspace=wksp+"offset", WeightedSum="Flat", AdjX=self._xpixelbin, AdjY=self._ypixelbin) Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=str(self._binning[0])+","+str((self._binning[1]))+","+str(self._binning[2])) return str(wksp) def _focus(self, wksp): if wksp is None: return None MaskDetectors(Workspace=wksp, MaskedWorkspace=str(wksp)+"mask") wksp = AlignDetectors(InputWorkspace=wksp, OutputWorkspace=wksp, CalibrationWorkspace=str(wksp)+"cal") # Diffraction focusing using new calibration file with offsets if self._diffractionfocus: wksp = DiffractionFocussing(InputWorkspace=wksp, OutputWorkspace=wksp, GroupingWorkspace=str(wksp)+"group") wksp = Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=self._binning) return wksp def _initCCpars(self): self._peakpos1 = self._peakpos[0] self._peakpos2 = 0 self._peakpos3 = 0 self._lastpixel = 0 self._lastpixel2 = 0 self._lastpixel3 = 0 peakhalfwidth = self.getProperty("PeakHalfWidth").value self._peakmin = self._peakpos1-peakhalfwidth self._peakmax = self._peakpos1+peakhalfwidth if len(self._peakpos) >= 2: self._peakpos2 = self._peakpos[1] self._peakmin2 = self._peakpos2-peakhalfwidth self._peakmax2 = self._peakpos2+peakhalfwidth if len(self._peakpos) >= 3: self._peakpos3 = self._peakpos[2] self._peakmin3 = self._peakpos3-peakhalfwidth self._peakmax3 = self._peakpos3+peakhalfwidth detectors = self.getProperty("DetectorsPeaks").value if len(detectors) == 0: detectors = [0] if detectors[0]: self._lastpixel = int(detectors[0]) self._lastpixel3 = self._lastpixel if 
len(detectors) >= 2: self._lastpixel2 = self._lastpixel+int(detectors[1]) self._lastpixel3 = self._lastpixel2 if len(detectors) >= 3: self._lastpixel3 = self._lastpixel2+int(detectors[2]) self._ccnumber = self.getProperty("CrossCorrelationPoints").value #pylint: disable=too-many-branches def PyExec(self): # get generic information self._binning = self.getProperty("Binning").value if len(self._binning) != 1 and len(self._binning) != 3: raise RuntimeError("Can only specify (width) or (start,width,stop) for binning. Found %d values." % len(self._binning)) if len(self._binning) == 3: if self._binning[0] == 0. and self._binning[1] == 0. and self._binning[2] == 0.: raise RuntimeError("Failed to specify the binning") self._grouping = self.getProperty("GroupDetectorsBy").value self._xpixelbin = self.getProperty("XPixelSum").value self._ypixelbin = self.getProperty("YPixelSum").value self._smoothoffsets = self.getProperty("SmoothSummedOffsets").value self._smoothGroups = self.getProperty("SmoothGroups").value self._peakpos = self.getProperty("PeakPositions").value if self.getProperty("CrossCorrelation").value: self._initCCpars() self._maxoffset = self.getProperty("MaxOffset").value self._diffractionfocus = self.getProperty("DiffractionFocusWorkspace").value self._filterBadPulses = self.getProperty("FilterBadPulses").value self._outDir = self.getProperty("OutputDirectory").value+"/" self._outTypes = self.getProperty("SaveAs").value samRuns = self.getProperty("RunNumber").value backRuns = self.getProperty("Background").value if len(samRuns) != len(backRuns): if (len(backRuns) == 1 and backRuns[0] == 0) or (len(backRuns) <= 0): backRuns = [0]*len(samRuns) else: raise RuntimeError("Number of samples and backgrounds must match (%d!=%d)" % (len(samRuns), len(backRuns))) filterWall = (self.getProperty("FilterByTimeMin").value, self.getProperty("FilterByTimeMax").value) stuff = getBasename(samRuns[0]) stuff = stuff.split('_') (instrument, runNumber) = ('_'.join(stuff[:-1]), stuff[-1]) calib = instrument+"_calibrate_d"+runNumber+strftime("_%Y_%m_%d") calib = os.path.join(self._outDir, calib) for (samNum, backNum) in zip(samRuns, backRuns): # first round of processing the sample samRun = self._loadData(samNum, filterWall) samRun = str(samRun) if backNum > 0: backRun = self._loadData(instrument+'_'+str(backNum), filterWall) Minus(LHSWorkspace=samRun, RHSWorkspace=backRun, OutputWorkspace=samRun) DeleteWorkspace(backRun) CompressEvents(samRun, OutputWorkspace=samRun, Tolerance=COMPRESS_TOL_TOF) # 100ns self._createGrouping(samRun) LRef = self.getProperty("UnwrapRef").value DIFCref = self.getProperty("LowResRef").value # super special Jason stuff if LRef > 0: UnwrapSNS(InputWorkspace=samRun, OutputWorkspace=samRun, LRef=LRef) if DIFCref > 0: RemoveLowResTOF(InputWorkspace=samRun, OutputWorkspace=samRun, ReferenceDIFC=DIFCref) ConvertUnits(InputWorkspace=samRun, OutputWorkspace=samRun, Target="dSpacing") # Sum pixelbin X pixelbin blocks of pixels if self._xpixelbin*self._ypixelbin>1: SumNeighbours(InputWorkspace=samRun, OutputWorkspace=samRun, SumX=self._xpixelbin, SumY=self._ypixelbin) if self.getProperty("CrossCorrelation").value: samRun = self._cccalibrate(samRun) else: samRun = self._multicalibrate(samRun) self._saveCalibration(samRun, calib) if self._xpixelbin*self._ypixelbin>1 or len(self._smoothGroups) > 0: if AnalysisDataService.doesExist(samRun): AnalysisDataService.remove(samRun) samRun = self._loadData(samNum, filterWall) LRef = self.getProperty("UnwrapRef").value DIFCref = 
self.getProperty("LowResRef").value # super special Jason stuff if LRef > 0: samRun = UnwrapSNS(InputWorkspace=samRun, OutputWorkspace=samRun, LRef=LRef) if DIFCref > 0: samRun = RemoveLowResTOF(InputWorkspace=samRun, OutputWorkspace=samRun, ReferenceDIFC=DIFCref) else: samRun = ConvertUnits(InputWorkspace=samRun, OutputWorkspace=samRun, Target="TOF") samRun = self._focus(samRun) RenameWorkspace(InputWorkspace=samRun, OutputWorkspace=str(samRun)+"_calibrated") AlgorithmFactory.subscribe(CalibrateRectangularDetectors)
ScreamingUdder/mantid
Framework/PythonInterface/plugins/algorithms/CalibrateRectangularDetectors.py
Python
gpl-3.0
26,365
[ "Gaussian" ]
dfd9d14221a888bcc68a4e9d62387c8f23e0cc9105dbe7684e9ee8e871b4a3de
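CalibrateRectangularDetectors is registered with the AlgorithmFactory at the end of the file above, so it can be driven from mantid.simpleapi like any other algorithm. The following is a minimal sketch, assuming a Mantid installation and an event-NeXus file for the instrument; the run file, peak position and output directory are placeholders, and a single PeakPositions entry is used so the CrossCorrelation check in validateInputs passes without a DetectorsPeaks list.

from mantid.simpleapi import CalibrateRectangularDetectors

CalibrateRectangularDetectors(
    RunNumber='PG3_1234_event.nxs',      # placeholder event file name
    PeakPositions=[1.2615],              # single d-spacing reference peak
    CrossCorrelation=True,
    Binning=[0.1, -0.0004, 2.2],         # min, (log) step, max in d-spacing
    GroupDetectorsBy='All',
    SaveAs='calibration',
    OutputDirectory='/tmp/calibration',  # placeholder output directory
)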
''' Test_RSS_Policy_CEAvailabilityPolicy
'''

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

__RCSID__ = '$Id: $'

import unittest

import DIRAC.ResourceStatusSystem.Policy.CEAvailabilityPolicy as moduleTested

################################################################################

class CEAvailabilityPolicy_TestCase( unittest.TestCase ):

  def setUp( self ):
    ''' Setup '''
    self.moduleTested = moduleTested
    self.testClass = self.moduleTested.CEAvailabilityPolicy

  def tearDown( self ):
    ''' TearDown '''
    del self.testClass
    del self.moduleTested

################################################################################
# Tests

class CEAvailabilityPolicy_Success( CEAvailabilityPolicy_TestCase ):

  def test_instantiate( self ):
    ''' tests that we can instantiate one object of the tested class '''
    policy = self.testClass()
    self.assertEqual( 'CEAvailabilityPolicy', policy.__class__.__name__ )

  def test_evaluate( self ):
    ''' tests the evaluate method '''
    policy = self.testClass()

    commandResult = {'OK': True,
                     'Value': {'Reason': "All queues in 'Production'",
                               'Status': 'Production',
                               'cccreamceli05.in2p3.fr:8443/cream-sge-long': 'Production',
                               'cccreamceli05.in2p3.fr:8443/cream-sge-verylong': 'Production'}}
    res = policy._evaluate(commandResult)
    self.assertTrue(res['OK'])
    self.assertEqual( 'Active', res[ 'Value' ][ 'Status' ] )

    commandResult = {'OK': True,
                     'Value': {'Reason': "All queues in 'Production'",
                               'Status': 'Degraded',
                               'cccreamceli05.in2p3.fr:8443/cream-sge-long': 'Production',
                               'cccreamceli05.in2p3.fr:8443/cream-sge-verylong': 'Production'}}
    res = policy._evaluate(commandResult)
    self.assertTrue(res['OK'])
    self.assertEqual( 'Banned', res[ 'Value' ][ 'Status' ] )

################################################################################

if __name__ == '__main__':
  suite = unittest.defaultTestLoader.loadTestsFromTestCase( CEAvailabilityPolicy_TestCase )
  suite.addTest( unittest.defaultTestLoader.loadTestsFromTestCase( CEAvailabilityPolicy_Success ) )
  testResult = unittest.TextTestRunner( verbosity = 2 ).run( suite )

#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
yujikato/DIRAC
src/DIRAC/ResourceStatusSystem/Policy/test/Test_RSS_Policy_CEAvailabilityPolicy.py
Python
gpl-3.0
2,666
[ "DIRAC" ]
c849a627ac6a858ed4cb05ed1e1bed58c93b91080fc0f6b95210628a8abd16bc
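The tests above also show the call pattern for exercising the policy outside of unittest. A compact sketch along the same lines, with a hand-built command result; the status value and reason string are placeholders.

import DIRAC.ResourceStatusSystem.Policy.CEAvailabilityPolicy as CEAP

policy = CEAP.CEAvailabilityPolicy()
commandResult = {'OK': True,
                 'Value': {'Status': 'Production',
                           'Reason': "All queues in 'Production'"}}
# Expected to come back wrapped in S_OK with an 'Active' RSS status.
print(policy._evaluate(commandResult))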
# encoding: utf-8 # Written for Python 3.6 import os import sys import math import cage import numpy as np import pymatgen as pmg import pymatgen.io.nwchem as nwchem """ Script to set up the calculations for a chain of paths connecting the non equivalent facets of a cage molecule. """ # TODO Make these parameters defaults, but allow the user to change them with arguments in the CLI # TODO The current set up does not work very well for molecules that are not spherically shaped -> improve method set_up_landscape # Facetsetup parameter IGNORE = (pmg.Element('Li'), pmg.Element('Na'), pmg.Element('H'), pmg.Element('I'), pmg.Element('Br'), pmg.Element('Cl'), pmg.Element('F')) # Landscape parameters CATION = "Na" # Cation to place on the landscape # Distance endpoints between the center of the molecule and the cation ENDPOINT_RADII = (3.4,4) # TODO For some reason, using the density to set the number of radii did not work. However, that seems much more sensible. Fix it. N_RADII = 1 # Number of radius points for the landscape ANGLE_DENSITY = 50 # Density of points along the angle coordinate # Calculation parameters # BASIS = {"*": "aug-pcseg-1"} BASIS = {"*": "aug-cc-pVDZ"} THEORY_SETUP = {"iterations": "300", "xc": "xpbe96 xpbe96", "direct": "", "smear": "0.01", "convergence energy": "1e-4", "convergence density": "1e-2", "convergence gradient": "1e-2", "convergence damp": "70"} GEO_SETUP = {"noautosym", "noautoz", "nocenter", "units angstroms"} ALT_SETUP = {} DRIVER_SETUP = {"loose": "", "maxiter": "100"} OPERATION = "energy" # Input check try: # Take the filename argument from the user filename = sys.argv[2] # Take the operation input OPERATION = sys.argv[1] except IndexError: # Take the filename argument from the user try: filename = sys.argv[1] except IndexError: raise IOError("No POSCAR file provided.") def main(): # Load the POSCAR into a Cage mol = cage.core.Cage.from_poscar(filename) mol.find_surface_facets(IGNORE) # Find the chain paths paths = mol.find_noneq_chain_connections() total_mol = mol.copy() # For each facet, set up the calculation input files edge_number = 1 for path in paths: # Set up the edge directory edge_dir = "edge" + str(edge_number) try: os.mkdir(edge_dir) except FileExistsError: pass # Write out the molecule and path facets to the edge directory mol.to(fmt="json", filename=os.path.join(edge_dir, "mol.json")) path[0].to(fmt="json", filename=os.path.join(edge_dir, "init_facet.json")) path[1].to(fmt="json", filename=os.path.join(edge_dir, "final_facet.json")) # Get copies so the originals aren't mutated edge_mol = mol.copy() facet1 = path[0].copy() facet2 = path[1].copy() # Set up the landscape landscape = set_up_edge_landscape(facet1, facet2, endpoint_radii=ENDPOINT_RADII, number_of_radii=N_RADII, angle_density=ANGLE_DENSITY) # Get the molecule for each landscape point molecules = set_up_molecules(edge_mol, landscape, CATION) # Set up an xyz file to visualize the edge for point in landscape.points: try: total_mol.append(pmg.Specie(CATION, 1), point, validate_proximity=False) edge_mol.append(pmg.Specie(CATION, 1), point, validate_proximity=False) except ValueError: pass edge_mol.to(fmt="xyz", filename=os.path.join(edge_dir, "edge.xyz")) # In case the molecules must be optimized, add the constraints and # optimization setup (DRIVER) if OPERATION == "optimize": fixed_facet = mol.find_farthest_facet(landscape.center) ALT_SETUP["constraints"] = find_constraints(mol, fixed_facet) ALT_SETUP["driver"] = DRIVER_SETUP # Set up the task for the calculations tasks = 
[nwchem.NwTask(molecules[0].charge, None, BASIS, theory="dft", operation=OPERATION, theory_directives=THEORY_SETUP, alternate_directives=ALT_SETUP)] # Set up the input files study = cage.study.Study(molecules, tasks) study.set_up_input(edge_dir, sort_comp=False, geometry_options=GEO_SETUP) edge_number += 1 # Set up an xyz file with all the paths total_mol.to(fmt="xyz", filename="total_mol.xyz") ########### # METHODS # ########### def set_up_edge_landscape(facet1, facet2, endpoint_radii=(2, 5), number_of_radii=None, angle_density=50): """ Set up the Landscape to study the energy landscape between two facets. The script creates a line, whose direction is determined by the vector connecting the origin to the center of the first facet. The line endpoints are determined by the endpoint_radii parameter. The line Landscape is then extended by rotation along the axis defined by the cross product of the two facet normals. :param facet1: :param facet2: :param endpoint_radii: :param number_of_radii: :param angle_density: :return: """ line_vector = facet1.center/np.linalg.norm(facet1.center) lands = cage.landscape.Landscape.from_vertices( [line_vector * endpoint_radii[0], line_vector * endpoint_radii[1]], num=number_of_radii ) axis = np.cross(facet1.normal, facet2.normal) angle = math.asin(np.linalg.norm(axis)) axis = axis * angle / np.linalg.norm(axis) lands.extend_by_rotation(axis, angle_density, remove_endline=True) return lands def set_up_molecules(mol, landscape, cation): """ Set up the List of molecules from Landscape by adding the cation on the various points of the landscape. :param mol: :param landscape: :param cation: :return: """ # Set up the cation Species cation = pmg.Specie(cation, +1) # Set up the List of Molecules molecules = [] for point in landscape.points: configuration = mol.copy() # If carbon is in the molecule, the charge is -1, -2 otherwise # TODO Find a good way to calculate the charge, if possible if pmg.Element("C") in [site.specie for site in configuration.sites]: configuration.set_charge_and_spin(charge=0) else: configuration.set_charge_and_spin(charge=-1) # Add the cation to the Molecule try: configuration.append(cation, point) molecules.append(configuration) except ValueError: print("ValueError detected when appending the Li site. " "Ignoring this point in the energy landscape.") return molecules def find_constraints(mol, facet): """ Find the constraints for the calculation, by fixing a facet and the cation. In order to make sure the Molecule does not rotate, the script fixes an entire facet of the Molecule. The cation is assumed to be the last element in the molecule. :return: """ # Find the corresponding atom numbers in string format site_numbers = [] for i in range(len(mol.sites)): if mol.sites[i] in facet.sites: site_numbers.append(str(i + 1) + " ") # Add the cation site site_numbers.append(str(len(mol.sites) + 1)) # Join the strings of the site numbers site_numbers = "".join(site_numbers) # Return the constraints on the atoms return {"fix atom": site_numbers} if __name__ == '__main__': main()
mbercx/cage
cage/scripts/chainsetup.py
Python
mit
8,112
[ "NWChem", "pymatgen" ]
3bd7bf11066b38c47aa704b2990de4036e89059eda25fc9807d110f96e43f5f7
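set_up_edge_landscape derives its rotation from the two facet normals: the axis is their cross product and, for unit normals, the rotation angle follows from the norm of that product. A small numpy-only sketch of that step, using made-up example normals rather than real facet data:

import math
import numpy as np

# Hypothetical unit normals of two neighbouring facets.
normal1 = np.array([1.0, 0.0, 0.0])
normal2 = np.array([math.cos(0.3), math.sin(0.3), 0.0])

axis = np.cross(normal1, normal2)            # direction of the rotation axis
angle = math.asin(np.linalg.norm(axis))      # |n1 x n2| = sin(angle) for unit normals
axis = axis * angle / np.linalg.norm(axis)   # scale the axis by the angle, as in the script

print(angle)                                 # ~0.3 rad for the normals above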
#!/usr/bin/env python """ Save polynomial basis on reference elements or on a mesh for visualization into a given output directory. """ import sys sys.path.append('.') import os from optparse import OptionParser import numpy as nm from sfepy.base.base import output, Struct from sfepy.base.ioutils import get_print_info, ensure_path from sfepy.discrete import FieldVariable, Variables from sfepy.discrete.fem import Mesh, FEDomain, Field from sfepy.discrete.fem.geometry_element import GeometryElement from sfepy.discrete.fem.poly_spaces import PolySpace from sfepy.discrete.fem.linearizer import create_output from sfepy.discrete.fem.fields_base import create_expression_output usage = '%prog [options] output_dir\n' + __doc__.rstrip() help = { 'basis' : 'name of the FE basis [default: %default]', 'derivative' : 'save d-th derivative of FE basis, can be 0 or 1 [default: %default]', 'max_order' : 'maximum order of polynomials [default: %default]', 'geometry' : 'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"' ' [default: %default]', 'mesh' : 'name of the mesh file - alternative to --geometry [default: %default]', 'permutations' : 'list of geometry element permutations for each element, e.g. 0,1 is a' ' single permutation for two elements, 0,1,0,2,1,0 are three permutations' ' for two elements. Special value "all" can be used to save all possible' ' permutations for given reference element. Works only with --mesh option' ' [default: %default]', 'dofs' : 'if given, save only the DOFs specified as a comma-separated list' ' [default: %default]', 'lin_options' : 'linearizer options [default: %default]', 'plot_dofs' : 'plot local and global DOF numberings, with --mesh option', } def get_dofs(dofs, n_total): if dofs is None: dofs = range(n_total) else: dofs = [int(ii) for ii in dofs.split(',')] return dofs def save_basis_on_mesh(mesh, options, output_dir, lin, permutations=None, suffix=''): if permutations is not None: mesh = mesh.copy() gel = GeometryElement(mesh.descs[0]) perms = gel.get_conn_permutations()[permutations] conn = mesh.cmesh.get_cell_conn() n_el, n_ep = conn.num, gel.n_vertex offsets = nm.arange(n_el) * n_ep conn.indices[:] = conn.indices.take((perms + offsets[:, None]).ravel()) domain = FEDomain('domain', mesh) omega = domain.create_region('Omega', 'all') field = Field.from_args('f', nm.float64, shape=1, region=omega, approx_order=options.max_order, poly_space_base=options.basis) var = FieldVariable('u', 'unknown', field) if options.plot_dofs: import sfepy.postprocess.plot_dofs as pd import sfepy.postprocess.plot_cmesh as pc ax = pc.plot_wireframe(None, mesh.cmesh) ax = pd.plot_global_dofs(ax, field.get_coor(), field.ap.econn) ax = pd.plot_local_dofs(ax, field.get_coor(), field.ap.econn) if options.dofs is not None: ax = pd.plot_nodes(ax, field.get_coor(), field.ap.econn, field.ap.interp.poly_spaces['v'].nodes, get_dofs(options.dofs, var.n_dof)) pd.plt.show() output('dofs: %d' % var.n_dof) vec = nm.empty(var.n_dof, dtype=var.dtype) n_digit, _format = get_print_info(var.n_dof, fill='0') name_template = os.path.join(output_dir, 'dof_%s%s.vtk' % (_format, suffix)) for ip in get_dofs(options.dofs, var.n_dof): output('dof %d...' 
% ip) vec.fill(0.0) vec[ip] = 1.0 var.set_data(vec) if options.derivative == 0: out = var.create_output(vec, linearization=lin) else: out = create_expression_output('ev_grad.ie.Elements(u)', 'u', 'f', {'f' : field}, None, Variables([var]), mode='qp', verbose=False, min_level=lin.min_level, max_level=lin.max_level, eps=lin.eps) name = name_template % ip ensure_path(name) out['u'].mesh.write(name, out=out) output('...done (%s)' % name) def main(): parser = OptionParser(usage=usage, version='%prog') parser.add_option('-b', '--basis', metavar='name', action='store', dest='basis', default='lagrange', help=help['basis']) parser.add_option('-d', '--derivative', metavar='d', type=int, action='store', dest='derivative', default=0, help=help['derivative']) parser.add_option('-n', '--max-order', metavar='order', type=int, action='store', dest='max_order', default=2, help=help['max_order']) parser.add_option('-g', '--geometry', metavar='name', action='store', dest='geometry', default='2_4', help=help['geometry']) parser.add_option('-m', '--mesh', metavar='mesh', action='store', dest='mesh', default=None, help=help['mesh']) parser.add_option('', '--permutations', metavar='permutations', action='store', dest='permutations', default=None, help=help['permutations']) parser.add_option('', '--dofs', metavar='dofs', action='store', dest='dofs', default=None, help=help['dofs']) parser.add_option('-l', '--lin-options', metavar='options', action='store', dest='lin_options', default='min_level=2,max_level=5,eps=1e-3', help=help['lin_options']) parser.add_option('', '--plot-dofs', action='store_true', dest='plot_dofs', default=False, help=help['plot_dofs']) options, args = parser.parse_args() if len(args) == 1: output_dir = args[0] else: parser.print_help(), return output('polynomial space:', options.basis) output('max. order:', options.max_order) lin = Struct(kind='adaptive', min_level=2, max_level=5, eps=1e-3) for opt in options.lin_options.split(','): key, val = opt.split('=') setattr(lin, key, eval(val)) if options.mesh is None: dim, n_ep = int(options.geometry[0]), int(options.geometry[2]) output('reference element geometry:') output(' dimension: %d, vertices: %d' % (dim, n_ep)) gel = GeometryElement(options.geometry) gps = PolySpace.any_from_args(None, gel, 1, base=options.basis) ps = PolySpace.any_from_args(None, gel, options.max_order, base=options.basis) n_digit, _format = get_print_info(ps.n_nod, fill='0') name_template = os.path.join(output_dir, 'bf_%s.vtk' % _format) for ip in get_dofs(options.dofs, ps.n_nod): output('shape function %d...' % ip) def eval_dofs(iels, rx): if options.derivative == 0: bf = ps.eval_base(rx).squeeze() rvals = bf[None, :, ip:ip+1] else: bfg = ps.eval_base(rx, diff=True) rvals = bfg[None, ..., ip] return rvals def eval_coors(iels, rx): bf = gps.eval_base(rx).squeeze() coors = nm.dot(bf, gel.coors)[None, ...] 
return coors (level, coors, conn, vdofs, mat_ids) = create_output(eval_dofs, eval_coors, 1, ps, min_level=lin.min_level, max_level=lin.max_level, eps=lin.eps) out = { 'bf' : Struct(name='output_data', mode='vertex', data=vdofs, var_name='bf', dofs=None) } mesh = Mesh.from_data('bf_mesh', coors, None, [conn], [mat_ids], [options.geometry]) name = name_template % ip ensure_path(name) mesh.write(name, out=out) output('...done (%s)' % name) else: mesh = Mesh.from_file(options.mesh) output('mesh geometry:') output(' dimension: %d, vertices: %d, elements: %d' % (mesh.dim, mesh.n_nod, mesh.n_el)) if options.permutations: if options.permutations == 'all': from sfepy.linalg import cycle gel = GeometryElement(mesh.descs[0]) n_perms = gel.get_conn_permutations().shape[0] all_permutations = [ii for ii in cycle(mesh.n_el * [n_perms])] else: all_permutations = [int(ii) for ii in options.permutations.split(',')] all_permutations = nm.array(all_permutations) np = len(all_permutations) all_permutations.shape = (np / mesh.n_el, mesh.n_el) output('using connectivity permutations:\n', all_permutations) else: all_permutations = [None] for ip, permutations in enumerate(all_permutations): if permutations is None: suffix = '' else: suffix = '_' + '_'.join('%d' % ii for ii in permutations) save_basis_on_mesh(mesh, options, output_dir, lin, permutations, suffix) if __name__ == '__main__': main()
RexFuzzle/sfepy
script/save_basis.py
Python
bsd-3-clause
9,961
[ "VTK" ]
cd55e7d18665dfe59eda66b7f393f333d74f5c9768e1a4fcd02ff17ff6eac25e
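For quick interactive checks, the reference-element branch of the script can be reproduced in a few lines. The sketch below reuses only calls that appear in the script above (GeometryElement, PolySpace.any_from_args, eval_base); the choice of element, order and evaluation points is an arbitrary example.

import numpy as nm

from sfepy.discrete.fem.geometry_element import GeometryElement
from sfepy.discrete.fem.poly_spaces import PolySpace

gel = GeometryElement('2_4')                                  # reference quadrilateral
ps = PolySpace.any_from_args(None, gel, 2, base='lagrange')   # order-2 Lagrange basis

coors = nm.array([[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]])        # points in reference coordinates
values = ps.eval_base(coors)                                  # basis values at those points
grads = ps.eval_base(coors, diff=True)                        # first derivatives

print(values.shape, grads.shape)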
#! /usr/bin/env python
"""
Handy little script for generating RVO input.

Requires a set of output files compatible with the Atomic Simulation Environment.
"""

import ase.io
import argparse

def main(paths):
    for path in paths:
        A = ase.io.read(path)
        cell = A.get_cell()
        print A.get_total_energy(),
        for x in cell.flatten():
            print ',{0}'.format(x),
        print ''

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Generate 10-column input file for RVO from given quantum chemistry calculation files')
    parser.add_argument('paths', nargs='+')
    args = parser.parse_args()
    main(args.paths)
WMD-Bath/dvxc
ase_generate_input.py
Python
gpl-3.0
672
[ "ASE" ]
fb90d0150b159ba5586db6fa1b1e0cfc5c4b5cab23bb32e2de6a1c962428e852
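The script prints one row per file: the total energy followed by the nine flattened cell components. Below is a self-contained sketch producing the same ten values from an in-memory toy structure instead of an output file; the EMT calculator and the copper cell are stand-ins (assuming a recent ASE), not part of the original script.

import numpy as np

from ase import Atoms
from ase.calculators.emt import EMT

atoms = Atoms('Cu', cell=[3.6, 3.6, 3.6], pbc=True)   # toy single-atom cubic cell
atoms.calc = EMT()                                     # stand-in calculator so energy is defined

row = [atoms.get_total_energy()] + list(np.asarray(atoms.get_cell()).flatten())
print(','.join('{0}'.format(x) for x in row))          # energy plus nine cell components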
# iCraft is Copyright 2010 both # # The Archives team: # <Adam Guy> adam@adam-guy.com AKA "Adam01" # <Andrew Godwin> andrew@aeracode.org AKA "Aera" # <Dylan Lukes> lukes.dylan@gmail.com AKA "revenant" # <Gareth Coles> colesgareth2@hotmail.com AKA "gdude2002" # # And, # # The iCraft team: # <Andrew Caluzzi> tehcid@gmail.com AKA "tehcid" # <Andrew Dolgov> fox@bah.org.ru AKA "gothfox" # <Andrew Horn> Andrew@GJOCommunity.com AKA "AndrewPH" # <Brad Reardon> brad@bradness.co.cc AKA "PixelEater" # <Clay Sweetser> CDBKJmom@aol.com AKA "Varriount" # <James Kirslis> james@helplarge.com AKA "iKJames" # <Jason Sayre> admin@erronjason.com AKA "erronjason" # <Jonathon Dunford> sk8rjwd@yahoo.com AKA "sk8rjwd" # <Joseph Connor> destroyerx100@gmail.com AKA "destroyerx1" # <Joshua Connor> fooblock@live.com AKA "Fooblock" # <Kamyla Silva> supdawgyo@hotmail.com AKA "NotMeh" # <Kristjan Gunnarsson> kristjang@ffsn.is AKA "eugo" # <Nathan Coulombe> NathanCoulombe@hotmail.com AKA "Saanix" # <Nick Tolrud> ntolrud@yahoo.com AKA "ntfwc" # <Noel Benzinger> ronnygmod@gmail.com AKA "Dwarfy" # <Randy Lyne> qcksilverdragon@gmail.com AKA "goober" # <Willem van der Ploeg> willempieeploeg@live.nl AKA "willempiee" # # Disclaimer: Parts of this code may have been contributed by the end-users. # # iCraft is licensed under the Creative Commons # Attribution-NonCommercial-ShareAlike 3.0 Unported License. # To view a copy of this license, visit http://creativecommons.org/licenses/by-nc-sa/3.0/ # Or, send a letter to Creative Commons, 171 2nd Street, # Suite 300, San Francisco, California, 94105, USA. var_cango = True try: blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(x,y-1,z)]) if blocktocheck != 0: var_cango = False except: var_cango = False if var_cango: block = '\x00' world[x, y, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block) var_position = (x,y-1,z) x,y,z = var_position block = chr(48) world[x, y, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) block = chr(48) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block) else: closestposition = (0,0) closestdistance = None for entry in userpositionlist: var_pos = entry[1] i,j,k = var_pos distance = ((i-x)**2+(j-y)**2+(k-z)**2)**0.5 if closestdistance == None: closestdistance = distance closestposition = (var_pos[0],var_pos[2]) else: if distance < closestdistance: closestdistance = distance closestposition = (var_pos[0],var_pos[2]) var_continue = True if closestdistance < 4: self.client.sendServerMessage("sssssssssssssSSSSSSSS") if closestdistance < 3: self.client.sendServerMessage("BOOM") entitylist.append(["tnt",(x,y,z),1,1,True,0,5]) var_dellist.append(index) var_continue = False if var_continue: i,k = closestposition distance = ((i-x)**2+(k-z)**2)**0.5 if distance != 0: target = [int((i-x)/(distance/1.75)) + x,y,int((k-z)/(distance/1.75)) + z] i,j,k = target var_cango = True try: blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)]) if blocktocheck != 0: var_cango = False blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)]) if blocktocheck != 0: var_cango = False except: var_cango = False if var_cango: block = '\x00' world[x, y, z] = block 
self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block) var_position = target x,y,z = var_position block = chr(48) world[x, y, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) block = chr(48) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block) else: var_cango = True target[1] = target[1] + 1 j = target[1] try: blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j,k)]) if blocktocheck != 0: var_cango = False blocktocheck = ord(world.blockstore.raw_blocks[world.blockstore.get_offset(i,j+1,k)]) if blocktocheck != 0: var_cango = False except: var_cango = False if var_cango: block = '\x00' world[x, y, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block) var_position = target x,y,z = var_position block = chr(48) world[x, y, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y, z, block), world=world) self.client.sendBlock(x, y, z, block) block = chr(48) world[x, y+1, z] = block self.client.queueTask(TASK_BLOCKSET, (x, y+1, z, block), world=world) self.client.sendBlock(x, y+1, z, block)
TheArchives/Nexus
core/entities/creeper.py
Python
bsd-2-clause
6,866
[ "VisIt" ]
250998752f389b32c74f0482bb53d8f9f42c40b774b1fc5186b2dba0654c06aa
#!/usr/bin/env python ''' TracPy class ''' import tracpy import numpy as np from matplotlib.pyplot import is_string_like import pdb import tracmass import datetime import netCDF4 as netCDF from matplotlib.mlab import find class Tracpy(object): ''' TracPy class. ''' def __init__(self, currents_filename, grid_filename=None, vert_filename=None, nsteps=1, ndays=1, ff=1, tseas=3600., ah=0., av=0., z0='s', zpar=1, do3d=0, doturb=0, name='test', dostream=0, N=1, time_units='seconds since 1970-01-01', dtFromTracmass=None, zparuv=None, tseas_use=None, usebasemap=False, savell=True, doperiodic=0, usespherical=True, grid=None): ''' Initialize class. Note: GCM==General Circulation Model, meaning the predicted u/v velocity fields that are input into TracPy to run the drifters. :param currents_filename: NetCDF file name (with extension), list of file names, or OpenDAP url to GCM output. :param grid_filename=None: NetCDF grid file name or OpenDAP url to GCM grid. :param vert_filename=None: If vertical grid information is not included in the grid file, or if all grid info is not in output file, use two. :param nsteps=1: sets the max time step between GCM model outputs between drifter steps. (iter in TRACMASS) Does not control the output sampling anymore. The velocity fields are assumed frozen while a drifter is stepped through a given grid cell. nsteps can force the reinterpolation of the fields by setting the max time before reinterpolation. :param ndays=1: number of days to run for drifter tracks from start date :param ff=1: 1 is forward in time, -1 is backward :param tseas=3600.: number of seconds between GCM model outputs :param ah=0.: horizontal diffusivity, in m^2/s. Only used if doturb !=0. :param av=0.: vertical diffusivity, in m^2/s. Only used if doturb !=0 and do3d==1. :param z0='s': string flag in 2D case or array of initial z locations in 3D case :param zpar=1: isoslice value to in 2D case or string flag in 3D case For 3D drifter movement, use do3d=1, and z0 should be an array of initial drifter depths. The array should be the same size as lon0 and be negative for under water. Currently drifter depths need to be above the seabed for every x,y particle location for the script to run. To do 3D but start at surface, use z0=zeros(ia.shape) and have either zpar='fromMSL' choose fromMSL to have z0 starting depths be for that depth below the base time-independent sea level (or mean sea level). choose 'fromZeta' to have z0 starting depths be for that depth below the time-dependent sea surface. Haven't quite finished the 'fromZeta' case. For 2D drifter movement, turn on twodim flag in makefile. Then: set z0 to 's' for 2D along a terrain-following slice and zpar to be the index of s level you want to use (0 to km-1) set z0 to 'rho' for 2D along a density surface and zpar to be the density value you want to use Can do the same thing with salinity ('salt') or temperature ('temp') The model output doesn't currently have density though. 
set z0 to 'z' for 2D along a depth slice and zpar to be the constant (negative) depth value you want to use To simulate drifters at the surface, set z0 to 's' and zpar = grid['km']-1 to put them in the upper s level :param do3d=0: 1 for 3D or 0 for 2D :param doturb=0: 0 for no added diffusion, 1 for diffusion via velocity fluctuation, 2/3 for diffusion via random walk (3 for aligned with isobaths) :param name='test': name for output :param dostream=0: 1 to calculate transport for lagrangian stream functions, 0 to not :param N=None: number of steps between GCM model outputs for outputting drifter locations. Defaults to output at nsteps. If dtFromTracmass is being used, N is set by that. :param time_units='seconds since 1970-01-01': Reference for time, for changing between numerical times and datetime format :param dtFromTracmass=None: Time period for exiting from TRACMASS. If uninitialized, this is set to tseas so that it only exits TRACMASS when it has gone through a full model output. If initialized by the user, TRACMASS will run for 1 time step of length dtFromTracmass before exiting to the loop. :param zparuv=None: Defaults to zpar. Use this if the k index for the model output fields (e.g, u, v) is different from the k index in the grid This might happen if, for example, only the surface current were saved, but the model run originally did have many layers. This parameter represents the k index for the u and v output, not for the grid. :param tseas_use=None: Defaults to tseas. Desired time between outputs in seconds, as opposed to the actual time between outputs (tseas). Should be >= tseas since this is just an ability to use model output at less frequency than is available, probably just for testing purposes or matching other models. Should be a multiple of tseas (or will be rounded later). :param usebasemap=False: whether to use basemap for projections in readgrid or not. Not is faster, but using basemap allows for plotting. :param savell=True: True to save drifter tracks in lon/lat and False to save them in grid coords :param doperiodic=0: Whether to use periodic boundary conditions for drifters and, if so, on which walls. 0: do not use periodic boundary conditions 1: use a periodic boundary condition in the east-west/x/i direction 2: use a periodic boundary condition in the north-south/y/j direction :param usespherical=True: True if want to use spherical (lon/lat) coordinates and False for idealized applications where it isn't necessary to project from spherical coordinates. :param grid=None: Grid is initialized to None and is found subsequently normally, but can be set with the TracPy object in order to save time when running a series of simulations. 
''' self.currents_filename = currents_filename self.grid_filename = grid_filename # If grid_filename is distinct, assume we need a separate vert_filename for vertical grid info # use what is input or use info from currents_filename if grid_filename is not None: if vert_filename is not None: self.vert_filename = vert_filename else: if type(currents_filename)==str: # there is one input filename self.vert_filename = currents_filename else: # we have a list of names self.vert_filename = currents_filename[0] else: self.vert_filename = vert_filename # this won't be used though self.grid = grid # Initial parameters self.nsteps = nsteps self.ndays = ndays self.ff = ff self.tseas = float(tseas) self.ah = ah self.av = av self.z0 = z0 self.zpar = zpar self.do3d = do3d self.doturb = doturb self.name = name self.dostream = dostream self.N = N self.time_units = time_units self.usebasemap = usebasemap self.savell = savell self.doperiodic = doperiodic self.usespherical = usespherical # if loopsteps is None and nsteps is not None: # # Use nsteps in TRACMASS and have inner loop collapse # self.loopsteps = 1 # elif loopsteps is not None and nsteps is None: # # This means to use the inner loop (with loopsteps) and nsteps=1 to just do 1 step per call to TRACMASS # self.nsteps = 1 # elif loopsteps is None and nsteps is None: # print 'need to input a value for nsteps or loopsteps.' # break if dtFromTracmass is None: self.dtFromTracmass = tseas else: # If using dtFromTracmass, N=1, for steps between tracmass exits self.N = 1 # # If using dtFromTracmass, N is set according to that. # self.N = (self.ndays*3600*24.)/self.tseas # this is the total number of model_step_is_done self.dtFromTracmass = dtFromTracmass # Find number of interior loop steps in case dtFromTracmass is not equal to tseas # NEEDS TO BE EVEN NUMBER FOR NOW: NEED TO GENERALIZE THIS LATER self.nsubsteps = int(self.tseas/self.dtFromTracmass) if zparuv is None: self.zparuv = zpar else: self.zparuv = zparuv if tseas_use is None: self.tseas_use = tseas # Calculate parameters that derive from other parameters # Number of model outputs to use (based on tseas, actual amount of model output) # This should not be updated with tstride since it represents the full amount of # indices in the original model output. tstride will be used separately to account # for the difference. # Adding one index so that all necessary indices are captured by this number. # Then the run loop uses only the indices determined by tout instead of needing # an extra one beyond # now rounding up instead of down self.tout = np.int(np.ceil((ndays*(24*3600))/tseas + 1)) # Calculate time outputs stride. Will be 1 if want to use all model output. self.tstride = int(self.tseas_use/self.tseas) # will round down # For later use # fluxes self.uf = None self.vf = None self.dzt = None self.zrt = None self.zwt = None def _readgrid(self): ''' Read in horizontal and vertical grid. ''' # if vertical grid information is not included in the grid file, or if all grid info # is not in output file, use two if self.grid_filename is not None: self.grid = tracpy.inout.readgrid(self.grid_filename, self.vert_filename, usebasemap=self.usebasemap, usespherical=self.usespherical) else: self.grid = tracpy.inout.readgrid(self.currents_filename, usebasemap=self.usebasemap, usespherical=self.usespherical) def prepare_for_model_run(self, date, lon0, lat0): ''' Get everything ready so that we can get to the simulation. 
''' # # Convert date to number # date = netCDF.date2num(date, self.time_units) # Figure out what files will be used for this tracking nc, tinds = tracpy.inout.setupROMSfiles(self.currents_filename, date, self.ff, self.tout, self.time_units, tstride=self.tstride) # Read in grid parameters into dictionary, grid, if haven't already if self.grid is None: self._readgrid() # Interpolate to get starting positions in grid space if self.usespherical: # convert from assumed input lon/lat coord locations to grid space xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_ll2ij') else: # assume input seed locations are in projected/idealized space and change to index space xstart0, ystart0, _ = tracpy.tools.interpolate2d(lon0, lat0, self.grid, 'd_xy2ij') # Do z a little lower down # Initialize seed locations ia = np.ceil(xstart0) ja = np.ceil(ystart0) # don't use nan's # pdb.set_trace() ind2 = ~np.isnan(ia) * ~np.isnan(ja) ia = ia[ind2] ja = ja[ind2] xstart0 = xstart0[ind2] ystart0 = ystart0[ind2] dates = nc.variables['ocean_time'][:] t0save = dates[tinds[0]] # time at start of drifter test from file in seconds since 1970-01-01, add this on at the end since it is big # Initialize drifter grid positions and indices xend = np.ones((ia.size,(len(tinds)-1)*self.N+1))*np.nan yend = np.ones((ia.size,(len(tinds)-1)*self.N+1))*np.nan zend = np.ones((ia.size,(len(tinds)-1)*self.N+1))*np.nan zp = np.ones((ia.size,(len(tinds)-1)*self.N+1))*np.nan ttend = np.zeros((ia.size,(len(tinds)-1)*self.N+1)) flag = np.zeros((ia.size),dtype=np.int) # initialize all exit flags for in the domain # Initialize vertical stuff and fluxes # Read initial field in - to 'new' variable since will be moved # at the beginning of the time loop ahead lx = self.grid['xr'].shape[0] ly = self.grid['xr'].shape[1] lk = self.grid['sc_r'].size if is_string_like(self.z0): # isoslice case # Now that we have the grid, initialize the info for the two bounding model # steps using the grid size self.uf = np.asfortranarray(np.ones((lx-1, ly, lk-1, 2)))*np.nan self.vf = np.asfortranarray(np.ones((lx, ly-1, lk-1, 2)))*np.nan self.dzt = np.asfortranarray(np.ones((lx, ly, lk-1, 2)))*np.nan self.zrt = np.asfortranarray(np.ones((lx, ly, lk-1, 2)))*np.nan self.zwt = np.asfortranarray(np.ones((lx, ly, lk, 2)))*np.nan self.uf[:,:,:,1], self.vf[:,:,:,1], \ self.dzt[:,:,:,1], self.zrt[:,:,:,1], \ self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv) else: # 3d case # Now that we have the grid, initialize the info for the two bounding model # steps using the grid size self.uf = np.asfortranarray(np.ones((lx-1, ly, lk-1, 2)))*np.nan self.vf = np.asfortranarray(np.ones((lx, ly-1, lk-1, 2)))*np.nan self.dzt = np.asfortranarray(np.ones((lx, ly, lk-1, 2)))*np.nan self.zrt = np.asfortranarray(np.ones((lx, ly, lk-1, 2)))*np.nan self.zwt = np.asfortranarray(np.ones((lx, ly, lk, 2)))*np.nan self.uf[:,:,:,1], self.vf[:,:,:,1], \ self.dzt[:,:,:,1], self.zrt[:,:,:,1], \ self.zwt[:,:,:,1] = tracpy.inout.readfields(tinds[0], self.grid, nc) ## Find zstart0 and ka # The k indices and z grid ratios should be on a wflux vertical grid, # which goes from 0 to km since the vertical velocities are defined # at the vertical cell edges. 
A drifter's grid cell is vertically bounded # above by the kth level and below by the (k-1)th level if is_string_like(self.z0): # then doing a 2d isoslice # there is only one vertical grid cell, but with two vertically- # bounding edges, 0 and 1, so the initial ka value is 1 for all # isoslice drifters. ka = np.ones(ia.size) # for s level isoslice, place drifters vertically at the center # of the grid cell since that is where the u/v flux info is from. # For a rho/temp/density isoslice, we treat it the same way, such # that the u/v flux info taken at a specific rho/temp/density value # is treated as being at the center of the grid cells vertically. zstart0 = np.ones(ia.size)*0.5 else: # 3d case # Convert initial real space vertical locations to grid space # first find indices of grid cells vertically ka = np.ones(ia.size)*np.nan zstart0 = np.ones(ia.size)*np.nan if self.zpar == 'fromMSL': # print 'zpar==''fromMSL'' not implemented yet...' raise NotImplementedError("zpar==''fromMSL'' not implemented yet...") # for i in xrange(ia.size): # # pdb.set_trace() # ind = (self.grid['zwt0'][ia[i],ja[i],:]<=self.z0[i]) # # check to make sure there is at least one true value, so the z0 is shallower than the seabed # if np.sum(ind): # ka[i] = find(ind)[-1] # find value that is just shallower than starting vertical position # # if the drifter starting vertical location is too deep for the x,y location, complain about it # else: # Maybe make this nan or something later # print 'drifter vertical starting location is too deep for its x,y location. Try again.' # if (self.z0[i] != self.grid['zwt0'][ia[i],ja[i],ka[i]]) and (ka[i] != self.grid['km']): # check this # ka[i] = ka[i]+1 # # Then find the vertical relative position in the grid cell by adding on the bit of grid cell # zstart0[i] = ka[i] - abs(self.z0[i]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) \ # /abs(self.grid['zwt0'][ia[i],ja[i],ka[i]-1]-self.grid['zwt0'][ia[i],ja[i],ka[i]]) elif self.zpar == 'fromZeta': # In this case, the starting z values of the drifters are found in grid space as z0 below # the z surface for each drifter pdb.set_trace() for i in xrange(ia.size): # ravel to z0 = self.z0.ravel() ind = (self.zwt[ia[i],ja[i],:,1]<=z0[i]) ka[i] = find(ind)[-1] # find value that is just shallower than starting vertical position if (z0[i] != self.zwt[ia[i],ja[i],ka[i],1]) and (ka[i] != self.grid['km']): # check this ka[i] = ka[i]+1 # Then find the vertical relative position in the grid cell by adding on the bit of grid cell zstart0[i] = ka[i] - abs(z0[i]-self.zwt[ia[i],ja[i],ka[i],1]) \ /abs(self.zwt[ia[i],ja[i],ka[i]-1,1]-self.zwt[ia[i],ja[i],ka[i],1]) # Find initial cell depths to concatenate to beginning of drifter tracks later zsave = tracpy.tools.interpolate3d(xstart0, ystart0, zstart0, self.zwt[:,:,:,1]) # Initialize x,y,z with initial seeded positions xend[:,0] = xstart0 yend[:,0] = ystart0 zend[:,0] = zstart0 return tinds, nc, t0save, xend, yend, zend, zp, ttend, flag def prepare_for_model_step(self, tind, nc, flag, xend, yend, zend, j, nsubstep, T0): ''' Already in a step, get ready to actually do step ''' xstart = xend[:,j*self.N] ystart = yend[:,j*self.N] zstart = zend[:,j*self.N] # mask out drifters that have exited the domain xstart = np.ma.masked_where(flag[:]==1,xstart) ystart = np.ma.masked_where(flag[:]==1,ystart) zstart = np.ma.masked_where(flag[:]==1,zstart) if T0 is not None: T0 = np.ma.masked_where(flag[:]==1,T0) # Move previous new time step to old time step info self.uf[:,:,:,0] = self.uf[:,:,:,1].copy() self.vf[:,:,:,0] = 
self.vf[:,:,:,1].copy() self.dzt[:,:,:,0] = self.dzt[:,:,:,1].copy() self.zrt[:,:,:,0] = self.zrt[:,:,:,1].copy() self.zwt[:,:,:,0] = self.zwt[:,:,:,1].copy() # Read stuff in for next time loop if is_string_like(self.z0): # isoslice case self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc, self.z0, self.zpar, zparuv=self.zparuv) else: # 3d case self.uf[:,:,:,1],self.vf[:,:,:,1],self.dzt[:,:,:,1],self.zrt[:,:,:,1],self.zwt[:,:,:,1] = tracpy.inout.readfields(tind, self.grid, nc) # Find the fluxes of the immediately bounding range for the desired time step, which can be less than 1 model output # SHOULD THIS BE PART OF SELF TOO? Leave uf and vf as is, though, because they may be used for interpolating the # input fluxes for substeps. ufsub = np.ones(self.uf.shape)*np.nan vfsub = np.ones(self.vf.shape)*np.nan # for earlier bounding flux info rp = nsubstep/self.nsubsteps # weighting for later time step rm = 1 - rp # timing for earlier time step ufsub[:,:,:,0] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1] vfsub[:,:,:,0] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1] # for later bounding flux info rp = (nsubstep+1)/self.nsubsteps # weighting for later time step rm = 1 - rp # timing for earlier time step ufsub[:,:,:,1] = rm*self.uf[:,:,:,0] + rp*self.uf[:,:,:,1] vfsub[:,:,:,1] = rm*self.vf[:,:,:,0] + rp*self.vf[:,:,:,1] # Change the horizontal indices from python to fortran indexing # (vertical are zero-based in tracmass) xstart, ystart = tracpy.tools.convert_indices('py2f',xstart,ystart) return xstart, ystart, zstart, ufsub, vfsub, T0 def step(self, xstart, ystart, zstart, ufsub, vfsub, T0, U, V): ''' Take some number of steps between a start and end time. FIGURE OUT HOW TO KEEP TRACK OF TIME FOR EACH SET OF LINES :param tind: Time index to use for stepping FILL IN ''' # Figure out where in time we are if T0 is not None: xend, yend, zend, flag,\ ttend, U, V = \ tracmass.step(np.ma.compressed(xstart), np.ma.compressed(ystart), np.ma.compressed(zstart), self.tseas_use, ufsub, vfsub, self.ff, self.grid['kmt'].astype(int), self.dzt, self.grid['dxdy'], self.grid['dxv'], self.grid['dyu'], self.grid['h'], self.nsteps, self.ah, self.av, self.do3d, self.doturb, self.doperiodic, self.dostream, self.N, t0=np.ma.compressed(T0), ut=U, vt=V) else: xend, yend, zend, flag,\ ttend, U, V = \ tracmass.step(np.ma.compressed(xstart), np.ma.compressed(ystart), np.ma.compressed(zstart), self.tseas_use, ufsub, vfsub, self.ff, self.grid['kmt'].astype(int), self.dzt, self.grid['dxdy'], self.grid['dxv'], self.grid['dyu'], self.grid['h'], self.nsteps, self.ah, self.av, self.do3d, self.doturb, self.doperiodic, self.dostream, self.N) # return the new positions or the delta lat/lon return xend, yend, zend, flag, ttend, U, V def model_step_is_done(self, xend, yend, zend, ttend, tstart): ''' Stuff to do after a call to TRACMASS ''' # Add initial step time to ttend ttend = (ttend.T + tstart).T # Change the horizontal indices from python to fortran indexing xend, yend = tracpy.tools.convert_indices('f2py', xend, yend) # Skip calculating real z position if we are doing surface-only drifters anyway if self.z0 != 's' and self.zpar != self.grid['km']-1: # Calculate real z position r = np.linspace(1./self.N,1,self.N) # linear time interpolation constant that is used in tracmass for n in xrange(self.N): # loop through time steps # interpolate to a specific output time # pdb.set_trace() zwt = (1.-r[n])*self.zwt[:,:,:,0] + r[n]*self.zwt[:,:,:,1] zp, dt = 
tracpy.tools.interpolate3d(xend, yend, zend, zwt) else: zp = zend # return the new positions or the delta lat/lon return xend, yend, zend, zp, ttend def finishSimulation(self, ttend, t0save, xend, yend, zp, T0, U, V): ''' Wrap up simulation. NOT DOING TRANSPORT YET ''' ttend = ttend + t0save # add back in base time in seconds ## map coordinates interpolation if saving tracks as lon/lat if self.savell: if self.usespherical: lonp, latp, dt = tracpy.tools.interpolate2d(xend, yend, self.grid, 'm_ij2ll', mode='constant', cval=np.nan) else: lonp, latp, dt = tracpy.tools.interpolate2d(xend, yend, self.grid, 'm_ij2xy', mode='constant', cval=np.nan) else: # rename grid index locations as lon/lat to fit in with save syntax below lonp = xend; latp = yend; # Save results to netcdf file tracpy.inout.savetracks(lonp, latp, zp, ttend, self.name, self.nsteps, self.N, self.ff, self.tseas_use, self.ah, self.av, self.do3d, self.doturb, self.currents_filename, self.doperiodic, self.time_units, T0, U, V, savell=self.savell) return lonp, latp, zp, ttend, T0, U, V
dcherian/tracpy
tracpy/tracpy_class.py
Python
mit
25,805
[ "NetCDF" ]
3096bd2e44bb7dc44c51ae0a1d904e42b25e5e2245029f65e70c297907418455
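The prepare_for_model_step method in the TracPy class above blends the two bounding model outputs into per-substep fluxes using the weights rm and rp computed from the substep index. A minimal standalone sketch of that linear time interpolation follows; interp_substep_fluxes, the array shapes, and the toy check are illustrative only and not part of tracpy, and the explicit float() casts are there so the weights cannot truncate to whole numbers under Python 2 integer division.

import numpy as np

def interp_substep_fluxes(uf, nsubstep, nsubsteps):
    """Linearly interpolate the bounding fluxes uf[..., 0] (earlier model
    output) and uf[..., 1] (later model output) onto the two edges of one
    interior substep, mirroring the rm/rp weighting used above."""
    ufsub = np.empty_like(uf)
    # Weight for the substep's earlier edge (fraction of the way to the later output).
    rp = float(nsubstep) / nsubsteps
    ufsub[..., 0] = (1.0 - rp) * uf[..., 0] + rp * uf[..., 1]
    # Weight for the substep's later edge.
    rp = float(nsubstep + 1) / nsubsteps
    ufsub[..., 1] = (1.0 - rp) * uf[..., 0] + rp * uf[..., 1]
    return ufsub

# Tiny check: with 4 substeps, substep 0 spans weights 0 -> 0.25.
uf = np.stack([np.zeros((3, 3, 2)), np.ones((3, 3, 2))], axis=-1)
sub = interp_substep_fluxes(uf, nsubstep=0, nsubsteps=4)
assert np.allclose(sub[..., 0], 0.0) and np.allclose(sub[..., 1], 0.25)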
# -*- coding: utf-8 -*- """ .. _tut-point-spread: ====================================== Corrupt known signal with point spread ====================================== The aim of this tutorial is to demonstrate how to put a known signal at a desired location(s) in a :class:`mne.SourceEstimate` and then corrupt the signal with point-spread by applying a forward and inverse solution. """ # %% import os.path as op import numpy as np import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, apply_inverse from mne.simulation import simulate_stc, simulate_evoked # %% # First, we set some parameters. seed = 42 # parameters for inverse method method = 'sLORETA' snr = 3. lambda2 = 1.0 / snr ** 2 # signal simulation parameters # do not add extra noise to the known signals nave = np.inf T = 100 times = np.linspace(0, 1, T) dt = times[1] - times[0] # Paths to MEG data data_path = sample.data_path() subjects_dir = op.join(data_path, 'subjects') fname_fwd = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-fwd.fif') fname_inv = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-oct-6-meg-fixed-inv.fif') fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif') # %% # Load the MEG data # ----------------- fwd = mne.read_forward_solution(fname_fwd) fwd = mne.convert_forward_solution(fwd, force_fixed=True, surf_ori=True, use_cps=False) fwd['info']['bads'] = [] inv_op = read_inverse_operator(fname_inv) raw = mne.io.read_raw_fif(op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')) raw.set_eeg_reference(projection=True) events = mne.find_events(raw) event_id = {'Auditory/Left': 1, 'Auditory/Right': 2} epochs = mne.Epochs(raw, events, event_id, baseline=(None, 0), preload=True) epochs.info['bads'] = [] evoked = epochs.average() labels = mne.read_labels_from_annot('sample', subjects_dir=subjects_dir) label_names = [label.name for label in labels] n_labels = len(labels) # %% # Estimate the background noise covariance from the baseline period # ----------------------------------------------------------------- cov = mne.compute_covariance(epochs, tmin=None, tmax=0.) # %% # Generate sinusoids in two spatially distant labels # -------------------------------------------------- # The known signal is all zero-s off of the two labels of interest signal = np.zeros((n_labels, T)) idx = label_names.index('inferiorparietal-lh') signal[idx, :] = 1e-7 * np.sin(5 * 2 * np.pi * times) idx = label_names.index('rostralmiddlefrontal-rh') signal[idx, :] = 1e-7 * np.sin(7 * 2 * np.pi * times) # %% # Find the center vertices in source space of each label # ------------------------------------------------------ # # We want the known signal in each label to only be active at the center. We # create a mask for each label that is 1 at the center vertex and 0 at all # other vertices in the label. This mask is then used when simulating # source-space data. hemi_to_ind = {'lh': 0, 'rh': 1} for i, label in enumerate(labels): # The `center_of_mass` function needs labels to have values. labels[i].values.fill(1.) # Restrict the eligible vertices to be those on the surface under # consideration and within the label. surf_vertices = fwd['src'][hemi_to_ind[label.hemi]]['vertno'] restrict_verts = np.intersect1d(surf_vertices, label.vertices) com = labels[i].center_of_mass(subjects_dir=subjects_dir, restrict_vertices=restrict_verts, surf='white') # Convert the center of vertex index from surface vertex list to Label's # vertex list. 
cent_idx = np.where(label.vertices == com)[0][0] # Create a mask with 1 at center vertex and zeros elsewhere. labels[i].values.fill(0.) labels[i].values[cent_idx] = 1. # Print some useful information about this vertex and label if 'transversetemporal' in label.name: dist, _ = label.distances_to_outside( subjects_dir=subjects_dir) dist = dist[cent_idx] area = label.compute_area(subjects_dir=subjects_dir) # convert to equivalent circular radius r = np.sqrt(area / np.pi) print(f'{label.name} COM vertex is {dist * 1e3:0.1f} mm from edge ' f'(label area equivalent to a circle with r={r * 1e3:0.1f} mm)') # %% # Create source-space data with known signals # ------------------------------------------- # # Put known signals onto surface vertices using the array of signals and # the label masks (stored in labels[i].values). stc_gen = simulate_stc(fwd['src'], labels, signal, times[0], dt, value_fun=lambda x: x) # %% # Plot original signals # --------------------- # # Note that the original signals are highly concentrated (point) sources. # kwargs = dict(subjects_dir=subjects_dir, hemi='split', smoothing_steps=4, time_unit='s', initial_time=0.05, size=1200, views=['lat', 'med']) clim = dict(kind='value', pos_lims=[1e-9, 1e-8, 1e-7]) brain_gen = stc_gen.plot(clim=clim, **kwargs) # %% # Simulate sensor-space signals # ----------------------------- # # Use the forward solution and add Gaussian noise to simulate sensor-space # (evoked) data from the known source-space signals. The amount of noise is # controlled by ``nave`` (higher values imply less noise). # evoked_gen = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave, random_state=seed) # Map the simulated sensor-space data to source-space using the inverse # operator. stc_inv = apply_inverse(evoked_gen, inv_op, lambda2, method=method) # %% # Plot the point-spread of corrupted signal # ----------------------------------------- # # Notice that after applying the forward- and inverse-operators to the known # point sources that the point sources have spread across the source-space. # This spread is due to the minimum norm solution so that the signal leaks to # nearby vertices with similar orientations so that signal ends up crossing the # sulci and gyri. brain_inv = stc_inv.plot(**kwargs) # %% # Exercises # --------- # - Change the ``method`` parameter to either ``'dSPM'`` or ``'MNE'`` to # explore the effect of the inverse method. # - Try setting ``evoked_snr`` to a small, finite value, e.g. 3., to see the # effect of noise.
mne-tools/mne-python
tutorials/simulation/70_point_spread.py
Python
bsd-3-clause
6,489
[ "Gaussian" ]
b709d26b913de256dff6610103f18fb58176d8cf33bf9a19820fe84231edf263
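The exercises at the end of the point-spread tutorial above suggest swapping the inverse method and adding sensor noise. A minimal sketch of both variants follows, reusing only objects the tutorial already defines (evoked_gen, inv_op, lambda2, kwargs, fwd, stc_gen, evoked, cov, method, seed); the names stc_dspm, brain_dspm, evoked_noisy, stc_noisy and the value nave=30 are illustrative choices, and note that in this version of the script the noise level is controlled by the nave parameter rather than a separate evoked_snr variable.

# Exercise 1: re-run the inverse with dSPM instead of sLORETA.
from mne.minimum_norm import apply_inverse

stc_dspm = apply_inverse(evoked_gen, inv_op, lambda2, method='dSPM')
brain_dspm = stc_dspm.plot(**kwargs)  # compare the spread against the sLORETA result

# Exercise 2: add measurement noise by simulating with a small, finite number
# of averages instead of nave = np.inf (smaller nave means noisier data).
from mne.simulation import simulate_evoked

evoked_noisy = simulate_evoked(fwd, stc_gen, evoked.info, cov, nave=30,
                               random_state=seed)
stc_noisy = apply_inverse(evoked_noisy, inv_op, lambda2, method=method)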
# 2-electron VMC code for 2dim quantum dot with importance sampling # No Coulomb interaction # Using gaussian rng for new positions and Metropolis- Hastings # Energy minimization using standard gradient descent # Common imports import os # Where to save the figures and data files PROJECT_ROOT_DIR = "Results" FIGURE_ID = "Results/FigureFiles" if not os.path.exists(PROJECT_ROOT_DIR): os.mkdir(PROJECT_ROOT_DIR) if not os.path.exists(FIGURE_ID): os.makedirs(FIGURE_ID) def image_path(fig_id): return os.path.join(FIGURE_ID, fig_id) def save_fig(fig_id): plt.savefig(image_path(fig_id) + ".png", format='png') from math import exp, sqrt from random import random, seed, normalvariate import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm from matplotlib.ticker import LinearLocator, FormatStrFormatter import sys from numba import jit from scipy.optimize import minimize import multiprocessing as mp # Trial wave function for the 2-electron quantum dot in two dims def WaveFunction(r,alpha): r1 = r[0,0]**2 + r[0,1]**2 r2 = r[1,0]**2 + r[1,1]**2 return exp(-0.5*alpha*(r1+r2)) # Local energy for the 2-electron quantum dot in two dims, using analytical local energy def LocalEnergy(r,alpha): r1 = (r[0,0]**2 + r[0,1]**2) r2 = (r[1,0]**2 + r[1,1]**2) return 0.5*(1-alpha*alpha)*(r1 + r2) +2.0*alpha # Derivate of wave function ansatz as function of variational parameters def DerivativeWFansatz(r,alpha): r1 = (r[0,0]**2 + r[0,1]**2) r2 = (r[1,0]**2 + r[1,1]**2) WfDer = -0.5*(r1+r2) return WfDer # Setting up the quantum force for the two-electron quantum dot, recall that it is a vector def QuantumForce(r,alpha): qforce = np.zeros((NumberParticles,Dimension), np.double) qforce[0,:] = -2*r[0,:]*alpha qforce[1,:] = -2*r[1,:]*alpha return qforce # Computing the derivative of the energy and the energy # jit decorator tells Numba to compile this function. # The argument types will be inferred by Numba when function is called. 
@jit def EnergyMinimization(alpha): NumberMCcycles= 1000 # Parameters in the Fokker-Planck simulation of the quantum force D = 0.5 TimeStep = 0.05 # positions PositionOld = np.zeros((NumberParticles,Dimension), np.double) PositionNew = np.zeros((NumberParticles,Dimension), np.double) # Quantum force QuantumForceOld = np.zeros((NumberParticles,Dimension), np.double) QuantumForceNew = np.zeros((NumberParticles,Dimension), np.double) # seed for rng generator seed() energy = 0.0 DeltaE = 0.0 EnergyDer = 0.0 DeltaPsi = 0.0 DerivativePsiE = 0.0 #Initial position for i in range(NumberParticles): for j in range(Dimension): PositionOld[i,j] = normalvariate(0.0,1.0)*sqrt(TimeStep) wfold = WaveFunction(PositionOld,alpha) QuantumForceOld = QuantumForce(PositionOld,alpha) #Loop over MC MCcycles for MCcycle in range(NumberMCcycles): #Trial position moving one particle at the time for i in range(NumberParticles): for j in range(Dimension): PositionNew[i,j] = PositionOld[i,j]+normalvariate(0.0,1.0)*sqrt(TimeStep)+\ QuantumForceOld[i,j]*TimeStep*D wfnew = WaveFunction(PositionNew,alpha) QuantumForceNew = QuantumForce(PositionNew,alpha) GreensFunction = 0.0 for j in range(Dimension): GreensFunction += 0.5*(QuantumForceOld[i,j]+QuantumForceNew[i,j])*\ (D*TimeStep*0.5*(QuantumForceOld[i,j]-QuantumForceNew[i,j])-\ PositionNew[i,j]+PositionOld[i,j]) GreensFunction = 1.0#exp(GreensFunction) ProbabilityRatio = GreensFunction*wfnew**2/wfold**2 #Metropolis-Hastings test to see whether we accept the move if random() <= ProbabilityRatio: for j in range(Dimension): PositionOld[i,j] = PositionNew[i,j] QuantumForceOld[i,j] = QuantumForceNew[i,j] wfold = wfnew DeltaE = LocalEnergy(PositionOld,alpha) DerPsi = DerivativeWFansatz(PositionOld,alpha) DeltaPsi +=DerPsi energy += DeltaE DerivativePsiE += DerPsi*DeltaE # We calculate mean values energy /= NumberMCcycles DerivativePsiE /= NumberMCcycles DeltaPsi /= NumberMCcycles EnergyDer = 2*(DerivativePsiE-DeltaPsi*energy) return energy, EnergyDer #Here starts the main program with variable declarations NumberParticles = 2 Dimension = 2 # guess for variational parameters x0 = 0.5 # Set up iteration using stochastic gradient method Energy =0 ; EnergyDer = 0 pool = mp.Pool(processes=2) Energy, EnergyDer = EnergyMinimization(x0) # No adaptive search for a minimum eta = 0.5 Niterations = 50 Energies = np.zeros(Niterations) EnergyDerivatives = np.zeros(Niterations) AlphaValues = np.zeros(Niterations) Totiterations = np.zeros(Niterations) for iter in range(Niterations): gradients = EnergyDer x0 -= eta*gradients Energy, EnergyDer = EnergyMinimization(x0) Energies[iter] = Energy EnergyDerivatives[iter] = EnergyDer AlphaValues[iter] = x0 Totiterations[iter] = iter plt.subplot(2, 1, 1) plt.plot(Totiterations, Energies, 'o-') plt.title('Energy and energy derivatives') plt.ylabel('Dimensionless energy') plt.subplot(2, 1, 2) plt.plot(Totiterations, EnergyDerivatives, '.-') plt.xlabel(r'$\mathrm{Iterations}$', fontsize=15) plt.ylabel('Energy derivative') save_fig("QdotNonint") plt.show() #nice printout with Pandas import pandas as pd from pandas import DataFrame data ={'Alpha':AlphaValues, 'Energy':Energies,'Derivative':EnergyDerivatives} frame = pd.DataFrame(data) print(frame)
CompPhysics/ComputationalPhysics2
doc/src/MCsummary/src/mpqdot.py
Python
cc0-1.0
5,905
[ "Gaussian" ]
1f5c69b722986778a87ce0b3bed0845918e2d4aad9ef228eb9c227ea47d9d819
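The variational Monte Carlo script above minimizes the energy of the trial wavefunction exp(-0.5*alpha*(r1+r2)) by plain gradient descent, and for this non-interacting two-electron quantum dot the optimum is known analytically: at alpha = 1 the local energy 0.5*(1-alpha^2)*(r1+r2) + 2*alpha reduces to the constant 2, the exact ground-state energy in these dimensionless units, so the descent should drive alpha toward 1 and the energy derivative toward 0. A small standalone check of that zero-variance property follows; local_energy mirrors the script's LocalEnergy, and the sample count and seed are arbitrary.

import numpy as np

def local_energy(r, alpha):
    # Same analytic local energy as LocalEnergy in the script above.
    r1 = r[0, 0]**2 + r[0, 1]**2
    r2 = r[1, 0]**2 + r[1, 1]**2
    return 0.5 * (1 - alpha * alpha) * (r1 + r2) + 2.0 * alpha

rng = np.random.default_rng(0)
samples = rng.normal(size=(100, 2, 2))   # 100 random two-electron configurations
energies = np.array([local_energy(r, alpha=1.0) for r in samples])

# At alpha = 1 the local energy equals 2 for every configuration, so the
# variance of the estimator vanishes at the exact minimum.
assert np.allclose(energies, 2.0) and np.isclose(energies.var(), 0.0)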
from GangaTest.Framework.tests import GangaGPITestCase #from GangaDirac.Lib.Files.DiracFile import DiracFile #from GangaGaudi.Lib.RTHandlers.RunTimeHandlerUtils import get_share_path #from GangaCore.GPIDev.Adapters.StandardJobConfig import StandardJobConfig #from GangaCore.Core.exceptions import ApplicationConfigurationError, GangaException from GangaCore.GPI import * from GangaCore.old_test import generateUniqueTempFile #import GangaDirac.Lib.Server.DiracServer as DiracServer # GangaTest.Framework.utils defines some utility methods from GangaTest.Framework.utils import sleep_until_completed, sleep_until_state import unittest import tempfile import os import string import random from GangaCore.Utility.logging import getLogger logger = getLogger() def rand_str(): import datetime import time t = datetime.datetime.now() unix_t = time.mktime(t.timetuple()) returnable = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(8)) returnable = returnable + "_" + str(unix_t) return returnable class TestDiracFile(GangaGPITestCase): def setUp(self): script = '''#!/bin/bash echo "%s" > a.root echo "%s" > b.root ''' # Having a fixed string leaves us open to GUID conflicts str1 = "HelloWorld_" + rand_str() str2 = "WorldHello_" + rand_str() script = script % (str1, str2) tmpf = tempfile.NamedTemporaryFile(delete=False) tmpf.write(script) self.root, self.filename = os.path.split(tmpf.name) tmpf.close() self.filepath = os.path.join(self.root, self.filename) logger.info("FilePath: %s" % str(self.filepath)) #import GangaCore.Core.InternalServices.Coordinator #GangaCore.Core.InternalServices.Coordinator.enableMonitoringService() def tearDown(self): os.remove(self.filepath) def test_standalone_put(self): myTempFile = generateUniqueTempFile('.txt') root, filename = os.path.split(myTempFile) d1 = DiracFile(filename, root) d1.put() self.assertNotEqual(d1.lfn, '', 'lfn not set upon return') self.assertNotEqual(d1.guid, '', 'guid not set upon return') self.assertNotEqual(d1.locations, [], 'location not set upon return') d1.remove() os.remove(myTempFile) def test_local_job_put_single_file(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), outputfiles=[DiracFile('a.root')]) logger.info("App EXE: %s" % str(j.application.exe.name)) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 1) self.assertEqual(j.outputfiles[0].namePattern, 'a.root') self.assertNotEqual(j.outputfiles[0].lfn, '') self.assertNotEqual(j.outputfiles[0].guid, '') self.assertNotEqual(j.outputfiles[0].locations, []) j.outputfiles[0].remove() def test_local_job_put_multiple_files(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), outputfiles=[DiracFile('a.root'), DiracFile('b.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 2) for df in j.outputfiles: self.assertIn(df.namePattern, ['a.root', 'b.root']) self.assertNotEqual(df.lfn, '') self.assertNotEqual(df.guid, '') self.assertNotEqual(df.locations, []) df.remove() def test_local_job_put_wildcard_files(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), outputfiles=[DiracFile('*.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 2) for df in j.outputfiles: self.assertIn(df.namePattern, ['a.root', 'b.root']) self.assertNotEqual(df.lfn, '') self.assertNotEqual(df.guid, '') self.assertNotEqual(df.locations, []) df.remove() def test_local_job_wildcard_expansion(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), 
outputfiles=[DiracFile('*.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j._impl.outputfiles), 1) self.assertEqual(j._impl.outputfiles[0].namePattern, '*.root') self.assertEqual(len(j._impl.outputfiles[0].subfiles), 2) for df in j._impl.outputfiles[0].subfiles: self.assertIn(df.namePattern, ['a.root', 'b.root']) df.remove() def test_Dirac_job_put_single_file(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), backend=Dirac(), outputfiles=[DiracFile('a.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 1) self.assertEqual(j.outputfiles[0].namePattern, 'a.root') self.assertNotEqual(j.outputfiles[0].lfn, '') self.assertNotEqual(j.outputfiles[0].guid, '') self.assertNotEqual(j.outputfiles[0].locations, []) j.outputfiles[0].remove() def test_Dirac_job_put_multiple_files(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), backend=Dirac(), outputfiles=[DiracFile('a.root'), DiracFile('b.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 2) for df in j.outputfiles: print("Testing: %s" % str(df.namePattern)) self.assertIn(df.namePattern, ['a.root', 'b.root']) self.assertNotEqual(df.lfn, '') self.assertNotEqual(df.guid, '') self.assertNotEqual(df.locations, []) df.remove() def test_Dirac_job_put_wildcard_files(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), backend=Dirac(), outputfiles=[DiracFile('*.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j.outputfiles), 2) for df in j.outputfiles: self.assertIn(df.namePattern, ['a.root', 'b.root']) self.assertNotEqual(df.lfn, '') self.assertNotEqual(df.guid, '') self.assertNotEqual(df.locations, []) df.remove() def test_Dirac_job_wildcard_expansion(self): j = Job(application=Executable(exe=File(self.filepath), args=[]), backend=Dirac(), outputfiles=[DiracFile('*.root')]) j.submit() sleep_until_completed(j) self.assertEqual(len(j._impl.outputfiles), 1) self.assertEqual(j._impl.outputfiles[0].namePattern, '*.root') self.assertEqual(len(j._impl.outputfiles[0].subfiles), 2) for df in j._impl.outputfiles[0].subfiles: self.assertIn(df.namePattern, ['a.root', 'b.root']) df.remove()
ganga-devs/ganga
ganga/GangaDirac/old_test/GPI/Files/TestDiracFile.py
Python
gpl-3.0
6,821
[ "DIRAC" ]
668e2e00005cbf7b14cfb6c56dc9f1a416ea0c3e1b5dc3258e586116cb63a8cc
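Each test in the DiracFile suite above repeats the same submit, wait, assert-lfn-guid-locations, remove sequence for different backends and name patterns. Below is a hedged sketch of how that boilerplate could be collected into one helper; submit_and_check is a hypothetical function, not part of Ganga, and it relies only on names the test module already imports (Job, Executable, File, DiracFile, Dirac, sleep_until_completed) plus the test instance for its filepath and assertions.

def submit_and_check(test, outputfiles, backend=None, expected=('a.root', 'b.root')):
    """Hypothetical helper for TestDiracFile: submit a job, wait for it to
    complete, then verify and clean up every uploaded DiracFile."""
    kwargs = dict(application=Executable(exe=File(test.filepath), args=[]),
                  outputfiles=outputfiles)
    if backend is not None:
        kwargs['backend'] = backend
    j = Job(**kwargs)
    j.submit()
    sleep_until_completed(j)
    for df in j.outputfiles:
        test.assertIn(df.namePattern, expected)
        test.assertNotEqual(df.lfn, '')        # upload produced an LFN
        test.assertNotEqual(df.guid, '')       # and a GUID
        test.assertNotEqual(df.locations, [])  # and at least one storage location
        df.remove()                            # remove the replica from grid storage
    return j

# Usage inside a test method, e.g. for the wildcard Dirac case:
#     submit_and_check(self, [DiracFile('*.root')], backend=Dirac())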